code stringlengths 75–104k | docstring stringlengths 1–46.9k | text stringlengths 164–112k |
---|---|---|
def alpha(self, x, y, kwargs, k=None):
"""
deflection angles
:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param k: only evaluate the k-th lens model
:return: deflection angles in units of arcsec
"""
return self.lens_model.alpha(x, y, kwargs, k=k) | deflection angles
:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param k: only evaluate the k-th lens model
:return: deflection angles in units of arcsec | Below is the instruction that describes the task:
### Input:
deflection angles
:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param k: only evaluate the k-th lens model
:return: deflection angles in units of arcsec
### Response:
def alpha(self, x, y, kwargs, k=None):
"""
deflection angles
:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param k: only evaluate the k-th lens model
:return: deflection angles in units of arcsec
"""
return self.lens_model.alpha(x, y, kwargs, k=k) |
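A minimal usage sketch for the row above, assuming a lenstronomy-style `LensModel` API; the `'SIS'` profile and its parameter names are illustrative assumptions rather than details taken from this row:

```python
import numpy as np
from lenstronomy.LensModel.lens_model import LensModel  # assumed import path

lens_model = LensModel(lens_model_list=['SIS'])          # hypothetical single-lens setup
x = np.array([0.5, 1.0, 1.5])                            # arcsec
y = np.array([0.0, 0.5, 1.0])                            # arcsec
kwargs = [{'theta_E': 1.0, 'center_x': 0.0, 'center_y': 0.0}]  # one dict per lens model

alpha_x, alpha_y = lens_model.alpha(x, y, kwargs)        # deflection angles in arcsec
```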
def microsoft(self, key, x86=False):
"""
Return key in Microsoft software registry.
Parameters
----------
key: str
Registry key path where to look.
x86: str
Force x86 software registry.
Return
------
str: value
"""
node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node'
return os.path.join('Software', node64, 'Microsoft', key) | Return key in Microsoft software registry.
Parameters
----------
key: str
Registry key path where to look.
x86: str
Force x86 software registry.
Return
------
str: value | Below is the instruction that describes the task:
### Input:
Return key in Microsoft software registry.
Parameters
----------
key: str
Registry key path where to look.
x86: str
Force x86 software registry.
Return
------
str: value
### Response:
def microsoft(self, key, x86=False):
"""
Return key in Microsoft software registry.
Parameters
----------
key: str
Registry key path where to look.
x86: str
Force x86 software registry.
Return
------
str: value
"""
node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node'
return os.path.join('Software', node64, 'Microsoft', key) |
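A standalone sketch of the same path logic, with an explicit `current_is_x86` flag standing in for `self.pi.current_is_x86()`, which is not shown in the row above (separators in the comments are shown as on Windows):

```python
import os

def microsoft_key(key, current_is_x86=False, x86=False):
    # 64-bit Python reading the 32-bit registry view goes through Wow6432Node
    node64 = '' if current_is_x86 or x86 else 'Wow6432Node'
    return os.path.join('Software', node64, 'Microsoft', key)

print(microsoft_key(r'VisualStudio\SxS\VC7'))            # Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7
print(microsoft_key(r'VisualStudio\SxS\VC7', x86=True))  # Software\Microsoft\VisualStudio\SxS\VC7
```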
def get(self, wheel=False):
"""Downloads the package from PyPI.
Returns:
Full path of the downloaded file.
Raises:
PermissionError if the save_dir is not writable.
"""
try:
url = get_url(self.client, self.name, self.version,
wheel, hashed_format=True)[0]
except exceptions.MissingUrlException as e:
raise SystemExit(e)
if wheel:
self.temp_dir = tempfile.mkdtemp()
save_dir = self.temp_dir
else:
save_dir = self.save_dir
save_file = '{0}/{1}'.format(save_dir, url.split('/')[-1])
request.urlretrieve(url, save_file)
logger.info('Downloaded package from PyPI: {0}.'.format(save_file))
return save_file | Downloads the package from PyPI.
Returns:
Full path of the downloaded file.
Raises:
PermissionError if the save_dir is not writable. | Below is the instruction that describes the task:
### Input:
Downloads the package from PyPI.
Returns:
Full path of the downloaded file.
Raises:
PermissionError if the save_dir is not writable.
### Response:
def get(self, wheel=False):
"""Downloads the package from PyPI.
Returns:
Full path of the downloaded file.
Raises:
PermissionError if the save_dir is not writable.
"""
try:
url = get_url(self.client, self.name, self.version,
wheel, hashed_format=True)[0]
except exceptions.MissingUrlException as e:
raise SystemExit(e)
if wheel:
self.temp_dir = tempfile.mkdtemp()
save_dir = self.temp_dir
else:
save_dir = self.save_dir
save_file = '{0}/{1}'.format(save_dir, url.split('/')[-1])
request.urlretrieve(url, save_file)
logger.info('Downloaded package from PyPI: {0}.'.format(save_file))
return save_file |
def check_engine(handle):
"""Check availability of requested template engine."""
if handle == 'help':
dump_engines()
sys.exit(0)
if handle not in engines.engines:
print('Engine "%s" is not available.' % (handle,), file=sys.stderr)
sys.exit(1) | Check availability of requested template engine. | Below is the instruction that describes the task:
### Input:
Check availability of requested template engine.
### Response:
def check_engine(handle):
"""Check availability of requested template engine."""
if handle == 'help':
dump_engines()
sys.exit(0)
if handle not in engines.engines:
print('Engine "%s" is not available.' % (handle,), file=sys.stderr)
sys.exit(1) |
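A self-contained sketch of the same lookup-and-exit pattern; the `ENGINES` dict below is a hypothetical stand-in for the module's `engines.engines` registry:

```python
import sys

ENGINES = {'mako': object(), 'jinja2': object()}   # hypothetical registry

def check_engine(handle):
    if handle == 'help':
        print('Available engines: ' + ', '.join(sorted(ENGINES)))
        sys.exit(0)
    if handle not in ENGINES:
        print('Engine "%s" is not available.' % (handle,), file=sys.stderr)
        sys.exit(1)

check_engine('jinja2')    # known engine: returns silently
# check_engine('help')    # would list the registry and exit 0
# check_engine('cheetah') # would print an error and exit 1
```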
def get_model(client, model_id):
"""Sample ID: go/samples-tracker/1510"""
# [START bigquery_get_model]
from google.cloud import bigquery
# TODO(developer): Construct a BigQuery client object.
# client = bigquery.Client()
# TODO(developer): Set model_id to the ID of the model to fetch.
# model_id = 'your-project.your_dataset.your_model'
model = client.get_model(model_id)
full_model_id = "{}.{}.{}".format(model.project, model.dataset_id, model.model_id)
friendly_name = model.friendly_name
print(
"Got model '{}' with friendly_name '{}'.".format(full_model_id, friendly_name)
) | Sample ID: go/samples-tracker/1510 | Below is the instruction that describes the task:
### Input:
Sample ID: go/samples-tracker/1510
### Response:
def get_model(client, model_id):
"""Sample ID: go/samples-tracker/1510"""
# [START bigquery_get_model]
from google.cloud import bigquery
# TODO(developer): Construct a BigQuery client object.
# client = bigquery.Client()
# TODO(developer): Set model_id to the ID of the model to fetch.
# model_id = 'your-project.your_dataset.your_model'
model = client.get_model(model_id)
full_model_id = "{}.{}.{}".format(model.project, model.dataset_id, model.model_id)
friendly_name = model.friendly_name
print(
"Got model '{}' with friendly_name '{}'.".format(full_model_id, friendly_name)
) |
def _download_sdss_image(
self):
"""*download sdss image*
"""
self.log.info('starting the ``_download_sdss_image`` method')
opt = ""
if self.grid:
opt += "G"
if self.label:
opt += "L"
if self.photocat:
opt += "P"
if self.speccat:
opt += "S"
if self.invertColors:
opt += "I"
if len(opt):
opt = "opt=%(opt)s&" % locals()
width = self.pixelWidth
scale = (self.arcminWidth * 60.) / width
converter = unit_conversion(
log=self.log
)
ra = converter.ra_sexegesimal_to_decimal(
ra=self.ra
)
dec = converter.dec_sexegesimal_to_decimal(
dec=self.dec
)
url = """http://skyservice.pha.jhu.edu/DR12/ImgCutout/getjpeg.aspx?ra=%(ra)s&dec=%(dec)s&scale=%(scale)s&%(opt)sPhotoObjs=on&width=%(width)s&height=%(width)s""" % locals(
)
from fundamentals.download import multiobject_download
localUrls = multiobject_download(
urlList=[url],
downloadDirectory=self.downloadDirectory,
log=self.log,
timeStamp=False,
timeout=180,
concurrentDownloads=10,
resetFilename=[self.filename],
credentials=False, # { 'username' : "...", "password", "..." }
longTime=True,
indexFilenames=False
)
print(url)
self.log.info('completed the ``_download_sdss_image`` method')
return None | *download sdss image* | Below is the instruction that describes the task:
### Input:
*download sdss image*
### Response:
def _download_sdss_image(
self):
"""*download sdss image*
"""
self.log.info('starting the ``_download_sdss_image`` method')
opt = ""
if self.grid:
opt += "G"
if self.label:
opt += "L"
if self.photocat:
opt += "P"
if self.speccat:
opt += "S"
if self.invertColors:
opt += "I"
if len(opt):
opt = "opt=%(opt)s&" % locals()
width = self.pixelWidth
scale = (self.arcminWidth * 60.) / width
converter = unit_conversion(
log=self.log
)
ra = converter.ra_sexegesimal_to_decimal(
ra=self.ra
)
dec = converter.dec_sexegesimal_to_decimal(
dec=self.dec
)
url = """http://skyservice.pha.jhu.edu/DR12/ImgCutout/getjpeg.aspx?ra=%(ra)s&dec=%(dec)s&scale=%(scale)s&%(opt)sPhotoObjs=on&width=%(width)s&height=%(width)s""" % locals(
)
from fundamentals.download import multiobject_download
localUrls = multiobject_download(
urlList=[url],
downloadDirectory=self.downloadDirectory,
log=self.log,
timeStamp=False,
timeout=180,
concurrentDownloads=10,
resetFilename=[self.filename],
credentials=False, # { 'username' : "...", "password", "..." }
longTime=True,
indexFilenames=False
)
print(url)
self.log.info('completed the ``_download_sdss_image`` method')
return None |
def get(self, requirement):
"""
Get a distribution archive from any of the available caches.
:param requirement: A :class:`.Requirement` object.
:returns: The absolute pathname of a local file or :data:`None` when the
distribution archive is missing from all available caches.
"""
filename = self.generate_filename(requirement)
for backend in list(self.backends):
try:
pathname = backend.get(filename)
if pathname is not None:
return pathname
except CacheBackendDisabledError as e:
logger.debug("Disabling %s because it requires configuration: %s", backend, e)
self.backends.remove(backend)
except Exception as e:
logger.exception("Disabling %s because it failed: %s", backend, e)
self.backends.remove(backend) | Get a distribution archive from any of the available caches.
:param requirement: A :class:`.Requirement` object.
:returns: The absolute pathname of a local file or :data:`None` when the
distribution archive is missing from all available caches. | Below is the instruction that describes the task:
### Input:
Get a distribution archive from any of the available caches.
:param requirement: A :class:`.Requirement` object.
:returns: The absolute pathname of a local file or :data:`None` when the
distribution archive is missing from all available caches.
### Response:
def get(self, requirement):
"""
Get a distribution archive from any of the available caches.
:param requirement: A :class:`.Requirement` object.
:returns: The absolute pathname of a local file or :data:`None` when the
distribution archive is missing from all available caches.
"""
filename = self.generate_filename(requirement)
for backend in list(self.backends):
try:
pathname = backend.get(filename)
if pathname is not None:
return pathname
except CacheBackendDisabledError as e:
logger.debug("Disabling %s because it requires configuration: %s", backend, e)
self.backends.remove(backend)
except Exception as e:
logger.exception("Disabling %s because it failed: %s", backend, e)
self.backends.remove(backend) |
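A minimal sketch of the "try each backend, drop the ones that fail" pattern used above, with plain callables standing in for the cache backend objects (the names here are assumptions):

```python
def first_available(filename, backends):
    for backend in list(backends):       # iterate over a copy so removal is safe
        try:
            pathname = backend(filename)
            if pathname is not None:
                return pathname
        except Exception:
            backends.remove(backend)     # disable a backend that raised
    return None

backends = [lambda f: None, lambda f: '/tmp/cache/' + f]
print(first_available('requests-2.31.0.tar.gz', backends))
# -> /tmp/cache/requests-2.31.0.tar.gz
```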
def _find_files(self):
"""Find files recursively in the root path
using provided extensions.
:return: list of absolute file paths
"""
files = []
for ext in self.extensions:
ext_files = util.find_files(self.root, "*" + ext)
log.debug("found {} '*{}' files in '{}'".format(
len(ext_files), ext, self.root)
)
files.extend(ext_files)
return files | Find files recursively in the root path
using provided extensions.
:return: list of absolute file paths | Below is the instruction that describes the task:
### Input:
Find files recursively in the root path
using provided extensions.
:return: list of absolute file paths
### Response:
def _find_files(self):
"""Find files recursively in the root path
using provided extensions.
:return: list of absolute file paths
"""
files = []
for ext in self.extensions:
ext_files = util.find_files(self.root, "*" + ext)
log.debug("found {} '*{}' files in '{}'".format(
len(ext_files), ext, self.root)
)
files.extend(ext_files)
return files |
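A standard-library-only equivalent, on the assumption that `util.find_files(root, "*" + ext)` behaves roughly like a recursive glob:

```python
from pathlib import Path

def find_files(root, extensions):
    files = []
    for ext in extensions:
        # rglob returns absolute paths when `root` is absolute
        files.extend(str(p) for p in Path(root).rglob('*' + ext))
    return files

# e.g. find_files('/tmp/project', ['.py', '.cfg'])
```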
def get_enumerations_from_bit_mask(enumeration, mask):
"""
A utility function that creates a list of enumeration values from a bit
mask for a specific mask enumeration class.
Args:
enumeration (class): The enumeration class from which to draw
enumeration values.
mask (int): The bit mask from which to identify enumeration values.
Returns:
list: A list of enumeration values corresponding to the bit mask.
"""
return [x for x in enumeration if (x.value & mask) == x.value] | A utility function that creates a list of enumeration values from a bit
mask for a specific mask enumeration class.
Args:
enumeration (class): The enumeration class from which to draw
enumeration values.
mask (int): The bit mask from which to identify enumeration values.
Returns:
list: A list of enumeration values corresponding to the bit mask. | Below is the instruction that describes the task:
### Input:
A utility function that creates a list of enumeration values from a bit
mask for a specific mask enumeration class.
Args:
enumeration (class): The enumeration class from which to draw
enumeration values.
mask (int): The bit mask from which to identify enumeration values.
Returns:
list: A list of enumeration values corresponding to the bit mask.
### Response:
def get_enumerations_from_bit_mask(enumeration, mask):
"""
A utility function that creates a list of enumeration values from a bit
mask for a specific mask enumeration class.
Args:
enumeration (class): The enumeration class from which to draw
enumeration values.
mask (int): The bit mask from which to identify enumeration values.
Returns:
list: A list of enumeration values corresponding to the bit mask.
"""
return [x for x in enumeration if (x.value & mask) == x.value] |
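A usage sketch with a throwaway `IntFlag` enumeration (the enum below is illustrative and not part of the library the function comes from):

```python
import enum

class Permission(enum.IntFlag):
    READ = 1
    WRITE = 2
    EXECUTE = 4

mask = 0b101                                        # READ | EXECUTE
print(get_enumerations_from_bit_mask(Permission, mask))
# -> [<Permission.READ: 1>, <Permission.EXECUTE: 4>]
```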
def create(self, fullname, shortname, category_id, **kwargs):
"""
Create a new course
:param string fullname: The course's fullname
:param string shortname: The course's shortname
:param int category_id: The course's category
:keyword string idnumber: (optional) Course ID number. \
Yes, it's a string, blame Moodle.
:keyword int summaryformat: (optional) Defaults to 1 (HTML). \
Summary format options: (1 = HTML, 0 = Moodle, 2 = Plain, \
or 4 = Markdown)
:keyword string format: (optional) Defaults to "topics"
Topic options: (weeks, topics, social, site)
:keyword bool showgrades: (optional) Defaults to True. \
Determines if grades are shown
:keyword int newsitems: (optional) Defaults to 5. \
Number of recent items appearing on the course page
:keyword bool startdate: (optional) Timestamp when the course start
:keyword int maxbytes: (optional) Defaults to 83886080. \
Largest size of file that can be uploaded into the course
:keyword bool showreports: Default to True. Are activity report shown?
:keyword bool visible: (optional) Determines if course is \
visible to students
:keyword int groupmode: (optional) Defaults to 2.
options: (0 = no group, 1 = separate, 2 = visible)
:keyword bool groupmodeforce: (optional) Defaults to False. \
Force group mode
:keyword int defaultgroupingid: (optional) Defaults to 0. \
Default grouping id
:keyword bool enablecompletion: (optional) Enable control via \
completion in activity settings.
:keyword bool completionstartonenrol: (optional) \
Begin tracking a student's progress in course completion after
:keyword bool completionnotify: (optional) Default? Dunno. \
Presumably notifies course completion
:keyword string lang: (optional) Force course language.
:keyword string forcetheme: (optional) Name of the force theme
Example Usage::
>>> import muddle
>>> muddle.course().create('a new course', 'new-course', 20)
"""
allowed_options = ['idnumber', 'summaryformat',
'format', 'showgrades',
'newsitems', 'startdate',
'maxbytes', 'showreports',
'visible', 'groupmode',
'groupmodeforce', 'defaultgroupingid',
'enablecompletion', 'completionstartonenrol',
'completionnotify', 'lang',
'forcetheme']
if valid_options(kwargs, allowed_options):
option_params = {}
for index, key in enumerate(kwargs):
val = kwargs.get(key)
if isinstance(val, bool):
val = int(val)
option_params.update({'courses[0][' + key + ']': val})
params = {'wsfunction': 'core_course_create_courses',
'courses[0][fullname]': fullname,
'courses[0][shortname]': shortname,
'courses[0][categoryid]': category_id}
params.update(option_params)
params.update(self.request_params)
return requests.post(self.api_url, params=params, verify=False) | Create a new course
:param string fullname: The course's fullname
:param string shortname: The course's shortname
:param int category_id: The course's category
:keyword string idnumber: (optional) Course ID number. \
Yes, it's a string, blame Moodle.
:keyword int summaryformat: (optional) Defaults to 1 (HTML). \
Summary format options: (1 = HTML, 0 = Moodle, 2 = Plain, \
or 4 = Markdown)
:keyword string format: (optional) Defaults to "topics"
Topic options: (weeks, topics, social, site)
:keyword bool showgrades: (optional) Defaults to True. \
Determines if grades are shown
:keyword int newsitems: (optional) Defaults to 5. \
Number of recent items appearing on the course page
:keyword bool startdate: (optional) Timestamp when the course start
:keyword int maxbytes: (optional) Defaults to 83886080. \
Largest size of file that can be uploaded into the course
:keyword bool showreports: Default to True. Are activity report shown?
:keyword bool visible: (optional) Determines if course is \
visible to students
:keyword int groupmode: (optional) Defaults to 2.
options: (0 = no group, 1 = separate, 2 = visible)
:keyword bool groupmodeforce: (optional) Defaults to False. \
Force group mode
:keyword int defaultgroupingid: (optional) Defaults to 0. \
Default grouping id
:keyword bool enablecompletion: (optional) Enable control via \
completion in activity settings.
:keyword bool completionstartonenrol: (optional) \
Begin tracking a student's progress in course completion after
:keyword bool completionnotify: (optional) Default? Dunno. \
Presumably notifies course completion
:keyword string lang: (optional) Force course language.
:keyword string forcetheme: (optional) Name of the force theme
Example Usage::
>>> import muddle
>>> muddle.course().create('a new course', 'new-course', 20) | Below is the instruction that describes the task:
### Input:
Create a new course
:param string fullname: The course's fullname
:param string shortname: The course's shortname
:param int category_id: The course's category
:keyword string idnumber: (optional) Course ID number. \
Yes, it's a string, blame Moodle.
:keyword int summaryformat: (optional) Defaults to 1 (HTML). \
Summary format options: (1 = HTML, 0 = Moodle, 2 = Plain, \
or 4 = Markdown)
:keyword string format: (optional) Defaults to "topics"
Topic options: (weeks, topics, social, site)
:keyword bool showgrades: (optional) Defaults to True. \
Determines if grades are shown
:keyword int newsitems: (optional) Defaults to 5. \
Number of recent items appearing on the course page
:keyword bool startdate: (optional) Timestamp when the course start
:keyword int maxbytes: (optional) Defaults to 83886080. \
Largest size of file that can be uploaded into the course
:keyword bool showreports: Default to True. Are activity report shown?
:keyword bool visible: (optional) Determines if course is \
visible to students
:keyword int groupmode: (optional) Defaults to 2.
options: (0 = no group, 1 = separate, 2 = visible)
:keyword bool groupmodeforce: (optional) Defaults to False. \
Force group mode
:keyword int defaultgroupingid: (optional) Defaults to 0. \
Default grouping id
:keyword bool enablecompletion: (optional) Enable control via \
completion in activity settings.
:keyword bool completionstartonenrol: (optional) \
Begin tracking a student's progress in course completion after
:keyword bool completionnotify: (optional) Default? Dunno. \
Presumably notifies course completion
:keyword string lang: (optional) Force course language.
:keyword string forcetheme: (optional) Name of the force theme
Example Usage::
>>> import muddle
>>> muddle.course().create('a new course', 'new-course', 20)
### Response:
def create(self, fullname, shortname, category_id, **kwargs):
"""
Create a new course
:param string fullname: The course's fullname
:param string shortname: The course's shortname
:param int category_id: The course's category
:keyword string idnumber: (optional) Course ID number. \
Yes, it's a string, blame Moodle.
:keyword int summaryformat: (optional) Defaults to 1 (HTML). \
Summary format options: (1 = HTML, 0 = Moodle, 2 = Plain, \
or 4 = Markdown)
:keyword string format: (optional) Defaults to "topics"
Topic options: (weeks, topics, social, site)
:keyword bool showgrades: (optional) Defaults to True. \
Determines if grades are shown
:keyword int newsitems: (optional) Defaults to 5. \
Number of recent items appearing on the course page
:keyword bool startdate: (optional) Timestamp when the course start
:keyword int maxbytes: (optional) Defaults to 83886080. \
Largest size of file that can be uploaded into the course
:keyword bool showreports: Default to True. Are activity report shown?
:keyword bool visible: (optional) Determines if course is \
visible to students
:keyword int groupmode: (optional) Defaults to 2.
options: (0 = no group, 1 = separate, 2 = visible)
:keyword bool groupmodeforce: (optional) Defaults to False. \
Force group mode
:keyword int defaultgroupingid: (optional) Defaults to 0. \
Default grouping id
:keyword bool enablecompletion: (optional) Enable control via \
completion in activity settings.
:keyword bool completionstartonenrol: (optional) \
Begin tracking a student's progress in course completion after
:keyword bool completionnotify: (optional) Default? Dunno. \
Presumably notifies course completion
:keyword string lang: (optional) Force course language.
:keyword string forcetheme: (optional) Name of the force theme
Example Usage::
>>> import muddle
>>> muddle.course().create('a new course', 'new-course', 20)
"""
allowed_options = ['idnumber', 'summaryformat',
'format', 'showgrades',
'newsitems', 'startdate',
'maxbytes', 'showreports',
'visible', 'groupmode',
'groupmodeforce', 'defaultgroupingid',
'enablecompletion', 'completionstartonenrol',
'completionnotify', 'lang',
'forcetheme']
if valid_options(kwargs, allowed_options):
option_params = {}
for index, key in enumerate(kwargs):
val = kwargs.get(key)
if isinstance(val, bool):
val = int(val)
option_params.update({'courses[0][' + key + ']': val})
params = {'wsfunction': 'core_course_create_courses',
'courses[0][fullname]': fullname,
'courses[0][shortname]': shortname,
'courses[0][categoryid]': category_id}
params.update(option_params)
params.update(self.request_params)
return requests.post(self.api_url, params=params, verify=False) |
def _extract_packages(self):
"""
Extract a package in a new directory.
"""
if not hasattr(self, "retrieved_packages_unpacked"):
self.retrieved_packages_unpacked = [self.package_name]
for path in self.retrieved_packages_unpacked:
package_name = basename(path)
self.path_unpacked = join(CFG_UNPACKED_FILES,
package_name.split('.')[0])
self.logger.debug("Extracting package: %s"
% (path.split("/")[-1],))
try:
if "_archival_pdf" in self.path_unpacked:
self.path_unpacked = (self.path_unpacked
.rstrip("_archival_pdf"))
ZipFile(path).extractall(join(self.path_unpacked,
"archival_pdfs"))
else:
ZipFile(path).extractall(self.path_unpacked)
#TarFile.open(path).extractall(self.path_unpacked)
except Exception:
register_exception(alert_admin=True,
prefix="OUP error extracting package.")
self.logger.error("Error extraction package file: %s"
% (path,))
if hasattr(self, "path_unpacked"):
return self.path_unpacked | Extract a package in a new directory. | Below is the instruction that describes the task:
### Input:
Extract a package in a new directory.
### Response:
def _extract_packages(self):
"""
Extract a package in a new directory.
"""
if not hasattr(self, "retrieved_packages_unpacked"):
self.retrieved_packages_unpacked = [self.package_name]
for path in self.retrieved_packages_unpacked:
package_name = basename(path)
self.path_unpacked = join(CFG_UNPACKED_FILES,
package_name.split('.')[0])
self.logger.debug("Extracting package: %s"
% (path.split("/")[-1],))
try:
if "_archival_pdf" in self.path_unpacked:
self.path_unpacked = (self.path_unpacked
.rstrip("_archival_pdf"))
ZipFile(path).extractall(join(self.path_unpacked,
"archival_pdfs"))
else:
ZipFile(path).extractall(self.path_unpacked)
#TarFile.open(path).extractall(self.path_unpacked)
except Exception:
register_exception(alert_admin=True,
prefix="OUP error extracting package.")
self.logger.error("Error extraction package file: %s"
% (path,))
if hasattr(self, "path_unpacked"):
return self.path_unpacked |
def do_commits(self):
"""
Perform len(MARKED_DAYS)*self.max_commits and Push to the Repository
"""
git_clone_command = "git clone " + str(self.git_repo_url)
subprocess.call(git_clone_command, shell=True)
subprocess.check_call(
['touch', 'gitHeart.txt'], cwd=self.repository_name)
self.append_onto_file(self.repository_name+"/gitHeart.txt", HEADER)
subprocess.check_call(
['git', 'add', 'gitHeart.txt'], cwd=self.repository_name)
subprocess.check_call(
['git', 'commit', '-m', '"Commit Number 0"'], cwd=self.repository_name)
for commit_number in range(1, len(MARKED_DAYS)*self.max_commits+1):
heart_msg = HEART.format(commit_number=str(commit_number))
self.append_onto_file(
self.repository_name+"/gitHeart.txt", heart_msg)
subprocess.check_call(
['git', 'add', 'gitHeart.txt'], cwd=self.repository_name)
subprocess.check_call(['git', 'commit', '-m', '"Commit Number {commit_number}"'.format(
commit_number=commit_number)], cwd=self.repository_name)
subprocess.check_call(
['git', 'push', 'origin', 'master'], cwd=self.repository_name) | Perform len(MARKED_DAYS)*self.max_commits and Push to the Repository | Below is the the instruction that describes the task:
### Input:
Perform len(MARKED_DAYS)*self.max_commits and Push to the Repository
### Response:
def do_commits(self):
"""
Perform len(MARKED_DAYS)*self.max_commits and Push to the Repository
"""
git_clone_command = "git clone " + str(self.git_repo_url)
subprocess.call(git_clone_command, shell=True)
subprocess.check_call(
['touch', 'gitHeart.txt'], cwd=self.repository_name)
self.append_onto_file(self.repository_name+"/gitHeart.txt", HEADER)
subprocess.check_call(
['git', 'add', 'gitHeart.txt'], cwd=self.repository_name)
subprocess.check_call(
['git', 'commit', '-m', '"Commit Number 0"'], cwd=self.repository_name)
for commit_number in range(1, len(MARKED_DAYS)*self.max_commits+1):
heart_msg = HEART.format(commit_number=str(commit_number))
self.append_onto_file(
self.repository_name+"/gitHeart.txt", heart_msg)
subprocess.check_call(
['git', 'add', 'gitHeart.txt'], cwd=self.repository_name)
subprocess.check_call(['git', 'commit', '-m', '"Commit Number {commit_number}"'.format(
commit_number=commit_number)], cwd=self.repository_name)
subprocess.check_call(
['git', 'push', 'origin', 'master'], cwd=self.repository_name) |
def get_protein_coding_genes(
path_or_buffer,
include_polymorphic_pseudogenes=True,
remove_duplicates=True,
**kwargs):
r"""Get list of all protein-coding genes based on Ensembl GTF file.
Parameters
----------
See :func:`get_genes` function.
Returns
-------
`pandas.DataFrame`
Table with rows corresponding to protein-coding genes.
"""
valid_biotypes = set(['protein_coding'])
if include_polymorphic_pseudogenes:
valid_biotypes.add('polymorphic_pseudogene')
df = get_genes(path_or_buffer, valid_biotypes,
remove_duplicates=remove_duplicates, **kwargs)
return df | r"""Get list of all protein-coding genes based on Ensembl GTF file.
Parameters
----------
See :func:`get_genes` function.
Returns
-------
`pandas.DataFrame`
Table with rows corresponding to protein-coding genes. | Below is the instruction that describes the task:
### Input:
r"""Get list of all protein-coding genes based on Ensembl GTF file.
Parameters
----------
See :func:`get_genes` function.
Returns
-------
`pandas.DataFrame`
Table with rows corresponding to protein-coding genes.
### Response:
def get_protein_coding_genes(
path_or_buffer,
include_polymorphic_pseudogenes=True,
remove_duplicates=True,
**kwargs):
r"""Get list of all protein-coding genes based on Ensembl GTF file.
Parameters
----------
See :func:`get_genes` function.
Returns
-------
`pandas.DataFrame`
Table with rows corresponding to protein-coding genes.
"""
valid_biotypes = set(['protein_coding'])
if include_polymorphic_pseudogenes:
valid_biotypes.add('polymorphic_pseudogene')
df = get_genes(path_or_buffer, valid_biotypes,
remove_duplicates=remove_duplicates, **kwargs)
return df |
def get_all_host_templates(resource_root, cluster_name="default"):
"""
Get all host templates in a cluster.
@param cluster_name: Cluster name.
@return: ApiList of ApiHostTemplate objects for all host templates in a cluster.
@since: API v3
"""
return call(resource_root.get,
HOST_TEMPLATES_PATH % (cluster_name,),
ApiHostTemplate, True, api_version=3) | Get all host templates in a cluster.
@param cluster_name: Cluster name.
@return: ApiList of ApiHostTemplate objects for all host templates in a cluster.
@since: API v3 | Below is the instruction that describes the task:
### Input:
Get all host templates in a cluster.
@param cluster_name: Cluster name.
@return: ApiList of ApiHostTemplate objects for all host templates in a cluster.
@since: API v3
### Response:
def get_all_host_templates(resource_root, cluster_name="default"):
"""
Get all host templates in a cluster.
@param cluster_name: Cluster name.
@return: ApiList of ApiHostTemplate objects for all host templates in a cluster.
@since: API v3
"""
return call(resource_root.get,
HOST_TEMPLATES_PATH % (cluster_name,),
ApiHostTemplate, True, api_version=3) |
def _init_metadata(self):
"""stub"""
super(EulerRotationAnswerFormRecord, self)._init_metadata()
self._euler_rotation_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'angle_values'),
'element_label': 'Euler Angle Values',
'instructions': 'Provide X, Y, and Z euler angle rotation values',
'required': True,
'read_only': False,
'linked': True,
'array': False,
'default_object_values': [{}],
'syntax': 'OBJECT',
'object_set': []
} | stub | Below is the instruction that describes the task:
### Input:
stub
### Response:
def _init_metadata(self):
"""stub"""
super(EulerRotationAnswerFormRecord, self)._init_metadata()
self._euler_rotation_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'angle_values'),
'element_label': 'Euler Angle Values',
'instructions': 'Provide X, Y, and Z euler angle rotation values',
'required': True,
'read_only': False,
'linked': True,
'array': False,
'default_object_values': [{}],
'syntax': 'OBJECT',
'object_set': []
} |
def append(self, child, *args, **kwargs):
"""See :meth:`AbstractElement.append`"""
#Accept Word instances instead of WordReference, references will be automagically used upon serialisation
if isinstance(child, (Word, Morpheme, Phoneme)) and WordReference in self.ACCEPTED_DATA:
#We don't really append but do an insertion so all references are in proper order
insertionpoint = len(self.data)
for i, sibling in enumerate(self.data):
if isinstance(sibling, (Word, Morpheme, Phoneme)):
try:
if not sibling.precedes(child):
insertionpoint = i
except: #happens if we can't determine common ancestors
pass
self.data.insert(insertionpoint, child)
return child
elif isinstance(child, AbstractSpanAnnotation): #(covers span roles just as well)
insertionpoint = len(self.data)
try:
firstword = child.wrefs(0)
except IndexError:
#we have no basis to determine an insertionpoint for this child, just append it then
return super(AbstractSpanAnnotation,self).append(child, *args, **kwargs)
insertionpoint = len(self.data)
for i, sibling in enumerate(self.data):
if isinstance(sibling, (Word, Morpheme, Phoneme)):
try:
if not sibling.precedes(firstword):
insertionpoint = i
except: #happens if we can't determine common ancestors
pass
return super(AbstractSpanAnnotation,self).insert(insertionpoint, child, *args, **kwargs)
else:
return super(AbstractSpanAnnotation,self).append(child, *args, **kwargs) | See :meth:`AbstractElement.append` | Below is the instruction that describes the task:
### Input:
See :meth:`AbstractElement.append`
### Response:
def append(self, child, *args, **kwargs):
"""See :meth:`AbstractElement.append`"""
#Accept Word instances instead of WordReference, references will be automagically used upon serialisation
if isinstance(child, (Word, Morpheme, Phoneme)) and WordReference in self.ACCEPTED_DATA:
#We don't really append but do an insertion so all references are in proper order
insertionpoint = len(self.data)
for i, sibling in enumerate(self.data):
if isinstance(sibling, (Word, Morpheme, Phoneme)):
try:
if not sibling.precedes(child):
insertionpoint = i
except: #happens if we can't determine common ancestors
pass
self.data.insert(insertionpoint, child)
return child
elif isinstance(child, AbstractSpanAnnotation): #(covers span roles just as well)
insertionpoint = len(self.data)
try:
firstword = child.wrefs(0)
except IndexError:
#we have no basis to determine an insertionpoint for this child, just append it then
return super(AbstractSpanAnnotation,self).append(child, *args, **kwargs)
insertionpoint = len(self.data)
for i, sibling in enumerate(self.data):
if isinstance(sibling, (Word, Morpheme, Phoneme)):
try:
if not sibling.precedes(firstword):
insertionpoint = i
except: #happens if we can't determine common ancestors
pass
return super(AbstractSpanAnnotation,self).insert(insertionpoint, child, *args, **kwargs)
else:
return super(AbstractSpanAnnotation,self).append(child, *args, **kwargs) |
def convert_objects(self, convert_dates=True, convert_numeric=False,
convert_timedeltas=True, copy=True):
"""
Attempt to infer better dtype for object columns.
.. deprecated:: 0.21.0
Parameters
----------
convert_dates : boolean, default True
If True, convert to date where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
convert_numeric : boolean, default False
If True, attempt to coerce to numbers (including strings), with
unconvertible values becoming NaN.
convert_timedeltas : boolean, default True
If True, convert to timedelta where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
"""
msg = ("convert_objects is deprecated. To re-infer data dtypes for "
"object columns, use {klass}.infer_objects()\nFor all "
"other conversions use the data-type specific converters "
"pd.to_datetime, pd.to_timedelta and pd.to_numeric."
).format(klass=self.__class__.__name__)
warnings.warn(msg, FutureWarning, stacklevel=2)
return self._constructor(
self._data.convert(convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy)).__finalize__(self) | Attempt to infer better dtype for object columns.
.. deprecated:: 0.21.0
Parameters
----------
convert_dates : boolean, default True
If True, convert to date where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
convert_numeric : boolean, default False
If True, attempt to coerce to numbers (including strings), with
unconvertible values becoming NaN.
convert_timedeltas : boolean, default True
If True, convert to timedelta where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type. | Below is the instruction that describes the task:
### Input:
Attempt to infer better dtype for object columns.
.. deprecated:: 0.21.0
Parameters
----------
convert_dates : boolean, default True
If True, convert to date where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
convert_numeric : boolean, default False
If True, attempt to coerce to numbers (including strings), with
unconvertible values becoming NaN.
convert_timedeltas : boolean, default True
If True, convert to timedelta where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
### Response:
def convert_objects(self, convert_dates=True, convert_numeric=False,
convert_timedeltas=True, copy=True):
"""
Attempt to infer better dtype for object columns.
.. deprecated:: 0.21.0
Parameters
----------
convert_dates : boolean, default True
If True, convert to date where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
convert_numeric : boolean, default False
If True, attempt to coerce to numbers (including strings), with
unconvertible values becoming NaN.
convert_timedeltas : boolean, default True
If True, convert to timedelta where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
"""
msg = ("convert_objects is deprecated. To re-infer data dtypes for "
"object columns, use {klass}.infer_objects()\nFor all "
"other conversions use the data-type specific converters "
"pd.to_datetime, pd.to_timedelta and pd.to_numeric."
).format(klass=self.__class__.__name__)
warnings.warn(msg, FutureWarning, stacklevel=2)
return self._constructor(
self._data.convert(convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy)).__finalize__(self) |
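A short sketch of the replacement the deprecation message points to, `infer_objects()` plus the per-column converters; the column names are made up for illustration:

```python
import pandas as pd

df = pd.DataFrame({'a': ['1', '2', 'x'],
                   'b': ['2021-01-01', 'not a date', None]}, dtype=object)

df = df.infer_objects()                            # re-infer dtypes for object columns
df['a'] = pd.to_numeric(df['a'], errors='coerce')  # unconvertible values become NaN
df['b'] = pd.to_datetime(df['b'], errors='coerce') # unconvertible values become NaT
```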
def conv(self,
num_out_channels,
k_height,
k_width,
d_height=1,
d_width=1,
mode="SAME",
input_layer=None,
num_channels_in=None,
use_batch_norm=None,
stddev=None,
activation="relu",
bias=0.0):
"""Construct a conv2d layer on top of cnn."""
if input_layer is None:
input_layer = self.top_layer
if num_channels_in is None:
num_channels_in = self.top_size
kernel_initializer = None
if stddev is not None:
kernel_initializer = tf.truncated_normal_initializer(stddev=stddev)
name = "conv" + str(self.counts["conv"])
self.counts["conv"] += 1
with tf.variable_scope(name):
strides = [1, d_height, d_width, 1]
if self.data_format == "NCHW":
strides = [strides[0], strides[3], strides[1], strides[2]]
if mode != "SAME_RESNET":
conv = self._conv2d_impl(
input_layer,
num_channels_in,
num_out_channels,
kernel_size=[k_height, k_width],
strides=[d_height, d_width],
padding=mode,
kernel_initializer=kernel_initializer)
else: # Special padding mode for ResNet models
if d_height == 1 and d_width == 1:
conv = self._conv2d_impl(
input_layer,
num_channels_in,
num_out_channels,
kernel_size=[k_height, k_width],
strides=[d_height, d_width],
padding="SAME",
kernel_initializer=kernel_initializer)
else:
rate = 1 # Unused (for 'a trous' convolutions)
kernel_height_effective = k_height + (k_height - 1) * (
rate - 1)
pad_h_beg = (kernel_height_effective - 1) // 2
pad_h_end = kernel_height_effective - 1 - pad_h_beg
kernel_width_effective = k_width + (k_width - 1) * (
rate - 1)
pad_w_beg = (kernel_width_effective - 1) // 2
pad_w_end = kernel_width_effective - 1 - pad_w_beg
padding = [[0, 0], [pad_h_beg, pad_h_end],
[pad_w_beg, pad_w_end], [0, 0]]
if self.data_format == "NCHW":
padding = [
padding[0], padding[3], padding[1], padding[2]
]
input_layer = tf.pad(input_layer, padding)
conv = self._conv2d_impl(
input_layer,
num_channels_in,
num_out_channels,
kernel_size=[k_height, k_width],
strides=[d_height, d_width],
padding="VALID",
kernel_initializer=kernel_initializer)
if use_batch_norm is None:
use_batch_norm = self.use_batch_norm
if not use_batch_norm:
if bias is not None:
biases = self.get_variable(
"biases", [num_out_channels],
self.variable_dtype,
self.dtype,
initializer=tf.constant_initializer(bias))
biased = tf.reshape(
tf.nn.bias_add(
conv, biases, data_format=self.data_format),
conv.get_shape())
else:
biased = conv
else:
self.top_layer = conv
self.top_size = num_out_channels
biased = self.batch_norm(**self.batch_norm_config)
if activation == "relu":
conv1 = tf.nn.relu(biased)
elif activation == "linear" or activation is None:
conv1 = biased
elif activation == "tanh":
conv1 = tf.nn.tanh(biased)
else:
raise KeyError("Invalid activation type \"%s\"" % activation)
self.top_layer = conv1
self.top_size = num_out_channels
return conv1 | Construct a conv2d layer on top of cnn. | Below is the instruction that describes the task:
### Input:
Construct a conv2d layer on top of cnn.
### Response:
def conv(self,
num_out_channels,
k_height,
k_width,
d_height=1,
d_width=1,
mode="SAME",
input_layer=None,
num_channels_in=None,
use_batch_norm=None,
stddev=None,
activation="relu",
bias=0.0):
"""Construct a conv2d layer on top of cnn."""
if input_layer is None:
input_layer = self.top_layer
if num_channels_in is None:
num_channels_in = self.top_size
kernel_initializer = None
if stddev is not None:
kernel_initializer = tf.truncated_normal_initializer(stddev=stddev)
name = "conv" + str(self.counts["conv"])
self.counts["conv"] += 1
with tf.variable_scope(name):
strides = [1, d_height, d_width, 1]
if self.data_format == "NCHW":
strides = [strides[0], strides[3], strides[1], strides[2]]
if mode != "SAME_RESNET":
conv = self._conv2d_impl(
input_layer,
num_channels_in,
num_out_channels,
kernel_size=[k_height, k_width],
strides=[d_height, d_width],
padding=mode,
kernel_initializer=kernel_initializer)
else: # Special padding mode for ResNet models
if d_height == 1 and d_width == 1:
conv = self._conv2d_impl(
input_layer,
num_channels_in,
num_out_channels,
kernel_size=[k_height, k_width],
strides=[d_height, d_width],
padding="SAME",
kernel_initializer=kernel_initializer)
else:
rate = 1 # Unused (for 'a trous' convolutions)
kernel_height_effective = k_height + (k_height - 1) * (
rate - 1)
pad_h_beg = (kernel_height_effective - 1) // 2
pad_h_end = kernel_height_effective - 1 - pad_h_beg
kernel_width_effective = k_width + (k_width - 1) * (
rate - 1)
pad_w_beg = (kernel_width_effective - 1) // 2
pad_w_end = kernel_width_effective - 1 - pad_w_beg
padding = [[0, 0], [pad_h_beg, pad_h_end],
[pad_w_beg, pad_w_end], [0, 0]]
if self.data_format == "NCHW":
padding = [
padding[0], padding[3], padding[1], padding[2]
]
input_layer = tf.pad(input_layer, padding)
conv = self._conv2d_impl(
input_layer,
num_channels_in,
num_out_channels,
kernel_size=[k_height, k_width],
strides=[d_height, d_width],
padding="VALID",
kernel_initializer=kernel_initializer)
if use_batch_norm is None:
use_batch_norm = self.use_batch_norm
if not use_batch_norm:
if bias is not None:
biases = self.get_variable(
"biases", [num_out_channels],
self.variable_dtype,
self.dtype,
initializer=tf.constant_initializer(bias))
biased = tf.reshape(
tf.nn.bias_add(
conv, biases, data_format=self.data_format),
conv.get_shape())
else:
biased = conv
else:
self.top_layer = conv
self.top_size = num_out_channels
biased = self.batch_norm(**self.batch_norm_config)
if activation == "relu":
conv1 = tf.nn.relu(biased)
elif activation == "linear" or activation is None:
conv1 = biased
elif activation == "tanh":
conv1 = tf.nn.tanh(biased)
else:
raise KeyError("Invalid activation type \"%s\"" % activation)
self.top_layer = conv1
self.top_size = num_out_channels
return conv1 |
def any_shared(enum_one, enum_two):
'''
Truthy if any element in enum_one is present in enum_two
'''
if not is_collection(enum_one) or not is_collection(enum_two):
return False
enum_one = enum_one if isinstance(enum_one, (set, dict)) else set(enum_one)
enum_two = enum_two if isinstance(enum_two, (set, dict)) else set(enum_two)
return any(e in enum_two for e in enum_one) | Truthy if any element in enum_one is present in enum_two | Below is the instruction that describes the task:
### Input:
Truthy if any element in enum_one is present in enum_two
### Response:
def any_shared(enum_one, enum_two):
'''
Truthy if any element in enum_one is present in enum_two
'''
if not is_collection(enum_one) or not is_collection(enum_two):
return False
enum_one = enum_one if isinstance(enum_one, (set, dict)) else set(enum_one)
enum_two = enum_two if isinstance(enum_two, (set, dict)) else set(enum_two)
return any(e in enum_two for e in enum_one) |
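A self-contained sketch: `is_collection` is not shown in the row above, so a simple stand-in (any iterable that is not a string) is assumed here:

```python
def is_collection(obj):
    return hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes))

def any_shared(enum_one, enum_two):
    if not is_collection(enum_one) or not is_collection(enum_two):
        return False
    enum_two = enum_two if isinstance(enum_two, (set, dict)) else set(enum_two)
    return any(e in enum_two for e in enum_one)

print(any_shared([1, 2, 3], {3, 4}))  # True
print(any_shared('abc', {'a'}))       # False -- strings are not treated as collections
```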
def string(self) -> bytes:
"""The capabilities string without the enclosing square brackets."""
if self._raw is not None:
return self._raw
self._raw = raw = BytesFormat(b' ').join(
[b'CAPABILITY', b'IMAP4rev1'] + self.capabilities)
return raw | The capabilities string without the enclosing square brackets. | Below is the instruction that describes the task:
### Input:
The capabilities string without the enclosing square brackets.
### Response:
def string(self) -> bytes:
"""The capabilities string without the enclosing square brackets."""
if self._raw is not None:
return self._raw
self._raw = raw = BytesFormat(b' ').join(
[b'CAPABILITY', b'IMAP4rev1'] + self.capabilities)
return raw |
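A minimal sketch of the same lazy join, using a plain `bytes` join in place of the package's `BytesFormat` helper (which is not shown above):

```python
capabilities = [b'IDLE', b'UIDPLUS']   # hypothetical extra capabilities
raw = b' '.join([b'CAPABILITY', b'IMAP4rev1'] + capabilities)
print(raw)  # b'CAPABILITY IMAP4rev1 IDLE UIDPLUS'
```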
def register_hook(self, hook, priority='NORMAL'):
"""Register a hook into the hook list.
Args:
hook (:obj:`Hook`): The hook to be registered.
priority (int or str or :obj:`Priority`): Hook priority.
Lower value means higher priority.
"""
assert isinstance(hook, Hook)
if hasattr(hook, 'priority'):
raise ValueError('"priority" is a reserved attribute for hooks')
priority = get_priority(priority)
hook.priority = priority
# insert the hook to a sorted list
inserted = False
for i in range(len(self._hooks) - 1, -1, -1):
if priority >= self._hooks[i].priority:
self._hooks.insert(i + 1, hook)
inserted = True
break
if not inserted:
self._hooks.insert(0, hook) | Register a hook into the hook list.
Args:
hook (:obj:`Hook`): The hook to be registered.
priority (int or str or :obj:`Priority`): Hook priority.
Lower value means higher priority. | Below is the instruction that describes the task:
### Input:
Register a hook into the hook list.
Args:
hook (:obj:`Hook`): The hook to be registered.
priority (int or str or :obj:`Priority`): Hook priority.
Lower value means higher priority.
### Response:
def register_hook(self, hook, priority='NORMAL'):
"""Register a hook into the hook list.
Args:
hook (:obj:`Hook`): The hook to be registered.
priority (int or str or :obj:`Priority`): Hook priority.
Lower value means higher priority.
"""
assert isinstance(hook, Hook)
if hasattr(hook, 'priority'):
raise ValueError('"priority" is a reserved attribute for hooks')
priority = get_priority(priority)
hook.priority = priority
# insert the hook to a sorted list
inserted = False
for i in range(len(self._hooks) - 1, -1, -1):
if priority >= self._hooks[i].priority:
self._hooks.insert(i + 1, hook)
inserted = True
break
if not inserted:
self._hooks.insert(0, hook) |
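A standalone sketch of the priority-ordered insertion, using bare integers in place of `Hook` objects to show where a new priority lands:

```python
def insert_by_priority(hooks, priority):
    inserted = False
    for i in range(len(hooks) - 1, -1, -1):
        if priority >= hooks[i]:
            hooks.insert(i + 1, priority)
            inserted = True
            break
    if not inserted:
        hooks.insert(0, priority)

hooks = []
for p in (50, 10, 30, 10):
    insert_by_priority(hooks, p)
print(hooks)  # [10, 10, 30, 50] -- lower value means higher priority
```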
def parse_duration(duration):
"""Attepmts to parse an ISO8601 formatted ``duration``.
Returns a ``datetime.timedelta`` object.
"""
duration = str(duration).upper().strip()
elements = ELEMENTS.copy()
for pattern in (SIMPLE_DURATION, COMBINED_DURATION):
if pattern.match(duration):
found = pattern.match(duration).groupdict()
del found['time']
elements.update(dict((k, int(v or 0))
for k, v
in found.items()))
return datetime.timedelta(days=(elements['days'] +
_months_to_days(elements['months']) +
_years_to_days(elements['years'])),
hours=elements['hours'],
minutes=elements['minutes'],
seconds=elements['seconds'])
return ParseError() | Attempts to parse an ISO8601 formatted ``duration``.
Returns a ``datetime.timedelta`` object. | Below is the instruction that describes the task:
### Input:
Attempts to parse an ISO8601 formatted ``duration``.
Returns a ``datetime.timedelta`` object.
### Response:
def parse_duration(duration):
"""Attepmts to parse an ISO8601 formatted ``duration``.
Returns a ``datetime.timedelta`` object.
"""
duration = str(duration).upper().strip()
elements = ELEMENTS.copy()
for pattern in (SIMPLE_DURATION, COMBINED_DURATION):
if pattern.match(duration):
found = pattern.match(duration).groupdict()
del found['time']
elements.update(dict((k, int(v or 0))
for k, v
in found.items()))
return datetime.timedelta(days=(elements['days'] +
_months_to_days(elements['months']) +
_years_to_days(elements['years'])),
hours=elements['hours'],
minutes=elements['minutes'],
seconds=elements['seconds'])
return ParseError() |
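A hedged usage note: the `ELEMENTS` dict and the `SIMPLE_DURATION`/`COMBINED_DURATION` regexes live elsewhere in the module; assuming they accept standard ISO 8601 strings, calls behave roughly like this:

```python
# parse_duration('P1DT2H30M')  -> datetime.timedelta(days=1, seconds=9000)
# parse_duration('P1Y2M')      -> a timedelta via the module's year/month-to-days helpers
# parse_duration('not iso')    -> ParseError()  (note: the error object is returned, not raised)
```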
def run_radia_with_merge(job, rna_bam, tumor_bam, normal_bam, univ_options, radia_options):
"""
A wrapper for the entire RADIA sub-graph.
:param dict rna_bam: Dict of dicts of bam and bai for tumor RNA-Seq obtained by running STAR within
ProTECT.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict radia_options: Options specific to RADIA
:return: fsID to the merged RADIA calls
:rtype: toil.fileStore.FileID
"""
spawn = job.wrapJobFn(run_radia, rna_bam['rna_genome'], tumor_bam,
normal_bam, univ_options, radia_options, disk='100M',
memory='100M').encapsulate()
merge = job.wrapJobFn(merge_perchrom_vcfs, spawn.rv(), univ_options, disk='100M', memory='100M')
job.addChild(spawn)
spawn.addChild(merge)
return merge.rv() | A wrapper for the entire RADIA sub-graph.
:param dict rna_bam: Dict of dicts of bam and bai for tumor RNA-Seq obtained by running STAR within
ProTECT.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict radia_options: Options specific to RADIA
:return: fsID to the merged RADIA calls
:rtype: toil.fileStore.FileID | Below is the instruction that describes the task:
### Input:
A wrapper for the entire RADIA sub-graph.
:param dict rna_bam: Dict of dicts of bam and bai for tumor RNA-Seq obtained by running STAR within
ProTECT.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict radia_options: Options specific to RADIA
:return: fsID to the merged RADIA calls
:rtype: toil.fileStore.FileID
### Response:
def run_radia_with_merge(job, rna_bam, tumor_bam, normal_bam, univ_options, radia_options):
"""
A wrapper for the entire RADIA sub-graph.
:param dict rna_bam: Dict of dicts of bam and bai for tumor RNA-Seq obtained by running STAR within
ProTECT.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict radia_options: Options specific to RADIA
:return: fsID to the merged RADIA calls
:rtype: toil.fileStore.FileID
"""
spawn = job.wrapJobFn(run_radia, rna_bam['rna_genome'], tumor_bam,
normal_bam, univ_options, radia_options, disk='100M',
memory='100M').encapsulate()
merge = job.wrapJobFn(merge_perchrom_vcfs, spawn.rv(), univ_options, disk='100M', memory='100M')
job.addChild(spawn)
spawn.addChild(merge)
return merge.rv() |
def uncomment(path,
regex,
char='#',
backup='.bak'):
'''
.. deprecated:: 0.17.0
Use :py:func:`~salt.modules.file.replace` instead.
Uncomment specified commented lines in a file
path
The full path to the file to be edited
regex
A regular expression used to find the lines that are to be uncommented.
This regex should not include the comment character. A leading ``^``
character will be stripped for convenience (for easily switching
between comment() and uncomment()).
char : ``#``
The character to remove in order to uncomment a line
backup : ``.bak``
The file will be backed up before edit with this file extension;
**WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
overwrite this backup
CLI Example:
.. code-block:: bash
salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID'
'''
return comment_line(path=path,
regex=regex,
char=char,
cmnt=False,
backup=backup) | .. deprecated:: 0.17.0
Use :py:func:`~salt.modules.file.replace` instead.
Uncomment specified commented lines in a file
path
The full path to the file to be edited
regex
A regular expression used to find the lines that are to be uncommented.
This regex should not include the comment character. A leading ``^``
character will be stripped for convenience (for easily switching
between comment() and uncomment()).
char : ``#``
The character to remove in order to uncomment a line
backup : ``.bak``
The file will be backed up before edit with this file extension;
**WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
overwrite this backup
CLI Example:
.. code-block:: bash
salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID' | Below is the instruction that describes the task:
### Input:
.. deprecated:: 0.17.0
Use :py:func:`~salt.modules.file.replace` instead.
Uncomment specified commented lines in a file
path
The full path to the file to be edited
regex
A regular expression used to find the lines that are to be uncommented.
This regex should not include the comment character. A leading ``^``
character will be stripped for convenience (for easily switching
between comment() and uncomment()).
char : ``#``
The character to remove in order to uncomment a line
backup : ``.bak``
The file will be backed up before edit with this file extension;
**WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
overwrite this backup
CLI Example:
.. code-block:: bash
salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID'
### Response:
def uncomment(path,
regex,
char='#',
backup='.bak'):
'''
.. deprecated:: 0.17.0
Use :py:func:`~salt.modules.file.replace` instead.
Uncomment specified commented lines in a file
path
The full path to the file to be edited
regex
A regular expression used to find the lines that are to be uncommented.
This regex should not include the comment character. A leading ``^``
character will be stripped for convenience (for easily switching
between comment() and uncomment()).
char : ``#``
The character to remove in order to uncomment a line
backup : ``.bak``
The file will be backed up before edit with this file extension;
**WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
overwrite this backup
CLI Example:
.. code-block:: bash
salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID'
'''
return comment_line(path=path,
regex=regex,
char=char,
cmnt=False,
backup=backup) |
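A rough standalone sketch of what the uncomment operation does to matching lines; the real implementation delegates to `comment_line`/`file.replace`, so this is only illustrative:

```python
import re

def uncomment_lines(text, regex, char='#'):
    pattern = re.compile(r'^(\s*)' + re.escape(char) + r'\s?(' + regex.lstrip('^') + r')',
                         re.MULTILINE)
    return pattern.sub(r'\1\2', text)

print(uncomment_lines('#ALL: PARANOID\nALL: LOCAL\n', 'ALL: PARANOID'), end='')
# ALL: PARANOID
# ALL: LOCAL
```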
def _initialized_adjustments(self, prstGeom):
"""
Return an initialized list of adjustment values based on the contents
of *prstGeom*
"""
if prstGeom is None:
return []
davs = AutoShapeType.default_adjustment_values(prstGeom.prst)
adjustments = [Adjustment(name, def_val) for name, def_val in davs]
self._update_adjustments_with_actuals(adjustments, prstGeom.gd_lst)
return adjustments | Return an initialized list of adjustment values based on the contents
of *prstGeom* | Below is the instruction that describes the task:
### Input:
Return an initialized list of adjustment values based on the contents
of *prstGeom*
### Response:
def _initialized_adjustments(self, prstGeom):
"""
Return an initialized list of adjustment values based on the contents
of *prstGeom*
"""
if prstGeom is None:
return []
davs = AutoShapeType.default_adjustment_values(prstGeom.prst)
adjustments = [Adjustment(name, def_val) for name, def_val in davs]
self._update_adjustments_with_actuals(adjustments, prstGeom.gd_lst)
return adjustments |
def _structure_attr_from_tuple(self, a, name, value):
"""Handle an individual attrs attribute."""
type_ = a.type
if type_ is None:
# No type metadata.
return value
return self._structure_func.dispatch(type_)(value, type_) | Handle an individual attrs attribute. | Below is the the instruction that describes the task:
### Input:
Handle an individual attrs attribute.
### Response:
def _structure_attr_from_tuple(self, a, name, value):
"""Handle an individual attrs attribute."""
type_ = a.type
if type_ is None:
# No type metadata.
return value
return self._structure_func.dispatch(type_)(value, type_) |
def ssl_required(allow_non_ssl=False):
"""
Views decorated with this will always get redirected to https
except when allow_non_ssl is set to true.
"""
def wrapper(view_func):
def _checkssl(request, *args, **kwargs):
# allow_non_ssl=True lets non-https requests to come
# through to this view (and hence not redirect)
if hasattr(settings, 'SSL_ENABLED') and settings.SSL_ENABLED \
and not request.is_secure() and not allow_non_ssl:
return HttpResponseRedirect(
request.build_absolute_uri().replace('http://', 'https://'))
return view_func(request, *args, **kwargs)
return _checkssl
return wrapper | Views decorated with this will always get redirected to https
except when allow_non_ssl is set to true. | Below is the instruction that describes the task:
### Input:
Views decorated with this will always get redirected to https
except when allow_non_ssl is set to true.
### Response:
def ssl_required(allow_non_ssl=False):
"""
Views decorated with this will always get redirected to https
except when allow_non_ssl is set to true.
"""
def wrapper(view_func):
def _checkssl(request, *args, **kwargs):
# allow_non_ssl=True lets non-https requests to come
# through to this view (and hence not redirect)
if hasattr(settings, 'SSL_ENABLED') and settings.SSL_ENABLED \
and not request.is_secure() and not allow_non_ssl:
return HttpResponseRedirect(
request.build_absolute_uri().replace('http://', 'https://'))
return view_func(request, *args, **kwargs)
return _checkssl
return wrapper |
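A minimal usage sketch for the ssl_required decorator above, assuming a Django project where SSL_ENABLED is defined in settings; the view name and response text below are made up for illustration.

from django.http import HttpResponse

@ssl_required(allow_non_ssl=False)
def checkout(request):
    # Served only over HTTPS when settings.SSL_ENABLED is True; plain HTTP
    # requests are redirected to the same URL with the https:// scheme.
    return HttpResponse("secure checkout")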
def _caveat_v1_to_dict(c):
''' Return a caveat as a dictionary for export as the JSON
macaroon v1 format.
'''
serialized = {}
if len(c.caveat_id) > 0:
serialized['cid'] = c.caveat_id
if c.verification_key_id:
serialized['vid'] = utils.raw_urlsafe_b64encode(
c.verification_key_id).decode('utf-8')
if c.location:
serialized['cl'] = c.location
return serialized | Return a caveat as a dictionary for export as the JSON
macaroon v1 format. | Below is the instruction that describes the task:
### Input:
Return a caveat as a dictionary for export as the JSON
macaroon v1 format.
### Response:
def _caveat_v1_to_dict(c):
''' Return a caveat as a dictionary for export as the JSON
macaroon v1 format.
'''
serialized = {}
if len(c.caveat_id) > 0:
serialized['cid'] = c.caveat_id
if c.verification_key_id:
serialized['vid'] = utils.raw_urlsafe_b64encode(
c.verification_key_id).decode('utf-8')
if c.location:
serialized['cl'] = c.location
return serialized |
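A quick sketch of how the serializer above behaves, using a stand-in object that carries only the three attributes the function reads (caveat_id, verification_key_id, location); the caveat text is invented.

from collections import namedtuple

# Stand-in for a first-party caveat with no verification key and no location.
Caveat = namedtuple('Caveat', 'caveat_id verification_key_id location')
first_party = Caveat(caveat_id='time-before 2030-01-01', verification_key_id=None, location=None)
print(_caveat_v1_to_dict(first_party))   # {'cid': 'time-before 2030-01-01'}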
def sweep(self, mode, speed=None):
"""Starts the output current sweep.
:param mode: The sweep mode. Valid entries are `'UP'`, `'DOWN'`,
`'PAUSE'`or `'ZERO'`. If in shim mode, `'LIMIT'` is valid as well.
:param speed: The sweeping speed. Valid entries are `'FAST'`, `'SLOW'`
or `None`.
"""
sweep_modes = ['UP', 'DOWN', 'PAUSE', 'ZERO', 'LIMIT']
sweep_speed = ['SLOW', 'FAST', None]
if not mode in sweep_modes:
raise ValueError('Invalid sweep mode.')
if not speed in sweep_speed:
raise ValueError('Invalid sweep speed.')
if speed is None:
self._write('SWEEP {0}'.format(mode))
else:
self._write('SWEEP {0} {1}'.format(mode, speed)) | Starts the output current sweep.
:param mode: The sweep mode. Valid entries are `'UP'`, `'DOWN'`,
`'PAUSE'`or `'ZERO'`. If in shim mode, `'LIMIT'` is valid as well.
:param speed: The sweeping speed. Valid entries are `'FAST'`, `'SLOW'`
or `None`. | Below is the instruction that describes the task:
### Input:
Starts the output current sweep.
:param mode: The sweep mode. Valid entries are `'UP'`, `'DOWN'`,
`'PAUSE'`or `'ZERO'`. If in shim mode, `'LIMIT'` is valid as well.
:param speed: The sweeping speed. Valid entries are `'FAST'`, `'SLOW'`
or `None`.
### Response:
def sweep(self, mode, speed=None):
"""Starts the output current sweep.
:param mode: The sweep mode. Valid entries are `'UP'`, `'DOWN'`,
`'PAUSE'`or `'ZERO'`. If in shim mode, `'LIMIT'` is valid as well.
:param speed: The sweeping speed. Valid entries are `'FAST'`, `'SLOW'`
or `None`.
"""
sweep_modes = ['UP', 'DOWN', 'PAUSE', 'ZERO', 'LIMIT']
sweep_speed = ['SLOW', 'FAST', None]
if not mode in sweep_modes:
raise ValueError('Invalid sweep mode.')
if not speed in sweep_speed:
raise ValueError('Invalid sweep speed.')
if speed is None:
self._write('SWEEP {0}'.format(mode))
else:
self._write('SWEEP {0} {1}'.format(mode, speed)) |
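A hedged sketch of calling sweep() through a minimal stub, since the real class wraps an instrument connection that is not shown here; FakeSupply and the printed commands are illustrative only.

class FakeSupply:
    sweep = sweep                      # reuse the method defined in the entry above
    def _write(self, cmd):
        # the real driver would send this string to the instrument
        print('->', cmd)

supply = FakeSupply()
supply.sweep('UP', 'FAST')             # -> SWEEP UP FAST
supply.sweep('PAUSE')                  # -> SWEEP PAUSE
# supply.sweep('SIDEWAYS') would raise ValueError('Invalid sweep mode.')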
def cli(env, identifier, allocation, port, routing_type, routing_method):
"""Edit an existing load balancer service group."""
mgr = SoftLayer.LoadBalancerManager(env.client)
loadbal_id, group_id = loadbal.parse_id(identifier)
# check if any input is provided
if not any([allocation, port, routing_type, routing_method]):
raise exceptions.CLIAbort(
'At least one property is required to be changed!')
mgr.edit_service_group(loadbal_id,
group_id,
allocation=allocation,
port=port,
routing_type=routing_type,
routing_method=routing_method)
env.fout('Load balancer service group %s is being updated!' % identifier) | Edit an existing load balancer service group. | Below is the instruction that describes the task:
### Input:
Edit an existing load balancer service group.
### Response:
def cli(env, identifier, allocation, port, routing_type, routing_method):
"""Edit an existing load balancer service group."""
mgr = SoftLayer.LoadBalancerManager(env.client)
loadbal_id, group_id = loadbal.parse_id(identifier)
# check if any input is provided
if not any([allocation, port, routing_type, routing_method]):
raise exceptions.CLIAbort(
'At least one property is required to be changed!')
mgr.edit_service_group(loadbal_id,
group_id,
allocation=allocation,
port=port,
routing_type=routing_type,
routing_method=routing_method)
env.fout('Load balancer service group %s is being updated!' % identifier) |
def canonical_extension(fmt_ext):
""" Canonical extension of file format extension
Converts the format extension fmt_ext into the canonical extension for that format. For example,
``canonical_extension('tif') == 'tiff'``. Here we agree that the canonical extension for format F is F.value
:param fmt_ext: A string representing an extension (e.g. ``'txt'``, ``'png'``, etc.)
:type fmt_ext: str
:return: The canonical form of the extension (e.g. if ``fmt_ext='tif'`` then we return ``'tiff'``)
:rtype: str
"""
if MimeType.has_value(fmt_ext):
return fmt_ext
try:
return {
'tif': MimeType.TIFF.value,
'jpeg': MimeType.JPG.value,
'hdf5': MimeType.HDF.value,
'h5': MimeType.HDF.value
}[fmt_ext]
except KeyError:
raise ValueError('Data format .{} is not supported'.format(fmt_ext)) | Canonical extension of file format extension
Converts the format extension fmt_ext into the canonical extension for that format. For example,
``canonical_extension('tif') == 'tiff'``. Here we agree that the canonical extension for format F is F.value
:param fmt_ext: A string representing an extension (e.g. ``'txt'``, ``'png'``, etc.)
:type fmt_ext: str
:return: The canonical form of the extension (e.g. if ``fmt_ext='tif'`` then we return ``'tiff'``)
:rtype: str | Below is the instruction that describes the task:
### Input:
Canonical extension of file format extension
Converts the format extension fmt_ext into the canonical extension for that format. For example,
``canonical_extension('tif') == 'tiff'``. Here we agree that the canonical extension for format F is F.value
:param fmt_ext: A string representing an extension (e.g. ``'txt'``, ``'png'``, etc.)
:type fmt_ext: str
:return: The canonical form of the extension (e.g. if ``fmt_ext='tif'`` then we return ``'tiff'``)
:rtype: str
### Response:
def canonical_extension(fmt_ext):
""" Canonical extension of file format extension
Converts the format extension fmt_ext into the canonical extension for that format. For example,
``canonical_extension('tif') == 'tiff'``. Here we agree that the canonical extension for format F is F.value
:param fmt_ext: A string representing an extension (e.g. ``'txt'``, ``'png'``, etc.)
:type fmt_ext: str
:return: The canonical form of the extension (e.g. if ``fmt_ext='tif'`` then we return ``'tiff'``)
:rtype: str
"""
if MimeType.has_value(fmt_ext):
return fmt_ext
try:
return {
'tif': MimeType.TIFF.value,
'jpeg': MimeType.JPG.value,
'hdf5': MimeType.HDF.value,
'h5': MimeType.HDF.value
}[fmt_ext]
except KeyError:
raise ValueError('Data format .{} is not supported'.format(fmt_ext)) |
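Hedged usage sketch for canonical_extension() above; it assumes the MimeType enum referenced by the function (from the surrounding package) is importable, so the expected results are shown as comments rather than asserted.

canonical_extension('tif')     # 'tiff'
canonical_extension('jpeg')    # 'jpg'  (assuming MimeType.JPG.value == 'jpg')
canonical_extension('png')     # 'png'  - already canonical, returned unchanged
# canonical_extension('foo') raises ValueError: Data format .foo is not supported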
def process_request(
self, path: str, request_headers: Headers
) -> Union[Optional[HTTPResponse], Awaitable[Optional[HTTPResponse]]]:
"""
Intercept the HTTP request and return an HTTP response if needed.
``request_headers`` is a :class:`~websockets.http.Headers` instance.
If this method returns ``None``, the WebSocket handshake continues.
If it returns a status code, headers and a response body, that HTTP
response is sent and the connection is closed.
The HTTP status must be a :class:`~http.HTTPStatus`.
HTTP headers must be a :class:`~websockets.http.Headers` instance, a
:class:`~collections.abc.Mapping`, or an iterable of ``(name, value)``
pairs.
The HTTP response body must be :class:`bytes`. It may be empty.
This method may be overridden to check the request headers and set a
different status, for example to authenticate the request and return
``HTTPStatus.UNAUTHORIZED`` or ``HTTPStatus.FORBIDDEN``.
It can be declared as a function or as a coroutine because such
authentication checks are likely to require network requests.
It may also be overridden by passing a ``process_request`` argument to
the :class:`WebSocketServerProtocol` constructor or the :func:`serve`
function.
"""
if self._process_request is not None:
return self._process_request(path, request_headers)
return None | Intercept the HTTP request and return an HTTP response if needed.
``request_headers`` is a :class:`~websockets.http.Headers` instance.
If this method returns ``None``, the WebSocket handshake continues.
If it returns a status code, headers and a response body, that HTTP
response is sent and the connection is closed.
The HTTP status must be a :class:`~http.HTTPStatus`.
HTTP headers must be a :class:`~websockets.http.Headers` instance, a
:class:`~collections.abc.Mapping`, or an iterable of ``(name, value)``
pairs.
The HTTP response body must be :class:`bytes`. It may be empty.
This method may be overridden to check the request headers and set a
different status, for example to authenticate the request and return
``HTTPStatus.UNAUTHORIZED`` or ``HTTPStatus.FORBIDDEN``.
It can be declared as a function or as a coroutine because such
authentication checks are likely to require network requests.
It may also be overridden by passing a ``process_request`` argument to
the :class:`WebSocketServerProtocol` constructor or the :func:`serve`
function. | Below is the instruction that describes the task:
### Input:
Intercept the HTTP request and return an HTTP response if needed.
``request_headers`` is a :class:`~websockets.http.Headers` instance.
If this method returns ``None``, the WebSocket handshake continues.
If it returns a status code, headers and a response body, that HTTP
response is sent and the connection is closed.
The HTTP status must be a :class:`~http.HTTPStatus`.
HTTP headers must be a :class:`~websockets.http.Headers` instance, a
:class:`~collections.abc.Mapping`, or an iterable of ``(name, value)``
pairs.
The HTTP response body must be :class:`bytes`. It may be empty.
This method may be overridden to check the request headers and set a
different status, for example to authenticate the request and return
``HTTPStatus.UNAUTHORIZED`` or ``HTTPStatus.FORBIDDEN``.
It can be declared as a function or as a coroutine because such
authentication checks are likely to require network requests.
It may also be overridden by passing a ``process_request`` argument to
the :class:`WebSocketServerProtocol` constructor or the :func:`serve`
function.
### Response:
def process_request(
self, path: str, request_headers: Headers
) -> Union[Optional[HTTPResponse], Awaitable[Optional[HTTPResponse]]]:
"""
Intercept the HTTP request and return an HTTP response if needed.
``request_headers`` is a :class:`~websockets.http.Headers` instance.
If this method returns ``None``, the WebSocket handshake continues.
If it returns a status code, headers and a response body, that HTTP
response is sent and the connection is closed.
The HTTP status must be a :class:`~http.HTTPStatus`.
HTTP headers must be a :class:`~websockets.http.Headers` instance, a
:class:`~collections.abc.Mapping`, or an iterable of ``(name, value)``
pairs.
The HTTP response body must be :class:`bytes`. It may be empty.
This method may be overridden to check the request headers and set a
different status, for example to authenticate the request and return
``HTTPStatus.UNAUTHORIZED`` or ``HTTPStatus.FORBIDDEN``.
It can be declared as a function or as a coroutine because such
authentication checks are likely to require network requests.
It may also be overridden by passing a ``process_request`` argument to
the :class:`WebSocketServerProtocol` constructor or the :func:`serve`
function.
"""
if self._process_request is not None:
return self._process_request(path, request_headers)
return None |
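A common override sketched from the description above: answer plain HTTP health checks directly and return None for everything else so the WebSocket handshake proceeds; the path and port are examples.

import http

async def health_check(path, request_headers):
    if path == '/healthz':
        # (status, headers, body) closes the connection with an HTTP response
        return http.HTTPStatus.OK, [], b'OK\n'
    return None   # continue with the normal WebSocket handshake

# Typically passed to the server factory, e.g.:
# websockets.serve(handler, 'localhost', 8765, process_request=health_check)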
def order_id(self, order_id):
"""
Sets the order_id of this ChargeRequest.
The ID of the order to associate with this transaction. If you provide this value, the `amount_money` value of your request must __exactly match__ the value of the order's `total_money` field.
:param order_id: The order_id of this ChargeRequest.
:type: str
"""
if order_id is None:
raise ValueError("Invalid value for `order_id`, must not be `None`")
if len(order_id) > 192:
raise ValueError("Invalid value for `order_id`, length must be less than `192`")
self._order_id = order_id | Sets the order_id of this ChargeRequest.
The ID of the order to associate with this transaction. If you provide this value, the `amount_money` value of your request must __exactly match__ the value of the order's `total_money` field.
:param order_id: The order_id of this ChargeRequest.
:type: str | Below is the instruction that describes the task:
### Input:
Sets the order_id of this ChargeRequest.
The ID of the order to associate with this transaction. If you provide this value, the `amount_money` value of your request must __exactly match__ the value of the order's `total_money` field.
:param order_id: The order_id of this ChargeRequest.
:type: str
### Response:
def order_id(self, order_id):
"""
Sets the order_id of this ChargeRequest.
The ID of the order to associate with this transaction. If you provide this value, the `amount_money` value of your request must __exactly match__ the value of the order's `total_money` field.
:param order_id: The order_id of this ChargeRequest.
:type: str
"""
if order_id is None:
raise ValueError("Invalid value for `order_id`, must not be `None`")
if len(order_id) > 192:
raise ValueError("Invalid value for `order_id`, length must be less than `192`")
self._order_id = order_id |
def get_total_DOS(self):
"""Return frequency points and total DOS as a tuple.
Returns
-------
A tuple with (frequency_points, total_dos).
frequency_points: ndarray
shape=(frequency_sampling_points, ), dtype='double'
total_dos:
shape=(frequency_sampling_points, ), dtype='double'
"""
warnings.warn("Phonopy.get_total_DOS is deprecated. "
"Use Phonopy.get_total_dos_dict.", DeprecationWarning)
dos = self.get_total_dos_dict()
return dos['frequency_points'], dos['total_dos'] | Return frequency points and total DOS as a tuple.
Returns
-------
A tuple with (frequency_points, total_dos).
frequency_points: ndarray
shape=(frequency_sampling_points, ), dtype='double'
total_dos:
shape=(frequency_sampling_points, ), dtype='double' | Below is the instruction that describes the task:
### Input:
Return frequency points and total DOS as a tuple.
Returns
-------
A tuple with (frequency_points, total_dos).
frequency_points: ndarray
shape=(frequency_sampling_points, ), dtype='double'
total_dos:
shape=(frequency_sampling_points, ), dtype='double'
### Response:
def get_total_DOS(self):
"""Return frequency points and total DOS as a tuple.
Returns
-------
A tuple with (frequency_points, total_dos).
frequency_points: ndarray
shape=(frequency_sampling_points, ), dtype='double'
total_dos:
shape=(frequency_sampling_points, ), dtype='double'
"""
warnings.warn("Phonopy.get_total_DOS is deprecated. "
"Use Phonopy.get_total_dos_dict.", DeprecationWarning)
dos = self.get_total_dos_dict()
return dos['frequency_points'], dos['total_dos'] |
def load_nameserver_credentials(self, working_directory, num_tries=60, interval=1):
"""
loads the nameserver credentials in cases where master and workers share a filesystem
Parameters
----------
working_directory: str
the working directory for the HPB run (see master)
num_tries: int
number of attempts to find the file (default 60)
interval: float
waiting period between the attempts
"""
fn = os.path.join(working_directory, 'HPB_run_%s_pyro.pkl'%self.run_id)
for i in range(num_tries):
try:
with open(fn, 'rb') as fh:
self.nameserver, self.nameserver_port = pickle.load(fh)
return
except FileNotFoundError:
self.logger.warning('config file %s not found (trial %i/%i)'%(fn, i+1, num_tries))
time.sleep(interval)
except:
raise
raise RuntimeError("Could not find the nameserver information, aborting!") | loads the nameserver credentials in cases where master and workers share a filesystem
Parameters
----------
working_directory: str
the working directory for the HPB run (see master)
num_tries: int
number of attempts to find the file (default 60)
interval: float
waiting period between the attempts | Below is the instruction that describes the task:
### Input:
loads the nameserver credentials in cases where master and workers share a filesystem
Parameters
----------
working_directory: str
the working directory for the HPB run (see master)
num_tries: int
number of attempts to find the file (default 60)
interval: float
waiting period between the attempts
### Response:
def load_nameserver_credentials(self, working_directory, num_tries=60, interval=1):
"""
loads the nameserver credentials in cases where master and workers share a filesystem
Parameters
----------
working_directory: str
the working directory for the HPB run (see master)
num_tries: int
number of attempts to find the file (default 60)
interval: float
waiting period between the attempts
"""
fn = os.path.join(working_directory, 'HPB_run_%s_pyro.pkl'%self.run_id)
for i in range(num_tries):
try:
with open(fn, 'rb') as fh:
self.nameserver, self.nameserver_port = pickle.load(fh)
return
except FileNotFoundError:
self.logger.warning('config file %s not found (trial %i/%i)'%(fn, i+1, num_tries))
time.sleep(interval)
except:
raise
raise RuntimeError("Could not find the nameserver information, aborting!") |
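Sketch of the pickle file the loader above expects to find, as the master side would write it; the directory, run id, host and port below are placeholders.

import os
import pickle

working_directory, run_id = '/tmp/hpb_run', 'demo'
os.makedirs(working_directory, exist_ok=True)
fn = os.path.join(working_directory, 'HPB_run_%s_pyro.pkl' % run_id)
with open(fn, 'wb') as fh:
    pickle.dump(('127.0.0.1', 9090), fh)   # (nameserver host, nameserver port)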
def _postQueuedEvents(self, interval=0.01):
"""Private method to post queued events (e.g. Quartz events).
Each event in queue is a tuple (event call, args to event call).
"""
while len(self.eventList) > 0:
(nextEvent, args) = self.eventList.popleft()
nextEvent(*args)
time.sleep(interval) | Private method to post queued events (e.g. Quartz events).
Each event in queue is a tuple (event call, args to event call). | Below is the instruction that describes the task:
### Input:
Private method to post queued events (e.g. Quartz events).
Each event in queue is a tuple (event call, args to event call).
### Response:
def _postQueuedEvents(self, interval=0.01):
"""Private method to post queued events (e.g. Quartz events).
Each event in queue is a tuple (event call, args to event call).
"""
while len(self.eventList) > 0:
(nextEvent, args) = self.eventList.popleft()
nextEvent(*args)
time.sleep(interval) |
def get_paginator(self, operation_name):
"""Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you'd normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator("create_foo")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
if not self.can_paginate(operation_name):
raise OperationNotPageableError(operation_name=operation_name)
else:
# substitute iterator with async one
Paginator.PAGE_ITERATOR_CLS = AioPageIterator
actual_operation_name = self._PY_TO_OP_NAME[operation_name]
# Create a new paginate method that will serve as a proxy to
# the underlying Paginator.paginate method. This is needed to
# attach a docstring to the method.
def paginate(self, **kwargs):
return Paginator.paginate(self, **kwargs)
paginator_config = self._cache['page_config'][
actual_operation_name]
# Rename the paginator class based on the type of paginator.
paginator_class_name = str('%s.Paginator.%s' % (
get_service_module_name(self.meta.service_model),
actual_operation_name))
# Create the new paginator class
documented_paginator_cls = type(
paginator_class_name, (Paginator,), {'paginate': paginate})
operation_model = self._service_model.\
operation_model(actual_operation_name)
paginator = documented_paginator_cls(
getattr(self, operation_name),
paginator_config,
operation_model)
return paginator | Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you'd normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator("create_foo")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object. | Below is the instruction that describes the task:
### Input:
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you'd normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator("create_foo")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
### Response:
def get_paginator(self, operation_name):
"""Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you'd normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator("create_foo")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
if not self.can_paginate(operation_name):
raise OperationNotPageableError(operation_name=operation_name)
else:
# substitute iterator with async one
Paginator.PAGE_ITERATOR_CLS = AioPageIterator
actual_operation_name = self._PY_TO_OP_NAME[operation_name]
# Create a new paginate method that will serve as a proxy to
# the underlying Paginator.paginate method. This is needed to
# attach a docstring to the method.
def paginate(self, **kwargs):
return Paginator.paginate(self, **kwargs)
paginator_config = self._cache['page_config'][
actual_operation_name]
# Rename the paginator class based on the type of paginator.
paginator_class_name = str('%s.Paginator.%s' % (
get_service_module_name(self.meta.service_model),
actual_operation_name))
# Create the new paginator class
documented_paginator_cls = type(
paginator_class_name, (Paginator,), {'paginate': paginate})
operation_model = self._service_model.\
operation_model(actual_operation_name)
paginator = documented_paginator_cls(
getattr(self, operation_name),
paginator_config,
operation_model)
return paginator |
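A hedged usage sketch in the aiobotocore style this client wrapper targets; the session import path differs between aiobotocore versions and the bucket name is made up.

import asyncio
from aiobotocore.session import get_session   # import location varies by aiobotocore version

async def list_keys(bucket):
    session = get_session()
    async with session.create_client('s3') as client:
        paginator = client.get_paginator('list_objects_v2')
        # the async page iterator produced by the paginate() proxy above
        async for page in paginator.paginate(Bucket=bucket):
            for obj in page.get('Contents', []):
                print(obj['Key'])

# asyncio.run(list_keys('my-bucket'))   # bucket name is hypothetical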
def iterate_storyline(ctx):
"""
iterate the last storyline from the last visited story part
:param ctx:
:return:
"""
logger.debug('# start iterate')
compiled_story = ctx.compiled_story()
if not compiled_story:
return
for step in range(ctx.current_step(),
len(compiled_story.story_line)):
ctx = ctx.clone()
tail = ctx.stack_tail()
ctx.message = modify_stack_in_message(ctx.message,
lambda stack: stack[:-1] + [{
'data': tail['data'],
'step': step,
'topic': tail['topic'],
}])
logger.debug('# [{}] iterate'.format(step))
logger.debug(ctx)
ctx = yield ctx | iterate the last storyline from the last visited story part
:param ctx:
:return: | Below is the instruction that describes the task:
### Input:
iterate the last storyline from the last visited story part
:param ctx:
:return:
### Response:
def iterate_storyline(ctx):
"""
iterate the last storyline from the last visited story part
:param ctx:
:return:
"""
logger.debug('# start iterate')
compiled_story = ctx.compiled_story()
if not compiled_story:
return
for step in range(ctx.current_step(),
len(compiled_story.story_line)):
ctx = ctx.clone()
tail = ctx.stack_tail()
ctx.message = modify_stack_in_message(ctx.message,
lambda stack: stack[:-1] + [{
'data': tail['data'],
'step': step,
'topic': tail['topic'],
}])
logger.debug('# [{}] iterate'.format(step))
logger.debug(ctx)
ctx = yield ctx |
def frac(x, context=None):
"""
Return the fractional part of ``x``.
The result has the same sign as ``x``.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_frac,
(BigFloat._implicit_convert(x),),
context,
) | Return the fractional part of ``x``.
The result has the same sign as ``x``. | Below is the instruction that describes the task:
### Input:
Return the fractional part of ``x``.
The result has the same sign as ``x``.
### Response:
def frac(x, context=None):
"""
Return the fractional part of ``x``.
The result has the same sign as ``x``.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_frac,
(BigFloat._implicit_convert(x),),
context,
) |
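Usage sketch for frac() above, assuming it is exposed by the bigfloat package this entry comes from (the import path is an assumption); the values are examples.

from bigfloat import BigFloat, frac   # assumed public import for the function above

print(frac(BigFloat('3.75')))    # 0.75
print(frac(BigFloat('-3.75')))   # -0.75: the result keeps the sign of x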
def error(self, error):
"""
set the error
"""
# TODO: check length with value?
# TODO: type checks (similar to value)
if self.direction not in ['x', 'y', 'z'] and error is not None:
raise ValueError("error only accepted for x, y, z dimensions")
if isinstance(error, u.Quantity):
error = error.to(self.unit).value
self._error = error | set the error | Below is the instruction that describes the task:
### Input:
set the error
### Response:
def error(self, error):
"""
set the error
"""
# TODO: check length with value?
# TODO: type checks (similar to value)
if self.direction not in ['x', 'y', 'z'] and error is not None:
raise ValueError("error only accepted for x, y, z dimensions")
if isinstance(error, u.Quantity):
error = error.to(self.unit).value
self._error = error |
def create_server_and_run_forever(self, loop=None, **server_config):
"""
Helper function which constructs an HTTP server and listens the
loop forever.
This function exists only to remove boilerplate code for starting
up a growler app.
Args:
**server_config: These keyword arguments are forwarded
directly to the BaseEventLoop.create_server function.
Consult their documentation for details.
Parameters:
loop (asyncio.BaseEventLoop): Optional parameter for specifying
an event loop which will handle socket setup.
**server_config: These keyword arguments are forwarded directly to
the create_server function.
"""
if loop is None:
import asyncio
loop = asyncio.get_event_loop()
self.create_server(loop=loop, **server_config)
try:
loop.run_forever()
except KeyboardInterrupt:
pass | Helper function which constructs an HTTP server and listens the
loop forever.
This function exists only to remove boilerplate code for starting
up a growler app.
Args:
**server_config: These keyword arguments are forwarded
directly to the BaseEventLoop.create_server function.
Consult their documentation for details.
Parameters:
loop (asyncio.BaseEventLoop): Optional parameter for specifying
an event loop which will handle socket setup.
**server_config: These keyword arguments are forwarded directly to
the create_server function. | Below is the instruction that describes the task:
### Input:
Helper function which constructs an HTTP server and listens the
loop forever.
This function exists only to remove boilerplate code for starting
up a growler app.
Args:
**server_config: These keyword arguments are forwarded
directly to the BaseEventLoop.create_server function.
Consult their documentation for details.
Parameters:
loop (asyncio.BaseEventLoop): Optional parameter for specifying
an event loop which will handle socket setup.
**server_config: These keyword arguments are forwarded directly to
the create_server function.
### Response:
def create_server_and_run_forever(self, loop=None, **server_config):
"""
Helper function which constructs an HTTP server and listens the
loop forever.
This function exists only to remove boilerplate code for starting
up a growler app.
Args:
**server_config: These keyword arguments are forwarded
directly to the BaseEventLoop.create_server function.
Consult their documentation for details.
Parameters:
loop (asyncio.BaseEventLoop): Optional parameter for specifying
an event loop which will handle socket setup.
**server_config: These keyword arguments are forwarded directly to
the create_server function.
"""
if loop is None:
import asyncio
loop = asyncio.get_event_loop()
self.create_server(loop=loop, **server_config)
try:
loop.run_forever()
except KeyboardInterrupt:
pass |
def fromutc(self, dt):
"datetime in UTC -> datetime in local time."
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
dtoff = dt.utcoffset()
if dtoff is None:
raise ValueError("fromutc() requires a non-None utcoffset() "
"result")
# See the long comment block at the end of this file for an
# explanation of this algorithm.
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc() requires a non-None dst() result")
delta = dtoff - dtdst
if delta:
dt += delta
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
return dt + dtdst | datetime in UTC -> datetime in local time. | Below is the instruction that describes the task:
### Input:
datetime in UTC -> datetime in local time.
### Response:
def fromutc(self, dt):
"datetime in UTC -> datetime in local time."
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
dtoff = dt.utcoffset()
if dtoff is None:
raise ValueError("fromutc() requires a non-None utcoffset() "
"result")
# See the long comment block at the end of this file for an
# explanation of this algorithm.
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc() requires a non-None dst() result")
delta = dtoff - dtdst
if delta:
dt += delta
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
return dt + dtdst |
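fromutc() is the hook that datetime.astimezone() ultimately calls; with a fixed-offset zone the conversion reduces to a plain shift, as this small stdlib example shows (the offset value is arbitrary).

from datetime import datetime, timezone, timedelta

eastern = timezone(timedelta(hours=-5))
utc_dt = datetime(2024, 1, 1, 12, 0, tzinfo=timezone.utc)
print(utc_dt.astimezone(eastern))   # 2024-01-01 07:00:00-05:00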
def l2_regularizer(weight=1.0, scope=None):
"""Define a L2 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function.
"""
def regularizer(tensor):
with tf.name_scope(scope, 'L2Regularizer', [tensor]):
l2_weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='weight')
return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')
return regularizer | Define a L2 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function. | Below is the instruction that describes the task:
### Input:
Define a L2 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function.
### Response:
def l2_regularizer(weight=1.0, scope=None):
"""Define a L2 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function.
"""
def regularizer(tensor):
with tf.name_scope(scope, 'L2Regularizer', [tensor]):
l2_weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='weight')
return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')
return regularizer |
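Hedged usage sketch for the regularizer factory above, written in the TF1 graph style the code targets (the three-argument tf.name_scope it uses is a TF1 signature); the weight value and tensor shape are examples.

import tensorflow as tf   # assumes TensorFlow 1.x, matching the entry above

w = tf.Variable(tf.ones([3, 3]), name='w')
reg_fn = l2_regularizer(weight=0.01)
penalty = reg_fn(w)   # scalar tensor: 0.01 * sum(w**2) / 2 (tf.nn.l2_loss halves the sum)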
def _get_columns(self, blueprint):
"""
Get the blueprint's columns definitions.
:param blueprint: The blueprint
:type blueprint: Blueprint
:rtype: list
"""
columns = []
for column in blueprint.get_added_columns():
sql = self.wrap(column) + ' ' + self._get_type(column)
columns.append(self._add_modifiers(sql, blueprint, column))
return columns | Get the blueprint's columns definitions.
:param blueprint: The blueprint
:type blueprint: Blueprint
:rtype: list | Below is the instruction that describes the task:
### Input:
Get the blueprint's columns definitions.
:param blueprint: The blueprint
:type blueprint: Blueprint
:rtype: list
### Response:
def _get_columns(self, blueprint):
"""
Get the blueprint's columns definitions.
:param blueprint: The blueprint
:type blueprint: Blueprint
:rtype: list
"""
columns = []
for column in blueprint.get_added_columns():
sql = self.wrap(column) + ' ' + self._get_type(column)
columns.append(self._add_modifiers(sql, blueprint, column))
return columns |
def reset(self):
"""set sensible defaults"""
self.start = 1
self.count = None
self.end = None
self.stride = 1
self.format = "%d" | set sensible defaults | Below is the instruction that describes the task:
### Input:
set sensible defaults
### Response:
def reset(self):
"""set sensible defaults"""
self.start = 1
self.count = None
self.end = None
self.stride = 1
self.format = "%d" |
def load_from_config(cp, model, **kwargs):
"""Loads a sampler from the given config file.
This looks for a name in the section ``[sampler]`` to determine which
sampler class to load. That sampler's ``from_config`` is then called.
Parameters
----------
cp : WorkflowConfigParser
Config parser to read from.
model : pycbc.inference.model
Which model to pass to the sampler.
\**kwargs :
All other keyword arguments are passed directly to the sampler's
``from_config`` file.
Returns
-------
sampler :
The initialized sampler.
"""
name = cp.get('sampler', 'name')
return samplers[name].from_config(cp, model, **kwargs) | Loads a sampler from the given config file.
This looks for a name in the section ``[sampler]`` to determine which
sampler class to load. That sampler's ``from_config`` is then called.
Parameters
----------
cp : WorkflowConfigParser
Config parser to read from.
model : pycbc.inference.model
Which model to pass to the sampler.
\**kwargs :
All other keyword arguments are passed directly to the sampler's
``from_config`` file.
Returns
-------
sampler :
The initialized sampler. | Below is the instruction that describes the task:
### Input:
Loads a sampler from the given config file.
This looks for a name in the section ``[sampler]`` to determine which
sampler class to load. That sampler's ``from_config`` is then called.
Parameters
----------
cp : WorkflowConfigParser
Config parser to read from.
model : pycbc.inference.model
Which model to pass to the sampler.
\**kwargs :
All other keyword arguments are passed directly to the sampler's
``from_config`` file.
Returns
-------
sampler :
The initialized sampler.
### Response:
def load_from_config(cp, model, **kwargs):
"""Loads a sampler from the given config file.
This looks for a name in the section ``[sampler]`` to determine which
sampler class to load. That sampler's ``from_config`` is then called.
Parameters
----------
cp : WorkflowConfigParser
Config parser to read from.
model : pycbc.inference.model
Which model to pass to the sampler.
\**kwargs :
All other keyword arguments are passed directly to the sampler's
``from_config`` file.
Returns
-------
sampler :
The initialized sampler.
"""
name = cp.get('sampler', 'name')
return samplers[name].from_config(cp, model, **kwargs) |
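An illustrative fragment showing the lookup the loader above performs; the sampler name is an example, and the real WorkflowConfigParser adds workflow-specific behaviour on top of the plain ConfigParser used here.

from configparser import ConfigParser

cp = ConfigParser()
cp.read_string("[sampler]\nname = emcee\nnwalkers = 200\n")
print(cp.get('sampler', 'name'))   # 'emcee' - the key load_from_config() dispatches on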
def to_bytes(value, encoding='utf-8'):
"""Converts a string value to bytes, if necessary.
Unfortunately, ``six.b`` is insufficient for this task since in
Python 2 because it does not modify ``unicode`` objects.
Args:
value (Union[str, bytes]): The value to be converted.
encoding (str): The encoding to use to convert unicode to bytes.
Defaults to "utf-8".
Returns:
bytes: The original value converted to bytes (if unicode) or as
passed in if it started out as bytes.
Raises:
ValueError: If the value could not be converted to bytes.
"""
result = (value.encode(encoding)
if isinstance(value, six.text_type) else value)
if isinstance(result, six.binary_type):
return result
else:
raise ValueError('{0!r} could not be converted to bytes'.format(value)) | Converts a string value to bytes, if necessary.
Unfortunately, ``six.b`` is insufficient for this task since in
Python 2 because it does not modify ``unicode`` objects.
Args:
value (Union[str, bytes]): The value to be converted.
encoding (str): The encoding to use to convert unicode to bytes.
Defaults to "utf-8".
Returns:
bytes: The original value converted to bytes (if unicode) or as
passed in if it started out as bytes.
Raises:
ValueError: If the value could not be converted to bytes. | Below is the instruction that describes the task:
### Input:
Converts a string value to bytes, if necessary.
Unfortunately, ``six.b`` is insufficient for this task since in
Python 2 because it does not modify ``unicode`` objects.
Args:
value (Union[str, bytes]): The value to be converted.
encoding (str): The encoding to use to convert unicode to bytes.
Defaults to "utf-8".
Returns:
bytes: The original value converted to bytes (if unicode) or as
passed in if it started out as bytes.
Raises:
ValueError: If the value could not be converted to bytes.
### Response:
def to_bytes(value, encoding='utf-8'):
"""Converts a string value to bytes, if necessary.
Unfortunately, ``six.b`` is insufficient for this task since in
Python 2 because it does not modify ``unicode`` objects.
Args:
value (Union[str, bytes]): The value to be converted.
encoding (str): The encoding to use to convert unicode to bytes.
Defaults to "utf-8".
Returns:
bytes: The original value converted to bytes (if unicode) or as
passed in if it started out as bytes.
Raises:
ValueError: If the value could not be converted to bytes.
"""
result = (value.encode(encoding)
if isinstance(value, six.text_type) else value)
if isinstance(result, six.binary_type):
return result
else:
raise ValueError('{0!r} could not be converted to bytes'.format(value)) |
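Quick usage sketch for to_bytes() above (it relies on the six package imported by its module); the sample values are arbitrary.

print(to_bytes('caf\u00e9'))          # b'caf\xc3\xa9' - text is encoded as UTF-8
print(to_bytes(b'already bytes'))     # returned unchanged
# to_bytes(123) raises ValueError: 123 could not be converted to bytes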
def GetFileEntryByPath(self, path):
"""Retrieves a file entry for a path.
Args:
path (str): path of the file entry.
Returns:
FakeFileEntry: a file entry or None if not available.
"""
if path is None:
return None
file_entry_type, _ = self._paths.get(path, (None, None))
if not file_entry_type:
return None
path_spec = fake_path_spec.FakePathSpec(location=path)
return fake_file_entry.FakeFileEntry(
self._resolver_context, self, path_spec,
file_entry_type=file_entry_type) | Retrieves a file entry for a path.
Args:
path (str): path of the file entry.
Returns:
FakeFileEntry: a file entry or None if not available. | Below is the instruction that describes the task:
### Input:
Retrieves a file entry for a path.
Args:
path (str): path of the file entry.
Returns:
FakeFileEntry: a file entry or None if not available.
### Response:
def GetFileEntryByPath(self, path):
"""Retrieves a file entry for a path.
Args:
path (str): path of the file entry.
Returns:
FakeFileEntry: a file entry or None if not available.
"""
if path is None:
return None
file_entry_type, _ = self._paths.get(path, (None, None))
if not file_entry_type:
return None
path_spec = fake_path_spec.FakePathSpec(location=path)
return fake_file_entry.FakeFileEntry(
self._resolver_context, self, path_spec,
file_entry_type=file_entry_type) |
def find_default_container(builder, # type: HasReqsHints
default_container=None, # type: Text
use_biocontainers=None, # type: bool
): # type: (...) -> Optional[Text]
"""Default finder for default containers."""
if not default_container and use_biocontainers:
default_container = get_container_from_software_requirements(
use_biocontainers, builder)
return default_container | Default finder for default containers. | Below is the instruction that describes the task:
### Input:
Default finder for default containers.
### Response:
def find_default_container(builder, # type: HasReqsHints
default_container=None, # type: Text
use_biocontainers=None, # type: bool
): # type: (...) -> Optional[Text]
"""Default finder for default containers."""
if not default_container and use_biocontainers:
default_container = get_container_from_software_requirements(
use_biocontainers, builder)
return default_container |
def observableFractionCMDX(self, mask, distance_modulus, mass_min=0.1):
"""
Compute observable fraction of stars with masses greater than mass_min in each
pixel in the interior region of the mask.
ADW: Careful, this function is fragile! The selection here should
be the same as mask.restrictCatalogToObservable space. However,
for technical reasons it is faster to do the calculation with
broadcasting here.
ADW: Could this function be even faster / more readable?
ADW: Should this include magnitude error leakage?
"""
mass_init_array,mass_pdf_array,mass_act_array,mag_1_array,mag_2_array = self.sample(mass_min=mass_min,full_data_range=False)
mag = mag_1_array if self.band_1_detection else mag_2_array
color = mag_1_array - mag_2_array
# ADW: Only calculate observable fraction over interior pixels...
pixels = mask.roi.pixels_interior
mag_1_mask = mask.mask_1.mask_roi_sparse[mask.roi.pixel_interior_cut]
mag_2_mask = mask.mask_2.mask_roi_sparse[mask.roi.pixel_interior_cut]
# ADW: Restrict mag and color to range of mask with sufficient solid angle
cmd_cut = ugali.utils.binning.take2D(mask.solid_angle_cmd,color,mag+distance_modulus,
mask.roi.bins_color, mask.roi.bins_mag) > 0
# Pre-apply these cuts to the 1D mass_pdf_array to save time
mass_pdf_cut = mass_pdf_array*cmd_cut
# Create 2D arrays of cuts for each pixel
mask_1_cut = (mag_1_array+distance_modulus)[:,np.newaxis] < mag_1_mask
mask_2_cut = (mag_2_array+distance_modulus)[:,np.newaxis] < mag_2_mask
mask_cut_repeat = mask_1_cut & mask_2_cut
observable_fraction = (mass_pdf_cut[:,np.newaxis]*mask_cut_repeat).sum(axis=0)
return observable_fraction | Compute observable fraction of stars with masses greater than mass_min in each
pixel in the interior region of the mask.
ADW: Careful, this function is fragile! The selection here should
be the same as mask.restrictCatalogToObservable space. However,
for technical reasons it is faster to do the calculation with
broadcasting here.
ADW: Could this function be even faster / more readable?
ADW: Should this include magnitude error leakage? | Below is the instruction that describes the task:
### Input:
Compute observable fraction of stars with masses greater than mass_min in each
pixel in the interior region of the mask.
ADW: Careful, this function is fragile! The selection here should
be the same as mask.restrictCatalogToObservable space. However,
for technical reasons it is faster to do the calculation with
broadcasting here.
ADW: Could this function be even faster / more readable?
ADW: Should this include magnitude error leakage?
### Response:
def observableFractionCMDX(self, mask, distance_modulus, mass_min=0.1):
"""
Compute observable fraction of stars with masses greater than mass_min in each
pixel in the interior region of the mask.
ADW: Careful, this function is fragile! The selection here should
be the same as mask.restrictCatalogToObservable space. However,
for technical reasons it is faster to do the calculation with
broadcasting here.
ADW: Could this function be even faster / more readable?
ADW: Should this include magnitude error leakage?
"""
mass_init_array,mass_pdf_array,mass_act_array,mag_1_array,mag_2_array = self.sample(mass_min=mass_min,full_data_range=False)
mag = mag_1_array if self.band_1_detection else mag_2_array
color = mag_1_array - mag_2_array
# ADW: Only calculate observable fraction over interior pixels...
pixels = mask.roi.pixels_interior
mag_1_mask = mask.mask_1.mask_roi_sparse[mask.roi.pixel_interior_cut]
mag_2_mask = mask.mask_2.mask_roi_sparse[mask.roi.pixel_interior_cut]
# ADW: Restrict mag and color to range of mask with sufficient solid angle
cmd_cut = ugali.utils.binning.take2D(mask.solid_angle_cmd,color,mag+distance_modulus,
mask.roi.bins_color, mask.roi.bins_mag) > 0
# Pre-apply these cuts to the 1D mass_pdf_array to save time
mass_pdf_cut = mass_pdf_array*cmd_cut
# Create 2D arrays of cuts for each pixel
mask_1_cut = (mag_1_array+distance_modulus)[:,np.newaxis] < mag_1_mask
mask_2_cut = (mag_2_array+distance_modulus)[:,np.newaxis] < mag_2_mask
mask_cut_repeat = mask_1_cut & mask_2_cut
observable_fraction = (mass_pdf_cut[:,np.newaxis]*mask_cut_repeat).sum(axis=0)
return observable_fraction |
def risearch(self):
"instance of :class:`eulfedora.api.ResourceIndex`, with the same root url and credentials"
if self._risearch is None:
self._risearch = ResourceIndex(self.fedora_root, self.username, self.password)
return self._risearch | instance of :class:`eulfedora.api.ResourceIndex`, with the same root url and credentials | Below is the the instruction that describes the task:
### Input:
instance of :class:`eulfedora.api.ResourceIndex`, with the same root url and credentials
### Response:
def risearch(self):
"instance of :class:`eulfedora.api.ResourceIndex`, with the same root url and credentials"
if self._risearch is None:
self._risearch = ResourceIndex(self.fedora_root, self.username, self.password)
return self._risearch |
def set_log_level(logger_name: str, log_level: str, propagate: bool = False):
"""Set the log level of the specified logger."""
log = logging.getLogger(logger_name)
log.propagate = propagate
log.setLevel(log_level) | Set the log level of the specified logger. | Below is the the instruction that describes the task:
### Input:
Set the log level of the specified logger.
### Response:
def set_log_level(logger_name: str, log_level: str, propagate: bool = False):
"""Set the log level of the specified logger."""
log = logging.getLogger(logger_name)
log.propagate = propagate
log.setLevel(log_level) |
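Usage sketch for the helper above; the logger names and levels are examples.

set_log_level('aiohttp.access', 'WARNING')               # quiet a chatty dependency
set_log_level('myapp.worker', 'DEBUG', propagate=True)   # verbose local logger that still bubbles up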
def _gen_last_current_relation(self, post_id):
'''
Generate the relation for the post and last post viewed.
'''
last_post_id = self.get_secure_cookie('last_post_uid')
if last_post_id:
last_post_id = last_post_id.decode('utf-8')
self.set_secure_cookie('last_post_uid', post_id)
if last_post_id and MPost.get_by_uid(last_post_id):
self._add_relation(last_post_id, post_id) | Generate the relation for the post and last post viewed. | Below is the the instruction that describes the task:
### Input:
Generate the relation for the post and last post viewed.
### Response:
def _gen_last_current_relation(self, post_id):
'''
Generate the relation for the post and last post viewed.
'''
last_post_id = self.get_secure_cookie('last_post_uid')
if last_post_id:
last_post_id = last_post_id.decode('utf-8')
self.set_secure_cookie('last_post_uid', post_id)
if last_post_id and MPost.get_by_uid(last_post_id):
self._add_relation(last_post_id, post_id) |
def allLayers(self):
""" returns all layers for the service """
url = self._url + "/layers"
params = {
"f" : "json"
}
res = self._get(url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return_dict = {
"layers" : [],
"tables" : []
}
for k, v in res.items():
if k == "layers":
for val in v:
return_dict['layers'].append(
FeatureLayer(url=self._url + "/%s" % val['id'],
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
)
elif k == "tables":
for val in v:
return_dict['tables'].append(
TableLayer(url=self._url + "/%s" % val['id'],
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
)
del k,v
return return_dict | returns all layers for the service | Below is the the instruction that describes the task:
### Input:
returns all layers for the service
### Response:
def allLayers(self):
""" returns all layers for the service """
url = self._url + "/layers"
params = {
"f" : "json"
}
res = self._get(url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return_dict = {
"layers" : [],
"tables" : []
}
for k, v in res.items():
if k == "layers":
for val in v:
return_dict['layers'].append(
FeatureLayer(url=self._url + "/%s" % val['id'],
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
)
elif k == "tables":
for val in v:
return_dict['tables'].append(
TableLayer(url=self._url + "/%s" % val['id'],
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
)
del k,v
return return_dict |
def torrents(self, **filters):
"""
Returns a list of torrents matching the supplied filters.
:param filter: Current status of the torrents.
:param category: Fetch all torrents with the supplied label.
:param sort: Sort torrents by.
:param reverse: Enable reverse sorting.
:param limit: Limit the number of torrents returned.
:param offset: Set offset (if less than 0, offset from end).
:return: list() of torrent with matching filter.
"""
params = {}
for name, value in filters.items():
# make sure that old 'status' argument still works
name = 'filter' if name == 'status' else name
params[name] = value
return self._get('query/torrents', params=params) | Returns a list of torrents matching the supplied filters.
:param filter: Current status of the torrents.
:param category: Fetch all torrents with the supplied label.
:param sort: Sort torrents by.
:param reverse: Enable reverse sorting.
:param limit: Limit the number of torrents returned.
:param offset: Set offset (if less than 0, offset from end).
:return: list() of torrent with matching filter. | Below is the instruction that describes the task:
### Input:
Returns a list of torrents matching the supplied filters.
:param filter: Current status of the torrents.
:param category: Fetch all torrents with the supplied label.
:param sort: Sort torrents by.
:param reverse: Enable reverse sorting.
:param limit: Limit the number of torrents returned.
:param offset: Set offset (if less than 0, offset from end).
:return: list() of torrent with matching filter.
### Response:
def torrents(self, **filters):
"""
Returns a list of torrents matching the supplied filters.
:param filter: Current status of the torrents.
:param category: Fetch all torrents with the supplied label.
:param sort: Sort torrents by.
:param reverse: Enable reverse sorting.
:param limit: Limit the number of torrents returned.
:param offset: Set offset (if less than 0, offset from end).
:return: list() of torrent with matching filter.
"""
params = {}
for name, value in filters.items():
# make sure that old 'status' argument still works
name = 'filter' if name == 'status' else name
params[name] = value
return self._get('query/torrents', params=params) |
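Hedged usage sketch in the python-qbittorrent style this client method belongs to; the import name, Web UI URL, credentials and filter values are assumptions, not taken from the entry itself.

from qbittorrent import Client   # package/import name is an assumption

qb = Client('http://127.0.0.1:8080/')      # Web UI address is hypothetical
qb.login('admin', 'adminadmin')
for t in qb.torrents(status='downloading', sort='ratio', limit=10):
    print(t['name'], t['progress'])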
def get_nice_alert(self, value):
"""Return the alert relative to the Nice configuration list"""
value = str(value)
try:
if value in self.get_limit('nice_critical'):
return 'CRITICAL'
except KeyError:
pass
try:
if value in self.get_limit('nice_warning'):
return 'WARNING'
except KeyError:
pass
try:
if value in self.get_limit('nice_careful'):
return 'CAREFUL'
except KeyError:
pass
return 'DEFAULT' | Return the alert relative to the Nice configuration list | Below is the the instruction that describes the task:
### Input:
Return the alert relative to the Nice configuration list
### Response:
def get_nice_alert(self, value):
"""Return the alert relative to the Nice configuration list"""
value = str(value)
try:
if value in self.get_limit('nice_critical'):
return 'CRITICAL'
except KeyError:
pass
try:
if value in self.get_limit('nice_warning'):
return 'WARNING'
except KeyError:
pass
try:
if value in self.get_limit('nice_careful'):
return 'CAREFUL'
except KeyError:
pass
return 'DEFAULT' |
def tag(self, tokens):
"""Return a list of (token, tag) tuples for a given list of tokens."""
tags = []
for token in tokens:
normalized = self.lexicon[token].normalized
for regex, tag in self.regexes:
if regex.match(normalized):
tags.append((token, tag))
break
else:
tags.append((token, None))
return tags | Return a list of (token, tag) tuples for a given list of tokens. | Below is the the instruction that describes the task:
### Input:
Return a list of (token, tag) tuples for a given list of tokens.
### Response:
def tag(self, tokens):
"""Return a list of (token, tag) tuples for a given list of tokens."""
tags = []
for token in tokens:
normalized = self.lexicon[token].normalized
for regex, tag in self.regexes:
if regex.match(normalized):
tags.append((token, tag))
break
else:
tags.append((token, None))
return tags |
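A toy stand-in showing the control flow of tag() above: the lexicon here simply normalizes each token to itself, and the two regex/tag pairs are invented placeholders for the real tagger's rules.

import re
from types import SimpleNamespace

class SelfLexicon(dict):
    def __missing__(self, token):
        # every token "normalizes" to itself in this toy setup
        return SimpleNamespace(normalized=token)

class ToyTagger:
    lexicon = SelfLexicon()
    regexes = [(re.compile(r'^\d+(\.\d+)?$'), 'NUMBER'),
               (re.compile(r'^[A-Z]'), 'PROPER')]
    tag = tag   # reuse the method from the entry above

print(ToyTagger().tag(['Water', 'boils', 'at', '100']))
# [('Water', 'PROPER'), ('boils', None), ('at', None), ('100', 'NUMBER')]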
def _station_load(network, station, crit_stations):
"""
Checks for over-loading of stations.
Parameters
----------
network : :class:`~.grid.network.Network`
station : :class:`~.grid.components.LVStation` or :class:`~.grid.components.MVStation`
crit_stations : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded stations, their apparent power at
maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations either of type
:class:`~.grid.components.LVStation` or
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occurred in as :pandas:`pandas.Timestamp<timestamp>`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded stations, their apparent power at
maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations either of type
:class:`~.grid.components.LVStation` or
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occurred in as :pandas:`pandas.Timestamp<timestamp>`.
"""
if isinstance(station, LVStation):
grid_level = 'lv'
else:
grid_level = 'mv'
# maximum allowed apparent power of station for feed-in and load case
s_station = sum([_.type.S_nom for _ in station.transformers])
s_station_allowed_per_case = {}
s_station_allowed_per_case['feedin_case'] = s_station * network.config[
'grid_expansion_load_factors']['{}_feedin_case_transformer'.format(
grid_level)]
s_station_allowed_per_case['load_case'] = s_station * network.config[
'grid_expansion_load_factors']['{}_load_case_transformer'.format(
grid_level)]
# maximum allowed apparent power of station in each time step
s_station_allowed = \
network.timeseries.timesteps_load_feedin_case.case.apply(
lambda _: s_station_allowed_per_case[_])
try:
if isinstance(station, LVStation):
s_station_pfa = network.results.s_res(
station.transformers).sum(axis=1)
else:
s_station_pfa = network.results.s_res([station]).iloc[:, 0]
s_res = s_station_allowed - s_station_pfa
s_res = s_res[s_res < 0]
# check if maximum allowed apparent power of station exceeds
# apparent power from power flow analysis at any time step
if not s_res.empty:
# find out largest relative deviation
load_factor = \
network.timeseries.timesteps_load_feedin_case.case.apply(
lambda _: network.config[
'grid_expansion_load_factors'][
'{}_{}_transformer'.format(grid_level, _)])
relative_s_res = load_factor * s_res
crit_stations = crit_stations.append(pd.DataFrame(
{'s_pfa': s_station_pfa.loc[relative_s_res.idxmin()],
'time_index': relative_s_res.idxmin()},
index=[station]))
except KeyError:
logger.debug('No results for {} station to check overloading.'.format(
grid_level.upper()))
return crit_stations | Checks for over-loading of stations.
Parameters
----------
network : :class:`~.grid.network.Network`
station : :class:`~.grid.components.LVStation` or :class:`~.grid.components.MVStation`
crit_stations : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded stations, their apparent power at
maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations either of type
:class:`~.grid.components.LVStation` or
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occurred in as :pandas:`pandas.Timestamp<timestamp>`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded stations, their apparent power at
maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations either of type
:class:`~.grid.components.LVStation` or
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occured in as :pandas:`pandas.Timestamp<timestamp>`. | Below is the the instruction that describes the task:
### Input:
Checks for over-loading of stations.
Parameters
----------
network : :class:`~.grid.network.Network`
station : :class:`~.grid.components.LVStation` or :class:`~.grid.components.MVStation`
crit_stations : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded stations, their apparent power at
maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations either of type
:class:`~.grid.components.LVStation` or
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occurred in as :pandas:`pandas.Timestamp<timestamp>`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded stations, their apparent power at
maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations either of type
:class:`~.grid.components.LVStation` or
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occurred in as :pandas:`pandas.Timestamp<timestamp>`.
### Response:
def _station_load(network, station, crit_stations):
"""
Checks for over-loading of stations.
Parameters
----------
network : :class:`~.grid.network.Network`
station : :class:`~.grid.components.LVStation` or :class:`~.grid.components.MVStation`
crit_stations : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded stations, their apparent power at
maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations either of type
:class:`~.grid.components.LVStation` or
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occurred in as :pandas:`pandas.Timestamp<timestamp>`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded stations, their apparent power at
maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations either of type
:class:`~.grid.components.LVStation` or
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occurred in as :pandas:`pandas.Timestamp<timestamp>`.
"""
if isinstance(station, LVStation):
grid_level = 'lv'
else:
grid_level = 'mv'
# maximum allowed apparent power of station for feed-in and load case
s_station = sum([_.type.S_nom for _ in station.transformers])
s_station_allowed_per_case = {}
s_station_allowed_per_case['feedin_case'] = s_station * network.config[
'grid_expansion_load_factors']['{}_feedin_case_transformer'.format(
grid_level)]
s_station_allowed_per_case['load_case'] = s_station * network.config[
'grid_expansion_load_factors']['{}_load_case_transformer'.format(
grid_level)]
# maximum allowed apparent power of station in each time step
s_station_allowed = \
network.timeseries.timesteps_load_feedin_case.case.apply(
lambda _: s_station_allowed_per_case[_])
try:
if isinstance(station, LVStation):
s_station_pfa = network.results.s_res(
station.transformers).sum(axis=1)
else:
s_station_pfa = network.results.s_res([station]).iloc[:, 0]
s_res = s_station_allowed - s_station_pfa
s_res = s_res[s_res < 0]
# check if maximum allowed apparent power of station exceeds
# apparent power from power flow analysis at any time step
if not s_res.empty:
# find out largest relative deviation
load_factor = \
network.timeseries.timesteps_load_feedin_case.case.apply(
lambda _: network.config[
'grid_expansion_load_factors'][
'{}_{}_transformer'.format(grid_level, _)])
relative_s_res = load_factor * s_res
crit_stations = crit_stations.append(pd.DataFrame(
{'s_pfa': s_station_pfa.loc[relative_s_res.idxmin()],
'time_index': relative_s_res.idxmin()},
index=[station]))
except KeyError:
logger.debug('No results for {} station to check overloading.'.format(
grid_level.upper()))
return crit_stations |
def executeMetricsQuery(self, tmaster, queryString, start_time, end_time, callback=None):
"""
Get the specified metrics for the given query in this topology.
Returns the following dict on success:
{
"timeline": [{
"instance": <instance>,
"data": {
<start_time> : <numeric value>,
<start_time> : <numeric value>,
...
}
}, {
...
}, ...
"starttime": <numeric value>,
"endtime": <numeric value>,
},
Returns the following dict on failure:
{
"message": "..."
}
"""
query = Query(self.tracker)
metrics = yield query.execute_query(tmaster, queryString, start_time, end_time)
# Parse the response
ret = {}
ret["starttime"] = start_time
ret["endtime"] = end_time
ret["timeline"] = []
for metric in metrics:
tl = {
"data": metric.timeline
}
if metric.instance:
tl["instance"] = metric.instance
ret["timeline"].append(tl)
raise tornado.gen.Return(ret) | Get the specified metrics for the given query in this topology.
Returns the following dict on success:
{
"timeline": [{
"instance": <instance>,
"data": {
<start_time> : <numeric value>,
<start_time> : <numeric value>,
...
}
}, {
...
}, ...
"starttime": <numeric value>,
"endtime": <numeric value>,
},
Returns the following dict on failure:
{
"message": "..."
} | Below is the the instruction that describes the task:
### Input:
Get the specified metrics for the given query in this topology.
Returns the following dict on success:
{
"timeline": [{
"instance": <instance>,
"data": {
<start_time> : <numeric value>,
<start_time> : <numeric value>,
...
}
}, {
...
}, ...
"starttime": <numeric value>,
"endtime": <numeric value>,
},
Returns the following dict on failure:
{
"message": "..."
}
### Response:
def executeMetricsQuery(self, tmaster, queryString, start_time, end_time, callback=None):
"""
Get the specified metrics for the given query in this topology.
Returns the following dict on success:
{
"timeline": [{
"instance": <instance>,
"data": {
<start_time> : <numeric value>,
<start_time> : <numeric value>,
...
}
}, {
...
}, ...
"starttime": <numeric value>,
"endtime": <numeric value>,
},
Returns the following dict on failure:
{
"message": "..."
}
"""
query = Query(self.tracker)
metrics = yield query.execute_query(tmaster, queryString, start_time, end_time)
# Parse the response
ret = {}
ret["starttime"] = start_time
ret["endtime"] = end_time
ret["timeline"] = []
for metric in metrics:
tl = {
"data": metric.timeline
}
if metric.instance:
tl["instance"] = metric.instance
ret["timeline"].append(tl)
raise tornado.gen.Return(ret) |
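A hedged sketch of calling the coroutine above from another Tornado coroutine; the tmaster handle and the query string are placeholder examples, not values taken from this entry:
import time
import tornado.gen

@tornado.gen.coroutine
def fetch_recent_metrics(topology_info, tmaster):
    # Hypothetical query over the last ten minutes; the query syntax is assumed.
    end = int(time.time())
    start = end - 600
    result = yield topology_info.executeMetricsQuery(
        tmaster, "TS(__stmgr__, *, __time_spent_back_pressure_by_compid)",
        start, end)
    raise tornado.gen.Return(result["timeline"])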
def prepare(self):
"""Log access."""
request_time = 1000.0 * self.request.request_time()
access_log.info(
"%d %s %.2fms", self.get_status(),
self._request_summary(), request_time) | Log access. | Below is the the instruction that describes the task:
### Input:
Log access.
### Response:
def prepare(self):
"""Log access."""
request_time = 1000.0 * self.request.request_time()
access_log.info(
"%d %s %.2fms", self.get_status(),
self._request_summary(), request_time) |
def p_ind8_I(p):
""" reg8_I : LP IX PLUS expr RP
| LP IX MINUS expr RP
| LP IY PLUS expr RP
| LP IY MINUS expr RP
| LP IX PLUS pexpr RP
| LP IX MINUS pexpr RP
| LP IY PLUS pexpr RP
| LP IY MINUS pexpr RP
"""
expr = p[4]
if p[3] == '-':
expr = Expr.makenode(Container('-', p.lineno(3)), expr)
p[0] = ('(%s+N)' % p[2], expr) | reg8_I : LP IX PLUS expr RP
| LP IX MINUS expr RP
| LP IY PLUS expr RP
| LP IY MINUS expr RP
| LP IX PLUS pexpr RP
| LP IX MINUS pexpr RP
| LP IY PLUS pexpr RP
| LP IY MINUS pexpr RP | Below is the the instruction that describes the task:
### Input:
reg8_I : LP IX PLUS expr RP
| LP IX MINUS expr RP
| LP IY PLUS expr RP
| LP IY MINUS expr RP
| LP IX PLUS pexpr RP
| LP IX MINUS pexpr RP
| LP IY PLUS pexpr RP
| LP IY MINUS pexpr RP
### Response:
def p_ind8_I(p):
""" reg8_I : LP IX PLUS expr RP
| LP IX MINUS expr RP
| LP IY PLUS expr RP
| LP IY MINUS expr RP
| LP IX PLUS pexpr RP
| LP IX MINUS pexpr RP
| LP IY PLUS pexpr RP
| LP IY MINUS pexpr RP
"""
expr = p[4]
if p[3] == '-':
expr = Expr.makenode(Container('-', p.lineno(3)), expr)
p[0] = ('(%s+N)' % p[2], expr) |
def get_size(vm_):
'''
Return the VM's size. Used by create_node().
'''
sizes = avail_sizes()
vm_size = six.text_type(config.get_cloud_config_value(
'size', vm_, __opts__, search_global=False
))
if not vm_size:
raise SaltCloudNotFound('No size specified for this VM.')
if vm_size and six.text_type(vm_size) in sizes:
return sizes[vm_size]['InstanceTypeId']
raise SaltCloudNotFound(
'The specified size, \'{0}\', could not be found.'.format(vm_size)
) | Return the VM's size. Used by create_node(). | Below is the the instruction that describes the task:
### Input:
Return the VM's size. Used by create_node().
### Response:
def get_size(vm_):
'''
Return the VM's size. Used by create_node().
'''
sizes = avail_sizes()
vm_size = six.text_type(config.get_cloud_config_value(
'size', vm_, __opts__, search_global=False
))
if not vm_size:
raise SaltCloudNotFound('No size specified for this VM.')
if vm_size and six.text_type(vm_size) in sizes:
return sizes[vm_size]['InstanceTypeId']
raise SaltCloudNotFound(
'The specified size, \'{0}\', could not be found.'.format(vm_size)
) |
def build(values):
'''
Parameters
----------
values: [term, ...]
Returns
-------
IndexStore
'''
idxstore = IndexStore()
idxstore._i2val = list(values)
idxstore._val2i = {term:i for i,term in enumerate(values)}
idxstore._next_i = len(values)
return idxstore | Parameters
----------
values: [term, ...]
Returns
-------
IndexStore | Below is the the instruction that describes the task:
### Input:
Parameters
----------
values: [term, ...]
Returns
-------
IndexStore
### Response:
def build(values):
'''
Parameters
----------
values: [term, ...]
Returns
-------
IndexStore
'''
idxstore = IndexStore()
idxstore._i2val = list(values)
idxstore._val2i = {term:i for i,term in enumerate(values)}
idxstore._next_i = len(values)
return idxstore |
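A small usage sketch, assuming ``build`` is exposed as a static method on ``IndexStore``; the term list is made up:
# Hypothetical round trip through the two internal mappings shown above.
terms = ['cat', 'dog', 'fish']
idxstore = IndexStore.build(terms)
assert idxstore._val2i['dog'] == 1       # term -> index
assert idxstore._i2val[2] == 'fish'      # index -> term
assert idxstore._next_i == len(terms)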
def query_relative(self, query, event_time=None, relative_duration_before=None, relative_duration_after=None):
"""Perform the query and calculate the time range based on the relative values."""
assert event_time is None or isinstance(event_time, datetime.datetime)
assert relative_duration_before is None or isinstance(relative_duration_before, str)
assert relative_duration_after is None or isinstance(relative_duration_after, str)
if event_time is None:
# use now as the default
event_time = datetime.datetime.now()
# use preconfigured defaults
if relative_duration_before is None:
relative_duration_before = self.relative_duration_before
if relative_duration_after is None:
relative_duration_after = self.relative_duration_after
time_start = event_time - create_timedelta(relative_duration_before)
time_end = event_time + create_timedelta(relative_duration_after)
return self.query_with_time(query, time_start, time_end) | Perform the query and calculate the time range based on the relative values. | Below is the the instruction that describes the task:
### Input:
Perform the query and calculate the time range based on the relative values.
### Response:
def query_relative(self, query, event_time=None, relative_duration_before=None, relative_duration_after=None):
"""Perform the query and calculate the time range based on the relative values."""
assert event_time is None or isinstance(event_time, datetime.datetime)
assert relative_duration_before is None or isinstance(relative_duration_before, str)
assert relative_duration_after is None or isinstance(relative_duration_after, str)
if event_time is None:
# use now as the default
event_time = datetime.datetime.now()
# use preconfigured defaults
if relative_duration_before is None:
relative_duration_before = self.relative_duration_before
if relative_duration_after is None:
relative_duration_after = self.relative_duration_after
time_start = event_time - create_timedelta(relative_duration_before)
time_end = event_time + create_timedelta(relative_duration_after)
return self.query_with_time(query, time_start, time_end) |
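A hedged call sketch; the collector instance, the query syntax and the duration-string format accepted by ``create_timedelta`` are assumptions:
import datetime

# Hypothetical window: 15 minutes before and 5 minutes after an alert time.
event_time = datetime.datetime(2023, 5, 1, 12, 0, 0)
results = collector.query_relative(
    "dst_ip = 203.0.113.7",                # assumed query string
    event_time=event_time,
    relative_duration_before="00:15:00",   # assumed duration format
    relative_duration_after="00:05:00",
)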
def delete_date(self, date):
"""
Remove the date line from the textual representation. This doesn't
remove any entry line.
"""
self.lines = [
line for line in self.lines
if not isinstance(line, DateLine) or line.date != date
]
self.lines = trim(self.lines) | Remove the date line from the textual representation. This doesn't
remove any entry line. | Below is the the instruction that describes the task:
### Input:
Remove the date line from the textual representation. This doesn't
remove any entry line.
### Response:
def delete_date(self, date):
"""
Remove the date line from the textual representation. This doesn't
remove any entry line.
"""
self.lines = [
line for line in self.lines
if not isinstance(line, DateLine) or line.date != date
]
self.lines = trim(self.lines) |
def update_from_sam(self, sam, sam_reader):
'''Updates graph info from a pysam.AlignedSegment object'''
if sam.is_unmapped \
or sam.mate_is_unmapped \
or (sam.reference_id == sam.next_reference_id):
return
new_link = link.Link(sam, sam_reader, self.ref_lengths)
read_name = sam.query_name
if read_name in self.partial_links:
new_link.merge(self.partial_links[read_name])
del self.partial_links[read_name]
key = tuple(sorted((new_link.refnames[0], new_link.refnames[1])))
if key not in self.links:
self.links[key] = []
new_link.sort()
self.links[key].append(new_link)
else:
self.partial_links[read_name] = new_link | Updates graph info from a pysam.AlignedSegment object | Below is the the instruction that describes the task:
### Input:
Updates graph info from a pysam.AlignedSegment object
### Response:
def update_from_sam(self, sam, sam_reader):
'''Updates graph info from a pysam.AlignedSegment object'''
if sam.is_unmapped \
or sam.mate_is_unmapped \
or (sam.reference_id == sam.next_reference_id):
return
new_link = link.Link(sam, sam_reader, self.ref_lengths)
read_name = sam.query_name
if read_name in self.partial_links:
new_link.merge(self.partial_links[read_name])
del self.partial_links[read_name]
key = tuple(sorted((new_link.refnames[0], new_link.refnames[1])))
if key not in self.links:
self.links[key] = []
new_link.sort()
self.links[key].append(new_link)
else:
self.partial_links[read_name] = new_link |
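A sketch of streaming alignments into the link graph; the pysam calls are standard, while the surrounding graph object and BAM path are assumptions:
import pysam

# Hypothetical loop: register every alignment whose mate maps to a different reference.
sam_reader = pysam.AlignmentFile("scaffold.bam", "rb")
for aln in sam_reader.fetch(until_eof=True):
    graph.update_from_sam(aln, sam_reader)   # graph is an assumed instance of this class
sam_reader.close()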
def build_kcorrection_array(
log,
redshiftArray,
snTypesArray,
snLightCurves,
pathToOutputDirectory,
plot=True):
"""
*Given the random redshiftArray and snTypeArray, generate a dictionary of k-correction polynomials (one for each filter) for every object.*
**Key Arguments:**
- ``log`` -- logger
- ``redshiftArray`` -- the pre-generated redshift array
- ``snTypesArray`` -- the pre-generated array of random sn types
- ``snLightCurves`` -- yaml style dictionary of SN lightcurve info
- ``pathToOutputDirectory`` -- path to the output directory (provided by the user)
- ``plot`` -- generate plot?
**Return:**
- ``kCorArray`` -- array of per-object dictionaries mapping filter name to its k-correction polynomial
"""
################ > IMPORTS ################
## STANDARD LIB ##
## THIRD PARTY ##
import yaml
import numpy as np
## LOCAL APPLICATION ##
################ >ACTION(S) ################
dataDir = pathToOutputDirectory + "/k_corrections/"
filters = ['g', 'r', 'i', 'z']
fileName = pathToOutputDirectory + "/transient_light_curves.yaml"
stream = file(fileName, 'r')
generatedLCs = yaml.load(stream)
models = generatedLCs.keys()
kCorList = []
for i in range(len(redshiftArray)):
redshift = redshiftArray[i]
kCorDict = {}
for model in models:
for ffilter in filters:
filterDir = dataDir + model + "/" + ffilter
strRed = "%0.3f" % (redshift,)
fileName = filterDir + "/z" + \
str(strRed).replace(".", "pt") + "_poly.yaml"
try:
stream = file(fileName, 'r')
yamlContent = yaml.load(stream)
# log.info('yamlContent %s' % (yamlContent,))
stream.close()
flatPoly = np.poly1d(yamlContent['polyCoeffs'])
except:
flatPoly = None
kCorDict[ffilter] = flatPoly
kCorList.append(kCorDict)
kCorArray = np.array(kCorList)
return kCorArray | *Given the random redshiftArray and snTypeArray, generate a dictionary of k-correction polynomials (one for each filter) for every object.*
**Key Arguments:**
- ``log`` -- logger
- ``redshiftArray`` -- the pre-generated redshift array
- ``snTypesArray`` -- the pre-generated array of random sn types
- ``snLightCurves`` -- yaml style dictionary of SN lightcurve info
- ``pathToOutputDirectory`` -- path to the output directory (provided by the user)
- ``plot`` -- generate plot?
**Return:**
- None | Below is the the instruction that describes the task:
### Input:
*Given the random redshiftArray and snTypeArray, generate a dictionary of k-correction polynomials (one for each filter) for every object.*
**Key Arguments:**
- ``log`` -- logger
- ``redshiftArray`` -- the pre-generated redshift array
- ``snTypesArray`` -- the pre-generated array of random sn types
- ``snLightCurves`` -- yaml style dictionary of SN lightcurve info
- ``pathToOutputDirectory`` -- path to the output directory (provided by the user)
- ``plot`` -- generate plot?
**Return:**
- ``kCorArray`` -- array of per-object dictionaries mapping filter name to its k-correction polynomial
### Response:
def build_kcorrection_array(
log,
redshiftArray,
snTypesArray,
snLightCurves,
pathToOutputDirectory,
plot=True):
"""
*Given the random redshiftArray and snTypeArray, generate a dictionary of k-correction polynomials (one for each filter) for every object.*
**Key Arguments:**
- ``log`` -- logger
- ``redshiftArray`` -- the pre-generated redshift array
- ``snTypesArray`` -- the pre-generated array of random sn types
- ``snLightCurves`` -- yaml style dictionary of SN lightcurve info
- ``pathToOutputDirectory`` -- path to the output directory (provided by the user)
- ``plot`` -- generate plot?
**Return:**
- ``kCorArray`` -- array of per-object dictionaries mapping filter name to its k-correction polynomial
"""
################ > IMPORTS ################
## STANDARD LIB ##
## THIRD PARTY ##
import yaml
import numpy as np
## LOCAL APPLICATION ##
################ >ACTION(S) ################
dataDir = pathToOutputDirectory + "/k_corrections/"
filters = ['g', 'r', 'i', 'z']
fileName = pathToOutputDirectory + "/transient_light_curves.yaml"
stream = file(fileName, 'r')
generatedLCs = yaml.load(stream)
models = generatedLCs.keys()
kCorList = []
for i in range(len(redshiftArray)):
redshift = redshiftArray[i]
kCorDict = {}
for model in models:
for ffilter in filters:
filterDir = dataDir + model + "/" + ffilter
strRed = "%0.3f" % (redshift,)
fileName = filterDir + "/z" + \
str(strRed).replace(".", "pt") + "_poly.yaml"
try:
stream = file(fileName, 'r')
yamlContent = yaml.load(stream)
# log.info('yamlContent %s' % (yamlContent,))
stream.close()
flatPoly = np.poly1d(yamlContent['polyCoeffs'])
except:
flatPoly = None
kCorDict[ffilter] = flatPoly
kCorList.append(kCorDict)
kCorArray = np.array(kCorList)
return kCorArray |
def list_users(app, appbuilder):
"""
List all users on the database
"""
_appbuilder = import_application(app, appbuilder)
echo_header("List of users")
for user in _appbuilder.sm.get_all_users():
click.echo(
"username:{0} | email:{1} | role:{2}".format(
user.username, user.email, user.roles
)
) | List all users on the database | Below is the the instruction that describes the task:
### Input:
List all users on the database
### Response:
def list_users(app, appbuilder):
"""
List all users on the database
"""
_appbuilder = import_application(app, appbuilder)
echo_header("List of users")
for user in _appbuilder.sm.get_all_users():
click.echo(
"username:{0} | email:{1} | role:{2}".format(
user.username, user.email, user.roles
)
) |
def get_user_roles(user):
"""Get a list of a users's roles."""
if user:
groups = user.groups.all() # Important! all() query may be cached on User with prefetch_related.
roles = (RolesManager.retrieve_role(group.name) for group in groups if group.name in RolesManager.get_roles_names())
return sorted(roles, key=lambda r: r.get_name() )
else:
return [] | Get a list of a users's roles. | Below is the the instruction that describes the task:
### Input:
Get a list of a user's roles.
### Response:
def get_user_roles(user):
"""Get a list of a users's roles."""
if user:
groups = user.groups.all() # Important! all() query may be cached on User with prefetch_related.
roles = (RolesManager.retrieve_role(group.name) for group in groups if group.name in RolesManager.get_roles_names())
return sorted(roles, key=lambda r: r.get_name() )
else:
return [] |
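A small sketch of a permission check built on top of ``get_user_roles``; the role name is made up:
def user_is_editor(user):
    # Hypothetical check against a role registered under the name "editor".
    return any(role.get_name() == "editor" for role in get_user_roles(user))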
def env():
"""Verify NVME variables and construct exported variables"""
if cij.ssh.env():
cij.err("cij.nvme.env: invalid SSH environment")
return 1
nvme = cij.env_to_dict(PREFIX, REQUIRED)
nvme["DEV_PATH"] = os.path.join("/dev", nvme["DEV_NAME"])
# get version, chunks, luns and chs
try:
sysfs = os.path.join("/sys/class/block", nvme["DEV_NAME"], "lightnvm")
nvme["LNVM_VERSION"] = cat_file(os.path.join(sysfs, "version"))
if nvme["LNVM_VERSION"] == "2.0":
luns = "punits"
chs = "groups"
elif nvme["LNVM_VERSION"] == "1.2":
luns = "num_luns"
chs = "num_channels"
else:
raise RuntimeError("cij.nvme.env: invalid lnvm version: %s" % nvme["LNVM_VERSION"])
nvme["LNVM_NUM_CHUNKS"] = cat_file(os.path.join(sysfs, "chunks"))
nvme["LNVM_NUM_LUNS"] = cat_file(os.path.join(sysfs, luns))
nvme["LNVM_NUM_CHS"] = cat_file(os.path.join(sysfs, chs))
nvme["LNVM_TOTAL_LUNS"] = str(int(nvme["LNVM_NUM_LUNS"]) * int(nvme["LNVM_NUM_CHS"]))
nvme["LNVM_TOTAL_CHUNKS"] = str(int(nvme["LNVM_TOTAL_LUNS"]) * int(nvme["LNVM_NUM_CHUNKS"]))
# get spec version by identify namespace data struct
if nvme["LNVM_VERSION"] == "2.0":
cmd = ["nvme", "id-ctrl", nvme["DEV_PATH"], "--raw-binary"]
status, stdout, _ = cij.ssh.command(cmd, shell=True)
if status:
raise RuntimeError("cij.nvme.env: nvme id-ctrl fail")
buff = cij.bin.Buffer(types=IdentifyCDS, length=1)
buff.memcopy(stdout)
if buff[0].VS[1023] == 0x5a:
nvme["SPEC_VERSION"] = "Denali"
else:
nvme["SPEC_VERSION"] = "Spec20"
else:
nvme["SPEC_VERSION"] = "Spec12"
# get chunk meta information
nvme["LNVM_CHUNK_META_LENGTH"] = str(get_sizeof_descriptor_table(nvme["SPEC_VERSION"]))
nvme["LNVM_CHUNK_META_SIZE"] = str(int(nvme["LNVM_CHUNK_META_LENGTH"]) *
int(nvme["LNVM_TOTAL_CHUNKS"]))
except StandardError:
traceback.print_exc()
return 1
cij.env_export(PREFIX, EXPORTED, nvme)
return 0 | Verify NVME variables and construct exported variables | Below is the the instruction that describes the task:
### Input:
Verify NVME variables and construct exported variables
### Response:
def env():
"""Verify NVME variables and construct exported variables"""
if cij.ssh.env():
cij.err("cij.nvme.env: invalid SSH environment")
return 1
nvme = cij.env_to_dict(PREFIX, REQUIRED)
nvme["DEV_PATH"] = os.path.join("/dev", nvme["DEV_NAME"])
# get version, chunks, luns and chs
try:
sysfs = os.path.join("/sys/class/block", nvme["DEV_NAME"], "lightnvm")
nvme["LNVM_VERSION"] = cat_file(os.path.join(sysfs, "version"))
if nvme["LNVM_VERSION"] == "2.0":
luns = "punits"
chs = "groups"
elif nvme["LNVM_VERSION"] == "1.2":
luns = "num_luns"
chs = "num_channels"
else:
raise RuntimeError("cij.nvme.env: invalid lnvm version: %s" % nvme["LNVM_VERSION"])
nvme["LNVM_NUM_CHUNKS"] = cat_file(os.path.join(sysfs, "chunks"))
nvme["LNVM_NUM_LUNS"] = cat_file(os.path.join(sysfs, luns))
nvme["LNVM_NUM_CHS"] = cat_file(os.path.join(sysfs, chs))
nvme["LNVM_TOTAL_LUNS"] = str(int(nvme["LNVM_NUM_LUNS"]) * int(nvme["LNVM_NUM_CHS"]))
nvme["LNVM_TOTAL_CHUNKS"] = str(int(nvme["LNVM_TOTAL_LUNS"]) * int(nvme["LNVM_NUM_CHUNKS"]))
# get spec version by identify namespace data struct
if nvme["LNVM_VERSION"] == "2.0":
cmd = ["nvme", "id-ctrl", nvme["DEV_PATH"], "--raw-binary"]
status, stdout, _ = cij.ssh.command(cmd, shell=True)
if status:
raise RuntimeError("cij.nvme.env: nvme id-ctrl fail")
buff = cij.bin.Buffer(types=IdentifyCDS, length=1)
buff.memcopy(stdout)
if buff[0].VS[1023] == 0x5a:
nvme["SPEC_VERSION"] = "Denali"
else:
nvme["SPEC_VERSION"] = "Spec20"
else:
nvme["SPEC_VERSION"] = "Spec12"
# get chunk meta information
nvme["LNVM_CHUNK_META_LENGTH"] = str(get_sizeof_descriptor_table(nvme["SPEC_VERSION"]))
nvme["LNVM_CHUNK_META_SIZE"] = str(int(nvme["LNVM_CHUNK_META_LENGTH"]) *
int(nvme["LNVM_TOTAL_CHUNKS"]))
except StandardError:
traceback.print_exc()
return 1
cij.env_export(PREFIX, EXPORTED, nvme)
return 0 |
def get(self, timeout=None):
# type: (float) -> T
"""Return the result or raise the error the function has produced"""
self.wait(timeout)
if isinstance(self._result, Exception):
raise self._result
return self._result | Return the result or raise the error the function has produced | Below is the the instruction that describes the task:
### Input:
Return the result or raise the error the function has produced
### Response:
def get(self, timeout=None):
# type: (float) -> T
"""Return the result or raise the error the function has produced"""
self.wait(timeout)
if isinstance(self._result, Exception):
raise self._result
return self._result |
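A sketch of the intended consumer side, assuming an ``async_result`` instance of the surrounding class whose worker has been started elsewhere:
# Block for up to five seconds, then either use the value or handle the
# exception the wrapped function raised (get() re-raises it).
try:
    value = async_result.get(timeout=5.0)
except Exception as exc:
    print("worker failed:", exc)
    value = None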
def has_edge_evidence(self, u: BaseEntity, v: BaseEntity, key: str) -> bool:
"""Check if the given edge has an evidence."""
return self._has_edge_attr(u, v, key, EVIDENCE) | Check if the given edge has an evidence. | Below is the the instruction that describes the task:
### Input:
Check if the given edge has an evidence.
### Response:
def has_edge_evidence(self, u: BaseEntity, v: BaseEntity, key: str) -> bool:
"""Check if the given edge has an evidence."""
return self._has_edge_attr(u, v, key, EVIDENCE) |
def process_request(self, req, resp):
""" Process the request before routing it.
We always enforce the use of SSL.
"""
if goldman.config.TLS_REQUIRED and req.protocol != 'https':
abort(TLSRequired) | Process the request before routing it.
We always enforce the use of SSL. | Below is the the instruction that describes the task:
### Input:
Process the request before routing it.
We always enforce the use of SSL.
### Response:
def process_request(self, req, resp):
""" Process the request before routing it.
We always enforce the use of SSL.
"""
if goldman.config.TLS_REQUIRED and req.protocol != 'https':
abort(TLSRequired) |
def inner_product(vec0: QubitVector, vec1: QubitVector) -> bk.BKTensor:
""" Hilbert-Schmidt inner product between qubit vectors
The tensor rank and qubits must match.
"""
if vec0.rank != vec1.rank or vec0.qubit_nb != vec1.qubit_nb:
raise ValueError('Incompatible vectors. Qubits and rank must match')
vec1 = vec1.permute(vec0.qubits) # Make sure qubits in same order
return bk.inner(vec0.tensor, vec1.tensor) | Hilbert-Schmidt inner product between qubit vectors
The tensor rank and qubits must match. | Below is the the instruction that describes the task:
### Input:
Hilbert-Schmidt inner product between qubit vectors
The tensor rank and qubits must match.
### Response:
def inner_product(vec0: QubitVector, vec1: QubitVector) -> bk.BKTensor:
""" Hilbert-Schmidt inner product between qubit vectors
The tensor rank and qubits must match.
"""
if vec0.rank != vec1.rank or vec0.qubit_nb != vec1.qubit_nb:
raise ValueError('Incompatible vectors. Qubits and rank must match')
vec1 = vec1.permute(vec0.qubits) # Make sure qubits in same order
return bk.inner(vec0.tensor, vec1.tensor) |
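A hedged example with two single-qubit vectors; the ``QubitVector`` constructor signature is inferred from the attributes used above and is an assumption:
import numpy as np

# Hypothetical states |0> and |+>; the overlap should be about 1/sqrt(2).
ket0 = QubitVector(np.array([1.0, 0.0]), [0])
ketplus = QubitVector(np.array([1.0, 1.0]) / np.sqrt(2), [0])
overlap = inner_product(ket0, ketplus)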
def block_matrix(A, B, C, D):
r"""Generate the operator matrix with quadrants
.. math::
\begin{pmatrix} A & B \\ C & D \end{pmatrix}
Args:
A (Matrix): Matrix of shape ``(n, m)``
B (Matrix): Matrix of shape ``(n, k)``
C (Matrix): Matrix of shape ``(l, m)``
D (Matrix): Matrix of shape ``(l, k)``
Returns:
Matrix: The combined block matrix ``[[A, B], [C, D]]``.
"""
return vstackm((hstackm((A, B)), hstackm((C, D)))) | r"""Generate the operator matrix with quadrants
.. math::
\begin{pmatrix} A & B \\ C & D \end{pmatrix}
Args:
A (Matrix): Matrix of shape ``(n, m)``
B (Matrix): Matrix of shape ``(n, k)``
C (Matrix): Matrix of shape ``(l, m)``
D (Matrix): Matrix of shape ``(l, k)``
Returns:
Matrix: The combined block matrix ``[[A, B], [C, D]]``. | Below is the the instruction that describes the task:
### Input:
r"""Generate the operator matrix with quadrants
.. math::
\begin{pmatrix} A & B \\ C & D \end{pmatrix}
Args:
A (Matrix): Matrix of shape ``(n, m)``
B (Matrix): Matrix of shape ``(n, k)``
C (Matrix): Matrix of shape ``(l, m)``
D (Matrix): Matrix of shape ``(l, k)``
Returns:
Matrix: The combined block matrix ``[[A, B], [C, D]]``.
### Response:
def block_matrix(A, B, C, D):
r"""Generate the operator matrix with quadrants
.. math::
\begin{pmatrix} A & B \\ C & D \end{pmatrix}
Args:
A (Matrix): Matrix of shape ``(n, m)``
B (Matrix): Matrix of shape ``(n, k)``
C (Matrix): Matrix of shape ``(l, m)``
D (Matrix): Matrix of shape ``(l, k)``
Returns:
Matrix: The combined block matrix ``[[A, B], [C, D]]``.
"""
return vstackm((hstackm((A, B)), hstackm((C, D)))) |
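A minimal sketch of assembling a 2x2 operator from four 1x1 blocks; the ``Matrix`` constructor call is an assumption:
# Hypothetical blocks; the result should behave like [[1, 2], [3, 4]].
A, B, C, D = Matrix([[1]]), Matrix([[2]]), Matrix([[3]]), Matrix([[4]])
M = block_matrix(A, B, C, D)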
def exam_reliability_by_datetime(
datetime_axis, datetime_new_axis, reliable_distance):
"""A datetime-version that takes datetime object list as x_axis
reliable_distance equals to the time difference in seconds.
"""
numeric_datetime_axis = [
totimestamp(a_datetime) for a_datetime in datetime_axis
]
numeric_datetime_new_axis = [
totimestamp(a_datetime) for a_datetime in datetime_new_axis
]
return exam_reliability(numeric_datetime_axis, numeric_datetime_new_axis,
reliable_distance, precision=0) | A datetime-version that takes datetime object list as x_axis
reliable_distance equals to the time difference in seconds. | Below is the the instruction that describes the task:
### Input:
A datetime-version that takes datetime object list as x_axis
reliable_distance equals to the time difference in seconds.
### Response:
def exam_reliability_by_datetime(
datetime_axis, datetime_new_axis, reliable_distance):
"""A datetime-version that takes datetime object list as x_axis
reliable_distance equals to the time difference in seconds.
"""
numeric_datetime_axis = [
totimestamp(a_datetime) for a_datetime in datetime_axis
]
numeric_datetime_new_axis = [
totimestamp(a_datetime) for a_datetime in datetime_new_axis
]
return exam_reliability(numeric_datetime_axis, numeric_datetime_new_axis,
reliable_distance, precision=0) |
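A small sketch of the datetime variant; the 1800-second threshold and the axes are made-up values, and the return shape is whatever ``exam_reliability`` produces:
from datetime import datetime, timedelta

# Hypothetical hourly observations and two interpolation points.
axis = [datetime(2023, 1, 1) + timedelta(hours=h) for h in range(6)]
new_axis = [datetime(2023, 1, 1, 2, 10), datetime(2023, 1, 1, 9, 0)]
# Points farther than 30 minutes (1800 s) from any observation count as unreliable.
flags = exam_reliability_by_datetime(axis, new_axis, reliable_distance=1800)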
def openBiocamFile(filename, verbose=False):
"""Open a Biocam hdf5 file, read and return the recording info, pick te correct method to access raw data, and return this to the caller."""
rf = h5py.File(filename, 'r')
# Read recording variables
recVars = rf.require_group('3BRecInfo/3BRecVars/')
# bitDepth = recVars['BitDepth'].value[0]
# maxV = recVars['MaxVolt'].value[0]
# minV = recVars['MinVolt'].value[0]
nFrames = recVars['NRecFrames'][0]
samplingRate = recVars['SamplingRate'][0]
signalInv = recVars['SignalInversion'][0]
# Read chip variables
chipVars = rf.require_group('3BRecInfo/3BMeaChip/')
nCols = chipVars['NCols'][0]
# Get the actual number of channels used in the recording
file_format = rf['3BData'].attrs.get('Version')
if file_format == 100:
nRecCh = len(rf['3BData/Raw'][0])
elif file_format == 101:
nRecCh = int(1. * rf['3BData/Raw'].shape[0] / nFrames)
else:
raise Exception('Unknown data file format.')
if verbose:
print('# 3Brain data format:', file_format, 'signal inversion', signalInv)
print('# signal range: ', recVars['MinVolt'][0], '- ', recVars['MaxVolt'][0])
print('# channels: ', nRecCh)
print('# frames: ', nFrames)
print('# sampling rate: ', samplingRate)
# get channel locations
r = rf['3BRecInfo/3BMeaStreams/Raw/Chs'][()]['Row']
c = rf['3BRecInfo/3BMeaStreams/Raw/Chs'][()]['Col']
rawIndices = np.vstack((r, c)).T
# assign channel numbers
chIndices = np.array([(x - 1) + (y - 1) * nCols for (y, x) in rawIndices])
# determine correct function to read data
if verbose:
print("# Signal inversion looks like " + str(signalInv) + ", guessing correct method for data access.")
print("# If your results look wrong, signal polarity is may be wrong.")
if file_format == 100:
if signalInv == -1:
read_function = readHDF5t_100
else:
read_function = readHDF5t_100_i
else:
if signalInv == -1:
read_function = readHDF5t_101_i
else:
read_function = readHDF5t_101
return (rf, nFrames, samplingRate, nRecCh, chIndices, file_format, signalInv, rawIndices, read_function) | Open a Biocam hdf5 file, read and return the recording info, pick te correct method to access raw data, and return this to the caller. | Below is the the instruction that describes the task:
### Input:
Open a Biocam hdf5 file, read and return the recording info, pick the correct method to access raw data, and return this to the caller.
### Response:
def openBiocamFile(filename, verbose=False):
"""Open a Biocam hdf5 file, read and return the recording info, pick te correct method to access raw data, and return this to the caller."""
rf = h5py.File(filename, 'r')
# Read recording variables
recVars = rf.require_group('3BRecInfo/3BRecVars/')
# bitDepth = recVars['BitDepth'].value[0]
# maxV = recVars['MaxVolt'].value[0]
# minV = recVars['MinVolt'].value[0]
nFrames = recVars['NRecFrames'][0]
samplingRate = recVars['SamplingRate'][0]
signalInv = recVars['SignalInversion'][0]
# Read chip variables
chipVars = rf.require_group('3BRecInfo/3BMeaChip/')
nCols = chipVars['NCols'][0]
# Get the actual number of channels used in the recording
file_format = rf['3BData'].attrs.get('Version')
if file_format == 100:
nRecCh = len(rf['3BData/Raw'][0])
elif file_format == 101:
nRecCh = int(1. * rf['3BData/Raw'].shape[0] / nFrames)
else:
raise Exception('Unknown data file format.')
if verbose:
print('# 3Brain data format:', file_format, 'signal inversion', signalInv)
print('# signal range: ', recVars['MinVolt'][0], '- ', recVars['MaxVolt'][0])
print('# channels: ', nRecCh)
print('# frames: ', nFrames)
print('# sampling rate: ', samplingRate)
# get channel locations
r = rf['3BRecInfo/3BMeaStreams/Raw/Chs'][()]['Row']
c = rf['3BRecInfo/3BMeaStreams/Raw/Chs'][()]['Col']
rawIndices = np.vstack((r, c)).T
# assign channel numbers
chIndices = np.array([(x - 1) + (y - 1) * nCols for (y, x) in rawIndices])
# determine correct function to read data
if verbose:
print("# Signal inversion looks like " + str(signalInv) + ", guessing correct method for data access.")
print("# If your results look wrong, signal polarity is may be wrong.")
if file_format == 100:
if signalInv == -1:
read_function = readHDF5t_100
else:
read_function = readHDF5t_100_i
else:
if signalInv == -1:
read_function = readHDF5t_101_i
else:
read_function = readHDF5t_101
return (rf, nFrames, samplingRate, nRecCh, chIndices, file_format, signalInv, rawIndices, read_function) |
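A hedged sketch of consuming the returned tuple; the argument order of the selected ``read_function`` is an assumption:
# Hypothetical read of the first 1024 frames of a Biocam recording.
(rf, nFrames, samplingRate, nRecCh, chIndices,
 file_format, signalInv, rawIndices, read_function) = openBiocamFile("recording.brw")
block = read_function(rf, 0, min(1024, nFrames), nRecCh)   # assumed (file, t0, t1, nch)
rf.close()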
def compile_dependencies(self, sourcepath, include_self=False):
"""
Apply compile on all dependencies
Args:
sourcepath (string): Sass source path to compile to its
destination using project settings.
Keyword Arguments:
include_self (bool): If ``True`` the given sourcepath is added to
items to compile, else only its dependencies are compiled.
"""
items = self.inspector.parents(sourcepath)
# Also add the current event related path
if include_self:
items.add(sourcepath)
return filter(None, [self.compile_source(item) for item in items]) | Apply compile on all dependencies
Args:
sourcepath (string): Sass source path to compile to its
destination using project settings.
Keyword Arguments:
include_self (bool): If ``True`` the given sourcepath is added to
items to compile, else only its dependencies are compiled. | Below is the the instruction that describes the task:
### Input:
Apply compile on all dependencies
Args:
sourcepath (string): Sass source path to compile to its
destination using project settings.
Keyword Arguments:
include_self (bool): If ``True`` the given sourcepath is added to
items to compile, else only its dependencies are compiled.
### Response:
def compile_dependencies(self, sourcepath, include_self=False):
"""
Apply compile on all dependencies
Args:
sourcepath (string): Sass source path to compile to its
destination using project settings.
Keyword Arguments:
include_self (bool): If ``True`` the given sourcepath is added to
items to compile, else only its dependencies are compiled.
"""
items = self.inspector.parents(sourcepath)
# Also add the current event related path
if include_self:
items.add(sourcepath)
return filter(None, [self.compile_source(item) for item in items]) |
def pop_object(self, element):
'''
Pop the object element if the object contains a higher TLP than allowed.
'''
redacted_text = "Redacted. Object contained TLP value higher than allowed."
element['id'] = ''
element['url'] = ''
element['type'] = ''
element['tags'] = []
element['etlp'] = None
element['title'] = redacted_text
element['tlpColor'] = element['tlpColor']
element['uploaded_on'] = ''
element['uploaded_by'] = ''
element['description'] = redacted_text
element['children_types'] = []
element['summary']['type'] = ''
element['summary']['value'] = ''
element['summary']['title'] = redacted_text
element['summary']['description'] = redacted_text
return element | Pop the object element if the object contains an higher TLP then allowed. | Below is the the instruction that describes the task:
### Input:
Pop the object element if the object contains a higher TLP than allowed.
### Response:
def pop_object(self, element):
'''
Pop the object element if the object contains a higher TLP than allowed.
'''
redacted_text = "Redacted. Object contained TLP value higher than allowed."
element['id'] = ''
element['url'] = ''
element['type'] = ''
element['tags'] = []
element['etlp'] = None
element['title'] = redacted_text
element['tlpColor'] = element['tlpColor']
element['uploaded_on'] = ''
element['uploaded_by'] = ''
element['description'] = redacted_text
element['children_types'] = []
element['summary']['type'] = ''
element['summary']['value'] = ''
element['summary']['title'] = redacted_text
element['summary']['description'] = redacted_text
return element |
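A minimal sketch of redacting one element; the dictionary mirrors the keys the method touches, and the handler instance is assumed:
# Hypothetical element carrying a TLP above the caller's clearance.
element = {
    'id': '42', 'url': '/objects/42', 'type': 'indicator', 'tags': ['apt'],
    'etlp': 3, 'title': 'internal title', 'tlpColor': '#ff0000',
    'uploaded_on': '2023-01-01', 'uploaded_by': 'analyst',
    'description': 'internal details', 'children_types': ['observable'],
    'summary': {'type': 'ip', 'value': '10.0.0.1',
                'title': 'internal title', 'description': 'internal details'},
}
redacted = handler.pop_object(element)       # handler is an assumed instance
assert redacted['title'].startswith('Redacted')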
def run(self, subdirectory=None):
"""
Write out project file and run GSSHA simulation
"""
with tmp_chdir(self.gssha_directory):
if self.hotstart_minimal_mode:
# remove all optional output cards
for gssha_optional_output_card in self.GSSHA_OPTIONAL_OUTPUT_CARDS:
self._delete_card(gssha_optional_output_card)
# make sure running in SUPER_QUIET mode
self._update_card('SUPER_QUIET', '')
if subdirectory is None:
# give execute folder name
subdirectory = "minimal_hotstart_run_{0}to{1}" \
.format(self.event_manager.simulation_start.strftime("%Y%m%d%H%M"),
self.event_manager.simulation_end.strftime("%Y%m%d%H%M"))
else:
# give execute folder name
subdirectory = "run_{0}to{1}".format(self.event_manager.simulation_start.strftime("%Y%m%d%H%M"),
self.event_manager.simulation_end.strftime("%Y%m%d%H%M"))
# ensure unique folder naming conventions and add to existing event manager
prj_evt_manager = self.project_manager.projectFileEventManager
prj_event = prj_evt_manager.add_event(name=subdirectory,
subfolder=subdirectory,
session=self.db_session)
eventyml_path = self.project_manager.getCard('#GSSHAPY_EVENT_YML') \
.value.strip("'").strip('"')
prj_evt_manager.write(session=self.db_session,
directory=self.gssha_directory,
name=os.path.basename(eventyml_path))
# ensure event manager not propagated to child event
self.project_manager.deleteCard('#GSSHAPY_EVENT_YML',
db_session=self.db_session)
self.db_session.delete(self.project_manager.projectFileEventManager)
self.db_session.commit()
# make working directory
working_directory = os.path.join(self.gssha_directory, prj_event.subfolder)
try:
os.mkdir(working_directory)
except OSError:
pass
# move simulation generated files to working directory
# PRECIP_FILE, HMET_NETCDF, HMET_ASCII, CHAN_POINT_INPUT
# TODO: Move HMET_ASCII files
for sim_card in self.simulation_modified_input_cards:
if sim_card != 'MAPPING_TABLE':
self._update_card_file_location(sim_card, working_directory)
mapping_table_card = self.project_manager.getCard('MAPPING_TABLE')
if mapping_table_card:
# read in mapping table
map_table_object = self.project_manager.readInputFile('MAPPING_TABLE',
self.gssha_directory,
self.db_session,
readIndexMaps=False)
# connect index maps to main gssha directory
for indexMap in map_table_object.indexMaps:
indexMap.filename = os.path.join("..", os.path.basename(indexMap.filename))
# write copy of mapping table to working directory
map_table_filename = os.path.basename(mapping_table_card.value.strip("'").strip('"'))
map_table_object.write(session=self.db_session,
directory=working_directory,
name=map_table_filename,
writeIndexMaps=False)
# connect to other output files in main gssha directory
for gssha_card in self.project_manager.projectCards:
if gssha_card.name not in self.GSSHA_REQUIRED_OUTPUT_PATH_CARDS + \
self.GSSHA_OPTIONAL_OUTPUT_PATH_CARDS + \
tuple(self.simulation_modified_input_cards):
if gssha_card.value:
updated_value = gssha_card.value.strip('"').strip("'")
if updated_value:
if gssha_card.name == "READ_CHAN_HOTSTART":
# there are two required files
# the .dht and .qht
if os.path.exists(updated_value + '.dht') \
and os.path.exists(updated_value + '.qht'):
updated_path = os.path.join("..", os.path.basename(updated_value))
gssha_card.value = '"{0}"'.format(updated_path)
elif os.path.exists(updated_value):
updated_path = os.path.join("..", os.path.basename(updated_value))
gssha_card.value = '"{0}"'.format(updated_path)
elif gssha_card.name == '#INDEXGRID_GUID':
path_split = updated_value.split()
updated_path = os.path.basename(path_split[0].strip('"').strip("'"))
if os.path.exists(updated_path):
new_path = os.path.join("..", os.path.basename(updated_path))
try:
# Get WMS ID for Index Map as part of value
gssha_card.value = '"{0}" "{1}"'.format(new_path, path_split[1])
except:
# Like normal if the ID isn't there
gssha_card.value = '"{0}"'.format(new_path)
else:
log.warning("{0} {1} not found in project directory ...".format("#INDEXGRID_GUID", updated_path))
# make sure project path is ""
self._update_card("PROJECT_PATH", "", True)
# WRITE OUT UPDATED GSSHA PROJECT FILE
self.project_manager.write(session=self.db_session,
directory=working_directory,
name=self.project_manager.name)
with tmp_chdir(working_directory):
# RUN SIMULATION
if self.gssha_executable and find_executable(self.gssha_executable) is not None:
log.info("Running GSSHA simulation ...")
try:
run_gssha_command = [self.gssha_executable,
os.path.join(working_directory, self.project_filename)]
# run GSSHA
out = subprocess.check_output(run_gssha_command)
# write out GSSHA output
log_file_path = os.path.join(working_directory, 'simulation.log')
with open(log_file_path, mode='w') as logfile:
logfile.write(out.decode('utf-8'))
# log to other logger if debug mode on
if log.isEnabledFor(logging.DEBUG):
for line in out.split(b'\n'):
log.debug(line.decode('utf-8'))
except subprocess.CalledProcessError as ex:
log.error("{0}: {1}".format(ex.returncode, ex.output))
else:
missing_exe_error = ("GSSHA executable not found. "
"Skipping GSSHA simulation run ...")
log.error(missing_exe_error)
raise ValueError(missing_exe_error)
return working_directory | Write out project file and run GSSHA simulation | Below is the the instruction that describes the task:
### Input:
Write out project file and run GSSHA simulation
### Response:
def run(self, subdirectory=None):
"""
Write out project file and run GSSHA simulation
"""
with tmp_chdir(self.gssha_directory):
if self.hotstart_minimal_mode:
# remove all optional output cards
for gssha_optional_output_card in self.GSSHA_OPTIONAL_OUTPUT_CARDS:
self._delete_card(gssha_optional_output_card)
# make sure running in SUPER_QUIET mode
self._update_card('SUPER_QUIET', '')
if subdirectory is None:
# give execute folder name
subdirectory = "minimal_hotstart_run_{0}to{1}" \
.format(self.event_manager.simulation_start.strftime("%Y%m%d%H%M"),
self.event_manager.simulation_end.strftime("%Y%m%d%H%M"))
else:
# give execute folder name
subdirectory = "run_{0}to{1}".format(self.event_manager.simulation_start.strftime("%Y%m%d%H%M"),
self.event_manager.simulation_end.strftime("%Y%m%d%H%M"))
# ensure unique folder naming conventions and add to existing event manager
prj_evt_manager = self.project_manager.projectFileEventManager
prj_event = prj_evt_manager.add_event(name=subdirectory,
subfolder=subdirectory,
session=self.db_session)
eventyml_path = self.project_manager.getCard('#GSSHAPY_EVENT_YML') \
.value.strip("'").strip('"')
prj_evt_manager.write(session=self.db_session,
directory=self.gssha_directory,
name=os.path.basename(eventyml_path))
# ensure event manager not propagated to child event
self.project_manager.deleteCard('#GSSHAPY_EVENT_YML',
db_session=self.db_session)
self.db_session.delete(self.project_manager.projectFileEventManager)
self.db_session.commit()
# make working directory
working_directory = os.path.join(self.gssha_directory, prj_event.subfolder)
try:
os.mkdir(working_directory)
except OSError:
pass
# move simulation generated files to working directory
# PRECIP_FILE, HMET_NETCDF, HMET_ASCII, CHAN_POINT_INPUT
# TODO: Move HMET_ASCII files
for sim_card in self.simulation_modified_input_cards:
if sim_card != 'MAPPING_TABLE':
self._update_card_file_location(sim_card, working_directory)
mapping_table_card = self.project_manager.getCard('MAPPING_TABLE')
if mapping_table_card:
# read in mapping table
map_table_object = self.project_manager.readInputFile('MAPPING_TABLE',
self.gssha_directory,
self.db_session,
readIndexMaps=False)
# connect index maps to main gssha directory
for indexMap in map_table_object.indexMaps:
indexMap.filename = os.path.join("..", os.path.basename(indexMap.filename))
# write copy of mapping table to working directory
map_table_filename = os.path.basename(mapping_table_card.value.strip("'").strip('"'))
map_table_object.write(session=self.db_session,
directory=working_directory,
name=map_table_filename,
writeIndexMaps=False)
# connect to other output files in main gssha directory
for gssha_card in self.project_manager.projectCards:
if gssha_card.name not in self.GSSHA_REQUIRED_OUTPUT_PATH_CARDS + \
self.GSSHA_OPTIONAL_OUTPUT_PATH_CARDS + \
tuple(self.simulation_modified_input_cards):
if gssha_card.value:
updated_value = gssha_card.value.strip('"').strip("'")
if updated_value:
if gssha_card.name == "READ_CHAN_HOTSTART":
# there are two required files
# the .dht and .qht
if os.path.exists(updated_value + '.dht') \
and os.path.exists(updated_value + '.qht'):
updated_path = os.path.join("..", os.path.basename(updated_value))
gssha_card.value = '"{0}"'.format(updated_path)
elif os.path.exists(updated_value):
updated_path = os.path.join("..", os.path.basename(updated_value))
gssha_card.value = '"{0}"'.format(updated_path)
elif gssha_card.name == '#INDEXGRID_GUID':
path_split = updated_value.split()
updated_path = os.path.basename(path_split[0].strip('"').strip("'"))
if os.path.exists(updated_path):
new_path = os.path.join("..", os.path.basename(updated_path))
try:
# Get WMS ID for Index Map as part of value
gssha_card.value = '"{0}" "{1}"'.format(new_path, path_split[1])
except:
# Like normal if the ID isn't there
gssha_card.value = '"{0}"'.format(new_path)
else:
log.warning("{0} {1} not found in project directory ...".format("#INDEXGRID_GUID", updated_path))
# make sure project path is ""
self._update_card("PROJECT_PATH", "", True)
# WRITE OUT UPDATED GSSHA PROJECT FILE
self.project_manager.write(session=self.db_session,
directory=working_directory,
name=self.project_manager.name)
with tmp_chdir(working_directory):
# RUN SIMULATION
if self.gssha_executable and find_executable(self.gssha_executable) is not None:
log.info("Running GSSHA simulation ...")
try:
run_gssha_command = [self.gssha_executable,
os.path.join(working_directory, self.project_filename)]
# run GSSHA
out = subprocess.check_output(run_gssha_command)
# write out GSSHA output
log_file_path = os.path.join(working_directory, 'simulation.log')
with open(log_file_path, mode='w') as logfile:
logfile.write(out.decode('utf-8'))
# log to other logger if debug mode on
if log.isEnabledFor(logging.DEBUG):
for line in out.split(b'\n'):
log.debug(line.decode('utf-8'))
except subprocess.CalledProcessError as ex:
log.error("{0}: {1}".format(ex.returncode, ex.output))
else:
missing_exe_error = ("GSSHA executable not found. "
"Skipping GSSHA simulation run ...")
log.error(missing_exe_error)
raise ValueError(missing_exe_error)
return working_directory |
def add_tileset(self, tileset):
""" Add a tileset to the map
:param tileset: TiledTileset
"""
assert (isinstance(tileset, TiledTileset))
self.tilesets.append(tileset) | Add a tileset to the map
:param tileset: TiledTileset | Below is the the instruction that describes the task:
### Input:
Add a tileset to the map
:param tileset: TiledTileset
### Response:
def add_tileset(self, tileset):
""" Add a tileset to the map
:param tileset: TiledTileset
"""
assert (isinstance(tileset, TiledTileset))
self.tilesets.append(tileset) |
def audio_visual_key(name=None):
"""
Creates the grammar for an Audio Visual Key code.
This is a variation on the ISAN (International Standard Audiovisual Number)
:param name: name for the field
:return: grammar for an ISRC field
"""
if name is None:
name = 'AVI Field'
society_code = basic.numeric(3)
society_code = society_code.setName('Society Code') \
.setResultsName('society_code')
av_number = basic.alphanum(15, extended=True, isLast=True)
field_empty = pp.Regex('[ ]{15}')
field_empty.setParseAction(pp.replaceWith(''))
av_number = av_number | field_empty
av_number = av_number.setName('Audio-Visual Number') \
.setResultsName('av_number')
field = pp.Group(society_code + pp.Optional(av_number))
field.setParseAction(lambda v: _to_avi(v[0]))
field = field.setName(name)
return field.setResultsName('audio_visual_key') | Creates the grammar for an Audio Visual Key code.
This is a variation on the ISAN (International Standard Audiovisual Number)
:param name: name for the field
:return: grammar for an ISRC field | Below is the the instruction that describes the task:
### Input:
Creates the grammar for an Audio Visual Key code.
This is a variation on the ISAN (International Standard Audiovisual Number)
:param name: name for the field
:return: grammar for an ISRC field
### Response:
def audio_visual_key(name=None):
"""
Creates the grammar for an Audio Visual Key code.
This is a variation on the ISAN (International Standard Audiovisual Number)
:param name: name for the field
:return: grammar for an ISRC field
"""
if name is None:
name = 'AVI Field'
society_code = basic.numeric(3)
society_code = society_code.setName('Society Code') \
.setResultsName('society_code')
av_number = basic.alphanum(15, extended=True, isLast=True)
field_empty = pp.Regex('[ ]{15}')
field_empty.setParseAction(pp.replaceWith(''))
av_number = av_number | field_empty
av_number = av_number.setName('Audio-Visual Number') \
.setResultsName('av_number')
field = pp.Group(society_code + pp.Optional(av_number))
field.setParseAction(lambda v: _to_avi(v[0]))
field = field.setName(name)
return field.setResultsName('audio_visual_key') |
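A hedged parsing example; the 18-character test value (3-digit society code plus a 15-character AV number) is made up and only illustrates the grammar's shape:
# Hypothetical parse of a CWR audio-visual key field.
grammar = audio_visual_key()
result = grammar.parseString('123ABCDEFGHIJKLMNO')   # 3-digit society code + 15 chars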
def report_by_year(self, summary_fct=None, years=None, ltd=1, prior_n_yrs=None, first_n_yrs=None, ranges=None,
bm_rets=None):
"""Summarize the profit and loss by year
:param summary_fct: function(ProfitAndLoss) and returns a dict or Series
:param years: int, array, boolean or None. If boolean and False, then show no years. If int or array
show only those years, else show all years if None
:param ltd: include live to date summary
:param prior_n_yrs: integer or list. Include summary for N years of return data prior to end date
:param first_n_yrs: integer or list. Include summary for N years of return data after start date
:param ranges: list of ranges. The range consists of a year start and year end
:param bm_rets: daily return series for the benchmark for beta/alpha calcs
:return: DataFrame
"""
if years and np.isscalar(years):
years = [years]
if summary_fct is None:
def summary_fct(pl):
monthly = pl.monthly_details
dly = pl.dly_details
data = OrderedDict()
data['mpl avg'] = monthly.mean
data['mpl std ann'] = monthly.std_ann
data['maxdd'] = dly.maxdd
data['maxdd dt'] = dly.maxdd_dt
data['avg dd'] = dly.dd_avg
data['best month'] = monthly.max
data['worst month'] = monthly.min
data['best day'] = dly.max
data['worst day'] = dly.min
data['nmonths'] = monthly.cnt
return data
results = OrderedDict()
if years is not False:
for yr, pandl in self.iter_by_year():
if years is None or yr in years:
results[yr] = summary_fct(pandl)
# First n years
if first_n_yrs:
first_n_yrs = first_n_yrs if not np.isscalar(first_n_yrs) else [first_n_yrs]
for first in first_n_yrs:
after = '12/31/%s' % (self.dly.index[0].year + first)
firstN = self.truncate(after=after)
results['first {0}yrs'.format(first)] = summary_fct(firstN)
# Ranges
if ranges:
for range in ranges:
yr_start, yr_end = range
rng_rets = self.truncate('1/1/%s' % yr_start, '12/31/%s' % yr_end)
results['{0}-{1}'.format(yr_start, yr_end)] = summary_fct(rng_rets)
# Prior n years
if prior_n_yrs:
prior_n_yrs = prior_n_yrs if not np.isscalar(prior_n_yrs) else [prior_n_yrs]
for prior in prior_n_yrs:
before = '1/1/%s' % (self.dly.index[-1].year - prior)
priorN = self.truncate(before)
results['past {0}yrs'.format(prior)] = summary_fct(priorN)
# LTD
if ltd:
results['ltd'] = summary_fct(self)
return pd.DataFrame(results, index=results.values()[0].keys()).T | Summarize the profit and loss by year
:param summary_fct: function(ProfitAndLoss) and returns a dict or Series
:param years: int, array, boolean or None. If boolean and False, then show no years. If int or array
show only those years, else show all years if None
:param ltd: include live to date summary
:param prior_n_yrs: integer or list. Include summary for N years of return data prior to end date
:param first_n_yrs: integer or list. Include summary for N years of return data after start date
:param ranges: list of ranges. The range consists of a year start and year end
:param bm_rets: daily return series for the benchmark for beta/alpha calcs
:return: DataFrame | Below is the the instruction that describes the task:
### Input:
Summarize the profit and loss by year
:param summary_fct: function(ProfitAndLoss) and returns a dict or Series
:param years: int, array, boolean or None. If boolean and False, then show no years. If int or array
show only those years, else show all years if None
:param ltd: include live to date summary
:param prior_n_yrs: integer or list. Include summary for N years of return data prior to end date
:param first_n_yrs: integer or list. Include summary for N years of return data after start date
:param ranges: list of ranges. The range consists of a year start and year end
:param bm_rets: daily return series for the benchmark for beta/alpha calcs
:return: DataFrame
### Response:
def report_by_year(self, summary_fct=None, years=None, ltd=1, prior_n_yrs=None, first_n_yrs=None, ranges=None,
bm_rets=None):
"""Summarize the profit and loss by year
:param summary_fct: function(ProfitAndLoss) and returns a dict or Series
:param years: int, array, boolean or None. If boolean and False, then show no years. If int or array
show only those years, else show all years if None
:param ltd: include live to date summary
:param prior_n_yrs: integer or list. Include summary for N years of return data prior to end date
:param first_n_yrs: integer or list. Include summary for N years of return data after start date
:param ranges: list of ranges. The range consists of a year start and year end
:param bm_rets: daily return series for the benchmark for beta/alpha calcs
:return: DataFrame
"""
if years and np.isscalar(years):
years = [years]
if summary_fct is None:
def summary_fct(pl):
monthly = pl.monthly_details
dly = pl.dly_details
data = OrderedDict()
data['mpl avg'] = monthly.mean
data['mpl std ann'] = monthly.std_ann
data['maxdd'] = dly.maxdd
data['maxdd dt'] = dly.maxdd_dt
data['avg dd'] = dly.dd_avg
data['best month'] = monthly.max
data['worst month'] = monthly.min
data['best day'] = dly.max
data['worst day'] = dly.min
data['nmonths'] = monthly.cnt
return data
results = OrderedDict()
if years is not False:
for yr, pandl in self.iter_by_year():
if years is None or yr in years:
results[yr] = summary_fct(pandl)
# First n years
if first_n_yrs:
first_n_yrs = first_n_yrs if not np.isscalar(first_n_yrs) else [first_n_yrs]
for first in first_n_yrs:
after = '12/31/%s' % (self.dly.index[0].year + first)
firstN = self.truncate(after=after)
results['first {0}yrs'.format(first)] = summary_fct(firstN)
# Ranges
if ranges:
for range in ranges:
yr_start, yr_end = range
rng_rets = self.truncate('1/1/%s' % yr_start, '12/31/%s' % yr_end)
results['{0}-{1}'.format(yr_start, yr_end)] = summary_fct(rng_rets)
# Prior n years
if prior_n_yrs:
prior_n_yrs = prior_n_yrs if not np.isscalar(prior_n_yrs) else [prior_n_yrs]
for prior in prior_n_yrs:
before = '1/1/%s' % (self.dly.index[-1].year - prior)
priorN = self.truncate(before)
results['past {0}yrs'.format(prior)] = summary_fct(priorN)
# LTD
if ltd:
results['ltd'] = summary_fct(self)
return pd.DataFrame(results, index=results.values()[0].keys()).T |
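A minimal standalone sketch of the same year-by-year summary pattern, assuming plain pandas rather than the library's ProfitAndLoss/truncate/iter_by_year helpers (the series, stats, and seed below are made up for illustration):

from collections import OrderedDict

import numpy as np
import pandas as pd

# Hypothetical daily P&L series spanning several years.
idx = pd.date_range('2015-01-01', '2017-12-31', freq='B')
rng = np.random.default_rng(0)
dly_pl = pd.Series(rng.normal(0.0, 1000.0, len(idx)), index=idx)

def summarize(series):
    # Mirrors the spirit of summary_fct: return an ordered dict of stats.
    return OrderedDict([
        ('total', series.sum()),
        ('daily avg', series.mean()),
        ('daily std', series.std()),
        ('best day', series.max()),
        ('worst day', series.min()),
    ])

results = OrderedDict()
for yr, chunk in dly_pl.groupby(dly_pl.index.year):
    results[yr] = summarize(chunk)
results['ltd'] = summarize(dly_pl)  # live-to-date row

# pd.DataFrame(results).T keeps working on Python 3,
# unlike indexing results.values()[0].keys() as in the original.
report = pd.DataFrame(results).T
print(report)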
def down(self, migration_id):
"""Rollback to migration."""
if not self.check_directory():
return
for migration in self.get_migrations_to_down(migration_id):
logger.info('Rollback migration %s' % migration.filename)
migration_module = self.load_migration_file(migration.filename)
if hasattr(migration_module, 'down'):
migration_module.down(self.db)
else:
logger.info('No down method on %s' % migration.filename)
self.collection.remove({'filename': migration.filename}) | Rollback to migration. | Below is the instruction that describes the task:
### Input:
Rollback to migration.
### Response:
def down(self, migration_id):
"""Rollback to migration."""
if not self.check_directory():
return
for migration in self.get_migrations_to_down(migration_id):
logger.info('Rollback migration %s' % migration.filename)
migration_module = self.load_migration_file(migration.filename)
if hasattr(migration_module, 'down'):
migration_module.down(self.db)
else:
logger.info('No down method on %s' % migration.filename)
self.collection.remove({'filename': migration.filename}) |
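A sketch of the same rollback dispatch — call a migration module's down() if it defines one, otherwise log and move on — using stand-in namespace objects instead of the mongo-backed manager and loaded migration files (the filenames and db handle are placeholders):

import logging
import types

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

db = {}  # stand-in for the real database handle

# Two fake "migration modules": one with a down() hook, one without.
m1 = types.SimpleNamespace(filename='0002_add_index.py',
                           down=lambda db: logger.info('dropping index'))
m2 = types.SimpleNamespace(filename='0001_initial.py')

for migration_module in (m1, m2):
    logger.info('Rollback migration %s', migration_module.filename)
    if hasattr(migration_module, 'down'):
        migration_module.down(db)
    else:
        logger.info('No down method on %s', migration_module.filename)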
def _get_timestamp(dirname_full, remove):
"""
Get the timestamp from the timestamp file.
Optionally mark it for removal if we're going to write another one.
"""
record_filename = os.path.join(dirname_full, RECORD_FILENAME)
if not os.path.exists(record_filename):
return None
mtime = os.stat(record_filename).st_mtime
mtime_str = datetime.fromtimestamp(mtime)
print('Found timestamp {}:{}'.format(dirname_full, mtime_str))
if Settings.record_timestamp and remove:
OLD_TIMESTAMPS.add(record_filename)
return mtime | Get the timestamp from the timestamp file.
Optionally mark it for removal if we're going to write another one. | Below is the instruction that describes the task:
### Input:
Get the timestamp from the timestamp file.
Optionally mark it for removal if we're going to write another one.
### Response:
def _get_timestamp(dirname_full, remove):
"""
Get the timestamp from the timestamp file.
Optionally mark it for removal if we're going to write another one.
"""
record_filename = os.path.join(dirname_full, RECORD_FILENAME)
if not os.path.exists(record_filename):
return None
mtime = os.stat(record_filename).st_mtime
mtime_str = datetime.fromtimestamp(mtime)
print('Found timestamp {}:{}'.format(dirname_full, mtime_str))
if Settings.record_timestamp and remove:
OLD_TIMESTAMPS.add(record_filename)
return mtime |
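The core of this helper is just an mtime lookup on a marker file; a self-contained version against a temporary file looks like the sketch below (RECORD_FILENAME, Settings and OLD_TIMESTAMPS belong to the surrounding module and are omitted, so the function name and file names here are placeholders):

import os
import tempfile
from datetime import datetime

def get_timestamp(record_filename):
    # Return the file's mtime, or None if the record file is missing.
    if not os.path.exists(record_filename):
        return None
    mtime = os.stat(record_filename).st_mtime
    print('Found timestamp {}:{}'.format(record_filename,
                                         datetime.fromtimestamp(mtime)))
    return mtime

with tempfile.TemporaryDirectory() as d:
    record = os.path.join(d, '.record')
    open(record, 'w').close()
    print(get_timestamp(record))
    print(get_timestamp(os.path.join(d, 'missing')))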
def humanize_filesize(bytes_size):
"""Returns human readable filesize.
:param int bytes_size:
:rtype: str
"""
if not bytes_size:
return '0 B'
names = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
name_idx = int(math.floor(math.log(bytes_size, 1024)))
size = round(bytes_size / math.pow(1024, name_idx), 2)
return '%s %s' % (size, names[name_idx]) | Returns human readable filesize.
:param int bytes_size:
:rtype: str | Below is the instruction that describes the task:
### Input:
Returns human readable filesize.
:param int bytes_size:
:rtype: str
### Response:
def humanize_filesize(bytes_size):
"""Returns human readable filesize.
:param int bytes_size:
:rtype: str
"""
if not bytes_size:
return '0 B'
names = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
name_idx = int(math.floor(math.log(bytes_size, 1024)))
size = round(bytes_size / math.pow(1024, name_idx), 2)
return '%s %s' % (size, names[name_idx]) |
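Usage is straightforward; the helper is repeated here so the snippet runs standalone, and the expected outputs (shown as comments) follow directly from the log-base-1024 bucketing and round(..., 2):

import math

def humanize_filesize(bytes_size):
    if not bytes_size:
        return '0 B'
    names = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
    name_idx = int(math.floor(math.log(bytes_size, 1024)))
    size = round(bytes_size / math.pow(1024, name_idx), 2)
    return '%s %s' % (size, names[name_idx])

print(humanize_filesize(0))            # 0 B
print(humanize_filesize(999))          # 999.0 B
print(humanize_filesize(2048))         # 2.0 KB
print(humanize_filesize(5 * 1024**3))  # 5.0 GB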
def _onNavigate(self, index):
'''Handle selection of path segment.'''
if index > 0:
self.setLocation(
self._locationWidget.itemData(index), interactive=True
) | Handle selection of path segment. | Below is the instruction that describes the task:
### Input:
Handle selection of path segment.
### Response:
def _onNavigate(self, index):
'''Handle selection of path segment.'''
if index > 0:
self.setLocation(
self._locationWidget.itemData(index), interactive=True
) |
def evalsha(self, sha, numkeys, *keys_and_args):
"""Emulates evalsha"""
if not self.script_exists(sha)[0]:
raise RedisError("Sha not registered")
script_callable = Script(self, self.shas[sha], self.load_lua_dependencies)
numkeys = max(numkeys, 0)
keys = keys_and_args[:numkeys]
args = keys_and_args[numkeys:]
return script_callable(keys, args) | Emulates evalsha | Below is the instruction that describes the task:
### Input:
Emulates evalsha
### Response:
def evalsha(self, sha, numkeys, *keys_and_args):
"""Emulates evalsha"""
if not self.script_exists(sha)[0]:
raise RedisError("Sha not registered")
script_callable = Script(self, self.shas[sha], self.load_lua_dependencies)
numkeys = max(numkeys, 0)
keys = keys_and_args[:numkeys]
args = keys_and_args[numkeys:]
return script_callable(keys, args) |
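The key/arg split is the only subtle part of this emulation: the first numkeys positional values are the KEYS and the rest are the ARGV. A standalone illustration of that slicing, without any Redis backend:

def split_keys_and_args(numkeys, *keys_and_args):
    # Mirrors the slicing used by the emulated EVALSHA above.
    numkeys = max(numkeys, 0)
    keys = keys_and_args[:numkeys]
    args = keys_and_args[numkeys:]
    return keys, args

print(split_keys_and_args(2, 'k1', 'k2', 'a1', 'a2'))
# (('k1', 'k2'), ('a1', 'a2'))
print(split_keys_and_args(0, 'a1'))
# ((), ('a1',))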
def handle(self, request_headers={}, signature_header=None):
"""Handle request."""
if self.client.webhook_secret is None:
raise ValueError('Error: no webhook secret.')
encoded_header = self._get_signature_header(signature_header, request_headers)
decoded_request = self._decode_request(encoded_header)
if 'type' not in decoded_request:
raise ValueError("Error invalid request: no type field found.")
handler = self._getHandlerForEvent(decoded_request['type'])
if handler is None:
return
if (self._get_fct_number_of_arg(handler) == 1):
handler(decoded_request)
return
handler(decoded_request, decoded_request['type']) | Handle request. | Below is the instruction that describes the task:
### Input:
Handle request.
### Response:
def handle(self, request_headers={}, signature_header=None):
"""Handle request."""
if self.client.webhook_secret is None:
raise ValueError('Error: no webhook secret.')
encoded_header = self._get_signature_header(signature_header, request_headers)
decoded_request = self._decode_request(encoded_header)
if 'type' not in decoded_request:
raise ValueError("Error invalid request: no type field found.")
handler = self._getHandlerForEvent(decoded_request['type'])
if handler is None:
return
if (self._get_fct_number_of_arg(handler) == 1):
handler(decoded_request)
return
handler(decoded_request, decoded_request['type']) |
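The arity-based dispatch at the end — pass the event type only when the handler accepts two arguments — can be reproduced with inspect.signature on plain callables; the handler names and event payload below are made up:

import inspect

def dispatch(handler, event):
    # Call handler(event) or handler(event, event['type']) depending on arity.
    n_args = len(inspect.signature(handler).parameters)
    if n_args == 1:
        handler(event)
    else:
        handler(event, event['type'])

def on_any(evt, evt_type):
    print('got', evt_type, 'payload', evt)

def on_simple(evt):
    print('got payload', evt)

event = {'type': 'conversation.created', 'id': 42}
dispatch(on_any, event)
dispatch(on_simple, event)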
def _compute_magnitude_scaling_term(self, C, mag):
"""
Compute and return magnitude scaling term in equation 2,
page 970.
"""
c1 = self.CONSTS['c1']
if mag <= c1:
return C['b1'] + C['b2'] * (mag - c1) + C['b3'] * (8.5 - mag) ** 2
else:
return C['b1'] + C['b7'] * (mag - c1) + C['b3'] * (8.5 - mag) ** 2 | Compute and return magnitude scaling term in equation 2,
page 970. | Below is the instruction that describes the task:
### Input:
Compute and return magnitude scaling term in equation 2,
page 970.
### Response:
def _compute_magnitude_scaling_term(self, C, mag):
"""
Compute and return magnitude scaling term in equation 2,
page 970.
"""
c1 = self.CONSTS['c1']
if mag <= c1:
return C['b1'] + C['b2'] * (mag - c1) + C['b3'] * (8.5 - mag) ** 2
else:
return C['b1'] + C['b7'] * (mag - c1) + C['b3'] * (8.5 - mag) ** 2 |
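With purely illustrative coefficients (the real b1, b2, b3, b7 and c1 come from the GMPE's coefficient table), the piecewise form is easy to check numerically — only the linear slope changes at the hinge magnitude c1, while the quadratic (8.5 - M)**2 term is shared:

# Illustrative coefficient values only, not the published ones.
C = {'b1': 1.0, 'b2': 0.5, 'b3': -0.1, 'b7': 0.3}
c1 = 6.5

def magnitude_scaling(mag):
    if mag <= c1:
        return C['b1'] + C['b2'] * (mag - c1) + C['b3'] * (8.5 - mag) ** 2
    return C['b1'] + C['b7'] * (mag - c1) + C['b3'] * (8.5 - mag) ** 2

for m in (5.0, 6.5, 7.5):
    print(m, round(magnitude_scaling(m), 4))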
def random_sn_types_array(
log,
sampleNumber,
relativeSNRates,
pathToOutputPlotDirectory,
plot=False):
"""
*Generate random supernova types from the weighted distributions set in the simulation settings file*
**Key Arguments:**
- ``log`` -- logger
- ``sampleNumber`` -- the sample number, i.e. array size
- ``relativeSNRates`` -- dictionary of the rates
- ``pathToOutputPlotDirectory`` -- path to the output directory (provided by the user)
- ``plot`` -- generate plot?
**Return:**
- ``snTypesArray`` -- numpy array of the random SN types
"""
################ > IMPORTS ################
## STANDARD LIB ##
## THIRD PARTY ##
## LOCAL APPLICATION ##
import numpy as np
import matplotlib.pyplot as plt
################ > VARIABLE SETTINGS ######
################ >ACTION(S) ################
randomSNTypeList = []
# CREATE COUNTERS FOR PLOTTING
counters = {}
for k, v in relativeSNRates.iteritems():
counters[k] = 0
for i in range(sampleNumber):
randNum = np.random.rand()
cumulative = 0.
for k, v in relativeSNRates.iteritems():
cumulative = cumulative + v
if (randNum <= cumulative):
randType = k
counters[k] += 1
break
randomSNTypeList.append(randType)
# for k, v in relativeSNRates.iteritems():
# log.debug('%s = %s' % (k, counters[k]))
snTypeArray = np.array(randomSNTypeList)
if plot:
numTypes = len(relativeSNRates)
x = np.arange(1, numTypes + 1, 1)
heights = []
xticks = []
for k, v in relativeSNRates.iteritems():
xticks.append(k)
heights.append(counters[k])
fig = plt.figure(
num=None,
figsize=(8, 8),
dpi=None,
facecolor=None,
edgecolor=None,
frameon=True)
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8])
ax.bar(
x,
heights,
width=0.8,
bottom=0)
plt.xticks(x + 0.5, xticks)
ax.set_xlabel('SN Type')
ax.set_ylabel('Number of SNe')
ax.grid(True)
title = "Weighted SN Distribution"
plt.title(title)
fileName = pathToOutputPlotDirectory + title.replace(" ", "_") + ".png"
plt.savefig(fileName)
plt.clf() # clear figure
return snTypeArray | *Generate random supernova types from the weighted distributions set in the simulation settings file*
**Key Arguments:**
- ``log`` -- logger
- ``sampleNumber`` -- the sample number, i.e. array size
- ``relativeSNRates`` -- dictionary of the rates
- ``pathToOutputPlotDirectory`` -- path to the output directory (provided by the user)
- ``plot`` -- generate plot?
**Return:**
- ``snTypesArray`` -- numpy array of the random SN types | Below is the instruction that describes the task:
### Input:
*Generate random supernova types from the weighted distributions set in the simulation settings file*
**Key Arguments:**
- ``log`` -- logger
- ``sampleNumber`` -- the sample number, i.e. array size
- ``relativeSNRates`` -- dictionary of the rates
- ``pathToOutputPlotDirectory`` -- path to the output directory (provided by the user)
- ``plot`` -- generate plot?
**Return:**
- ``snTypesArray`` -- numpy array of the random SN types
### Response:
def random_sn_types_array(
log,
sampleNumber,
relativeSNRates,
pathToOutputPlotDirectory,
plot=False):
"""
*Generate random supernova types from the weighted distributions set in the simulation settings file*
**Key Arguments:**
- ``log`` -- logger
- ``sampleNumber`` -- the sample number, i.e. array size
- ``relativeSNRates`` -- dictionary of the rates
- ``pathToOutputPlotDirectory`` -- path to the output directory (provided by the user)
- ``plot`` -- generate plot?
**Return:**
- ``snTypesArray`` -- numpy array of the random SN types
"""
################ > IMPORTS ################
## STANDARD LIB ##
## THIRD PARTY ##
## LOCAL APPLICATION ##
import numpy as np
import matplotlib.pyplot as plt
################ > VARIABLE SETTINGS ######
################ >ACTION(S) ################
randomSNTypeList = []
# CREATE COUNTERS FOR PLOTTING
counters = {}
for k, v in relativeSNRates.iteritems():
counters[k] = 0
for i in range(sampleNumber):
randNum = np.random.rand()
cumulative = 0.
for k, v in relativeSNRates.iteritems():
cumulative = cumulative + v
if (randNum <= cumulative):
randType = k
counters[k] += 1
break
randomSNTypeList.append(randType)
# for k, v in relativeSNRates.iteritems():
# log.debug('%s = %s' % (k, counters[k]))
snTypeArray = np.array(randomSNTypeList)
if plot:
numTypes = len(relativeSNRates)
x = np.arange(1, numTypes + 1, 1)
heights = []
xticks = []
for k, v in relativeSNRates.iteritems():
xticks.append(k)
heights.append(counters[k])
fig = plt.figure(
num=None,
figsize=(8, 8),
dpi=None,
facecolor=None,
edgecolor=None,
frameon=True)
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8])
ax.bar(
x,
heights,
width=0.8,
bottom=0)
plt.xticks(x + 0.5, xticks)
ax.set_xlabel('SN Type')
ax.set_ylabel('Number of SNe')
ax.grid(True)
title = "Weighted SN Distribution"
plt.title(title)
fileName = pathToOutputPlotDirectory + title.replace(" ", "_") + ".png"
plt.savefig(fileName)
plt.clf() # clear figure
return snTypeArray |
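The cumulative-sum loop above is the manual version of weighted sampling; with modern numpy the same draw is a one-liner via Generator.choice. The rate dictionary, sample size, and seed below are made up for illustration:

import numpy as np

relativeSNRates = {'Ia': 0.6, 'Ib/c': 0.15, 'II': 0.25}  # illustrative weights
sampleNumber = 10000

types = np.array(list(relativeSNRates.keys()))
probs = np.array(list(relativeSNRates.values()))
probs = probs / probs.sum()  # guard against weights not summing to exactly 1

rng = np.random.default_rng(42)
snTypeArray = rng.choice(types, size=sampleNumber, p=probs)

# The empirical frequencies should track the requested weights.
unique, counts = np.unique(snTypeArray, return_counts=True)
print(dict(zip(unique, counts / sampleNumber)))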
def generate_or_fail(self):
"""
Attempts to generate a random acyclic graph, raising an
InvariantError if unable to.
"""
t1 = self.generate_random_table()
t2 = self.generate_random_table()
f1 = self.generate_func(t1)
f2 = self.generate_func(t2)
edges = [(f1(word), f2(word)) for word in self.words]
# Try to generate that graph, mack!
# Note that failure to generate the graph here should be caught
# by the caller.
graph = forest.ForestGraph(edges=edges)
# Associate each edge with its corresponding word.
associations = {}
for num in range(len(self.words)):
edge = edges[num]
word = self.words[num]
associations[graph.canonical_order(edge)] = (num, word)
# Assign all of these to the object.
for name in ('t1', 't2', 'f1', 'f2', 'graph', 'associations'):
self.__dict__[name] = locals()[name] | Attempts to generate a random acyclic graph, raising an
InvariantError if unable to. | Below is the instruction that describes the task:
### Input:
Attempts to generate a random acyclic graph, raising an
InvariantError if unable to.
### Response:
def generate_or_fail(self):
"""
Attempts to generate a random acyclic graph, raising an
InvariantError if unable to.
"""
t1 = self.generate_random_table()
t2 = self.generate_random_table()
f1 = self.generate_func(t1)
f2 = self.generate_func(t2)
edges = [(f1(word), f2(word)) for word in self.words]
# Try to generate that graph, mack!
# Note that failure to generate the graph here should be caught
# by the caller.
graph = forest.ForestGraph(edges=edges)
# Associate each edge with its corresponding word.
associations = {}
for num in range(len(self.words)):
edge = edges[num]
word = self.words[num]
associations[graph.canonical_order(edge)] = (num, word)
# Assign all of these to the object.
for name in ('t1', 't2', 'f1', 'f2', 'graph', 'associations'):
self.__dict__[name] = locals()[name] |
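A sketch of the same idea with networkx standing in for the forest helper module: hash every word twice, treat each hash pair as an edge, and accept the attempt only if the resulting graph has no cycle (i.e. is a forest). The word list, table size, and hash functions are placeholders:

import networkx as nx

words = ['apple', 'banana', 'cherry', 'date']
m = 2 * len(words) + 1  # table size; larger tables make an acyclic draw likelier

def f1(word):
    return sum(ord(c) for c in word) % m

def f2(word):
    return sum((i + 1) * ord(c) for i, c in enumerate(word)) % m

edges = [(f1(w), f2(w)) for w in words]
graph = nx.Graph(edges)

if nx.is_forest(graph):
    print('acyclic; edge -> word:', dict(zip(edges, words)))
else:
    print('cycle detected: a real implementation retries with fresh random tables')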
def get(cls, uni_char):
"""Return the Unicode block of the given Unicode character"""
uni_char = unicod(uni_char) # Force to Unicode
code_point = ord(uni_char)
if Block._RANGE_KEYS is None:
Block._RANGE_KEYS = sorted(Block._RANGES.keys())
idx = bisect.bisect_left(Block._RANGE_KEYS, code_point)
if (idx > 0 and
code_point >= Block._RANGES[Block._RANGE_KEYS[idx - 1]].start and
code_point <= Block._RANGES[Block._RANGE_KEYS[idx - 1]].end):
return Block._RANGES[Block._RANGE_KEYS[idx - 1]]
elif (idx < len(Block._RANGES) and
code_point >= Block._RANGES[Block._RANGE_KEYS[idx]].start and
code_point <= Block._RANGES[Block._RANGE_KEYS[idx]].end):
return Block._RANGES[Block._RANGE_KEYS[idx]]
else:
return Block.UNKNOWN | Return the Unicode block of the given Unicode character | Below is the instruction that describes the task:
### Input:
Return the Unicode block of the given Unicode character
### Response:
def get(cls, uni_char):
"""Return the Unicode block of the given Unicode character"""
uni_char = unicod(uni_char) # Force to Unicode
code_point = ord(uni_char)
if Block._RANGE_KEYS is None:
Block._RANGE_KEYS = sorted(Block._RANGES.keys())
idx = bisect.bisect_left(Block._RANGE_KEYS, code_point)
if (idx > 0 and
code_point >= Block._RANGES[Block._RANGE_KEYS[idx - 1]].start and
code_point <= Block._RANGES[Block._RANGE_KEYS[idx - 1]].end):
return Block._RANGES[Block._RANGE_KEYS[idx - 1]]
elif (idx < len(Block._RANGES) and
code_point >= Block._RANGES[Block._RANGE_KEYS[idx]].start and
code_point <= Block._RANGES[Block._RANGE_KEYS[idx]].end):
return Block._RANGES[Block._RANGE_KEYS[idx]]
else:
return Block.UNKNOWN |
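The bisect pattern generalizes to any sorted, non-overlapping range table; a compact standalone version with a made-up three-entry range list (not the full Unicode block table):

import bisect
from collections import namedtuple

Range = namedtuple('Range', 'start end name')

# Sorted, non-overlapping ranges (illustrative subset only).
RANGES = [Range(0x0000, 0x007F, 'Basic Latin'),
          Range(0x0370, 0x03FF, 'Greek'),
          Range(0x0400, 0x04FF, 'Cyrillic')]
STARTS = [r.start for r in RANGES]

def block_of(ch):
    cp = ord(ch)
    idx = bisect.bisect_right(STARTS, cp) - 1
    if idx >= 0 and RANGES[idx].start <= cp <= RANGES[idx].end:
        return RANGES[idx].name
    return 'UNKNOWN'

print(block_of('A'))   # Basic Latin
print(block_of('Ω'))   # Greek
print(block_of('é'))   # UNKNOWN (not in this toy table)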
def is_interface_up(interface):
"""
Checks if an interface is up.
:param interface: interface name
:returns: boolean
"""
if sys.platform.startswith("linux"):
if interface not in psutil.net_if_addrs():
return False
import fcntl
SIOCGIFFLAGS = 0x8913
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
result = fcntl.ioctl(s.fileno(), SIOCGIFFLAGS, interface + '\0' * 256)
flags, = struct.unpack('H', result[16:18])
if flags & 1: # check if the up bit is set
return True
return False
except OSError as e:
raise aiohttp.web.HTTPInternalServerError(text="Exception when checking if {} is up: {}".format(interface, e))
else:
# TODO: Windows & OSX support
return True | Checks if an interface is up.
:param interface: interface name
:returns: boolean | Below is the instruction that describes the task:
### Input:
Checks if an interface is up.
:param interface: interface name
:returns: boolean
### Response:
def is_interface_up(interface):
"""
Checks if an interface is up.
:param interface: interface name
:returns: boolean
"""
if sys.platform.startswith("linux"):
if interface not in psutil.net_if_addrs():
return False
import fcntl
SIOCGIFFLAGS = 0x8913
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
result = fcntl.ioctl(s.fileno(), SIOCGIFFLAGS, interface + '\0' * 256)
flags, = struct.unpack('H', result[16:18])
if flags & 1: # check if the up bit is set
return True
return False
except OSError as e:
raise aiohttp.web.HTTPInternalServerError(text="Exception when checking if {} is up: {}".format(interface, e))
else:
# TODO: Windows & OSX support
return True |
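Where the fcntl ioctl is unavailable, psutil already exposes the up flag directly through net_if_stats(); a portable sketch (requires psutil, and is a simplification rather than the project's handler):

import psutil

def is_interface_up(interface):
    # net_if_stats() returns {name: snicstats(isup=..., ...)} on all platforms.
    stats = psutil.net_if_stats()
    return interface in stats and stats[interface].isup

for name, st in psutil.net_if_stats().items():
    print(name, 'up' if st.isup else 'down')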
def delete(self, storagemodel:object, modeldefinition = None) -> bool:
""" delete the blob from storage """
deleted = False
blobservice = modeldefinition['blobservice']
container_name = modeldefinition['container']
blob_name = storagemodel.name
try:
if blobservice.exists(container_name, blob_name):
""" delete """
blob = blobservice.delete_blob(container_name, blob_name)
deleted = True
except Exception as e:
msg = 'can not delete blob {} from storage because {!s}'.format(blob_name, e)
raise AzureStorageWrapException(storagemodel, msg=msg)
return deleted | delete the blob from storage | Below is the instruction that describes the task:
### Input:
delete the blob from storage
### Response:
def delete(self, storagemodel:object, modeldefinition = None) -> bool:
""" delete the blob from storage """
deleted = False
blobservice = modeldefinition['blobservice']
container_name = modeldefinition['container']
blob_name = storagemodel.name
try:
if blobservice.exists(container_name, blob_name):
""" delete """
blob = blobservice.delete_blob(container_name, blob_name)
deleted = True
except Exception as e:
msg = 'can not delete blob {} from storage because {!s}'.format(blob_name, e)
raise AzureStorageWrapException(storagemodel, msg=msg)
return deleted |
def x(self, position=None):
"""Set/Get actor position along x axis."""
p = self.GetPosition()
if position is None:
return p[0]
self.SetPosition(position, p[1], p[2])
if self.trail:
self.updateTrail()
return self | Set/Get actor position along x axis. | Below is the instruction that describes the task:
### Input:
Set/Get actor position along x axis.
### Response:
def x(self, position=None):
"""Set/Get actor position along x axis."""
p = self.GetPosition()
if position is None:
return p[0]
self.SetPosition(position, p[1], p[2])
if self.trail:
self.updateTrail()
return self |
def players(self, postgame, game_type):
"""Return parsed players."""
for i, attributes in self._players():
yield self._parse_player(i, attributes, postgame, game_type) | Return parsed players. | Below is the instruction that describes the task:
### Input:
Return parsed players.
### Response:
def players(self, postgame, game_type):
"""Return parsed players."""
for i, attributes in self._players():
yield self._parse_player(i, attributes, postgame, game_type) |
def _hash_outputs(self, index, sighash_type):
'''BIP143 hashOutputs implementation
Args:
index (int): index of input being signed
sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL
Returns:
(bytes): the hashOutputs, a 32 byte hash
'''
if sighash_type == shared.SIGHASH_ALL:
# If the sighash type is ALL,
# hashOutputs is the double SHA256 of all output amounts
# paired up with their scriptPubKey;
outputs = ByteData()
for tx_out in self.tx_outs:
outputs += tx_out.to_bytes()
return utils.hash256(outputs.to_bytes())
elif (sighash_type == shared.SIGHASH_SINGLE
and index < len(self.tx_outs)):
# if sighash type is SINGLE
# and the input index is smaller than the number of outputs,
# hashOutputs is the double SHA256 of the output at the same index
return utils.hash256(self.tx_outs[index].to_bytes())
else:
# Otherwise, hashOutputs is a uint256 of 0x0000......0000
raise NotImplementedError(
'I refuse to implement the SIGHASH_SINGLE bug.') | BIP143 hashOutputs implementation
Args:
index (int): index of input being signed
sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL
Returns:
(bytes): the hashOutputs, a 32 byte hash | Below is the instruction that describes the task:
### Input:
BIP143 hashOutputs implementation
Args:
index (int): index of input being signed
sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL
Returns:
(bytes): the hashOutputs, a 32 byte hash
### Response:
def _hash_outputs(self, index, sighash_type):
'''BIP143 hashOutputs implementation
Args:
index (int): index of input being signed
sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL
Returns:
(bytes): the hashOutputs, a 32 byte hash
'''
if sighash_type == shared.SIGHASH_ALL:
# If the sighash type is ALL,
# hashOutputs is the double SHA256 of all output amounts
# paired up with their scriptPubKey;
outputs = ByteData()
for tx_out in self.tx_outs:
outputs += tx_out.to_bytes()
return utils.hash256(outputs.to_bytes())
elif (sighash_type == shared.SIGHASH_SINGLE
and index < len(self.tx_outs)):
# if sighash type is SINGLE
# and the input index is smaller than the number of outputs,
# hashOutputs is the double SHA256 of the output at the same index
return utils.hash256(self.tx_outs[index].to_bytes())
else:
# Otherwise, hashOutputs is a uint256 of 0x0000......0000
raise NotImplementedError(
'I refuse to implement the SIGHASH_SINGLE bug.') |
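The hashing itself is just a double SHA-256 over serialized outputs; a minimal standalone sketch with dummy byte strings standing in for real TxOut serializations (the SIGHASH constants are the standard 0x01/0x03 values, and hash_outputs here is a simplified stand-in, not the library's method):

import hashlib

SIGHASH_ALL = 0x01
SIGHASH_SINGLE = 0x03

def hash256(data):
    # Bitcoin-style double SHA-256.
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def hash_outputs(serialized_outputs, index, sighash_type):
    if sighash_type == SIGHASH_ALL:
        return hash256(b''.join(serialized_outputs))
    if sighash_type == SIGHASH_SINGLE and index < len(serialized_outputs):
        return hash256(serialized_outputs[index])
    raise NotImplementedError('SIGHASH_SINGLE bug not emulated')

outs = [b'\x00' * 8 + b'\x19' + b'\xaa' * 25,   # fake value + fake scriptPubKey
        b'\x01' * 8 + b'\x19' + b'\xbb' * 25]
print(hash_outputs(outs, 0, SIGHASH_ALL).hex())
print(hash_outputs(outs, 0, SIGHASH_SINGLE).hex())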
def constantrotating_to_static(frame_r, frame_i, w, t=None):
"""
Transform from a constantly rotating frame to a static, inertial frame.
Parameters
----------
frame_i : `~gala.potential.StaticFrame`
frame_r : `~gala.potential.ConstantRotatingFrame`
w : `~gala.dynamics.PhaseSpacePosition`, `~gala.dynamics.Orbit`
t : quantity_like (optional)
Required if input coordinates are just a phase-space position.
Returns
-------
pos : `~astropy.units.Quantity`
Position in static, inertial frame.
vel : `~astropy.units.Quantity`
Velocity in static, inertial frame.
"""
return _constantrotating_static_helper(frame_r=frame_r, frame_i=frame_i,
w=w, t=t, sign=-1.) | Transform from a constantly rotating frame to a static, inertial frame.
Parameters
----------
frame_i : `~gala.potential.StaticFrame`
frame_r : `~gala.potential.ConstantRotatingFrame`
w : `~gala.dynamics.PhaseSpacePosition`, `~gala.dynamics.Orbit`
t : quantity_like (optional)
Required if input coordinates are just a phase-space position.
Returns
-------
pos : `~astropy.units.Quantity`
Position in static, inertial frame.
vel : `~astropy.units.Quantity`
Velocity in static, inertial frame. | Below is the instruction that describes the task:
### Input:
Transform from a constantly rotating frame to a static, inertial frame.
Parameters
----------
frame_i : `~gala.potential.StaticFrame`
frame_r : `~gala.potential.ConstantRotatingFrame`
w : `~gala.dynamics.PhaseSpacePosition`, `~gala.dynamics.Orbit`
t : quantity_like (optional)
Required if input coordinates are just a phase-space position.
Returns
-------
pos : `~astropy.units.Quantity`
Position in static, inertial frame.
vel : `~astropy.units.Quantity`
Velocity in static, inertial frame.
### Response:
def constantrotating_to_static(frame_r, frame_i, w, t=None):
"""
Transform from a constantly rotating frame to a static, inertial frame.
Parameters
----------
frame_i : `~gala.potential.StaticFrame`
frame_r : `~gala.potential.ConstantRotatingFrame`
w : `~gala.dynamics.PhaseSpacePosition`, `~gala.dynamics.Orbit`
t : quantity_like (optional)
Required if input coordinates are just a phase-space position.
Returns
-------
pos : `~astropy.units.Quantity`
Position in static, inertial frame.
vel : `~astropy.units.Quantity`
Velocity in static, inertial frame.
"""
return _constantrotating_static_helper(frame_r=frame_r, frame_i=frame_i,
w=w, t=t, sign=-1.) |
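For intuition, the underlying relation in two dimensions is the textbook one — x_i = R(Ωt) x_r and v_i = R(Ωt)(v_r + Ω × x_r) — which the numpy sketch below illustrates; this is not gala's internal helper, and the inputs are arbitrary test values:

import numpy as np

def rotating_to_static_2d(x_r, v_r, omega, t):
    # Map 2D position/velocity from a frame rotating at angular rate omega
    # (rad per unit time, about +z) into the static frame at time t.
    c, s = np.cos(omega * t), np.sin(omega * t)
    R = np.array([[c, -s], [s, c]])
    omega_cross_x = omega * np.array([-x_r[1], x_r[0]])  # Omega x r in 2D
    x_i = R @ x_r
    v_i = R @ (v_r + omega_cross_x)
    return x_i, v_i

x_i, v_i = rotating_to_static_2d(np.array([1.0, 0.0]),
                                 np.array([0.0, 0.0]),
                                 omega=0.5, t=2.0)
print(x_i, v_i)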
def get_google_songs(self, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):
"""Create song list from user's Google Music library.
Parameters:
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Mobileclient client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Mobileclient client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
Returns:
A list of Google Music song dicts matching criteria and
a list of Google Music song dicts filtered out using filter criteria.
"""
logger.info("Loading Google Music songs...")
google_songs = self.api.get_all_songs()
matched_songs, filtered_songs = filter_google_songs(
google_songs, include_filters=include_filters, exclude_filters=exclude_filters,
all_includes=all_includes, all_excludes=all_excludes
)
logger.info("Filtered {0} Google Music songs".format(len(filtered_songs)))
logger.info("Loaded {0} Google Music songs".format(len(matched_songs)))
return matched_songs, filtered_songs | Create song list from user's Google Music library.
Parameters:
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Mobileclient client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Mobileclient client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
Returns:
A list of Google Music song dicts matching criteria and
a list of Google Music song dicts filtered out using filter criteria. | Below is the instruction that describes the task:
### Input:
Create song list from user's Google Music library.
Parameters:
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Mobileclient client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Mobileclient client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
Returns:
A list of Google Music song dicts matching criteria and
a list of Google Music song dicts filtered out using filter criteria.
### Response:
def get_google_songs(self, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):
"""Create song list from user's Google Music library.
Parameters:
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Mobileclient client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Mobileclient client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
Returns:
A list of Google Music song dicts matching criteria and
a list of Google Music song dicts filtered out using filter criteria.
"""
logger.info("Loading Google Music songs...")
google_songs = self.api.get_all_songs()
matched_songs, filtered_songs = filter_google_songs(
google_songs, include_filters=include_filters, exclude_filters=exclude_filters,
all_includes=all_includes, all_excludes=all_excludes
)
logger.info("Filtered {0} Google Music songs".format(len(filtered_songs)))
logger.info("Loaded {0} Google Music songs".format(len(matched_songs)))
return matched_songs, filtered_songs |
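The include/exclude filtering delegated to filter_google_songs boils down to regex matching over metadata fields with any/all semantics; a self-contained sketch of that logic over plain dicts (the field names, filter function names, and sample songs are examples, not the library's implementation):

import re

def matches(song, filters, require_all):
    checks = [bool(re.search(pattern, str(song.get(field, ''))))
              for field, pattern in filters]
    return all(checks) if require_all else any(checks)

def filter_songs(songs, include_filters=None, exclude_filters=None,
                 all_includes=False, all_excludes=False):
    matched, filtered = [], []
    for song in songs:
        keep = True
        if include_filters and not matches(song, include_filters, all_includes):
            keep = False
        if keep and exclude_filters and matches(song, exclude_filters, all_excludes):
            keep = False
        (matched if keep else filtered).append(song)
    return matched, filtered

songs = [{'artist': 'Radiohead', 'album': 'OK Computer'},
         {'artist': 'Muse', 'album': 'Live at Rome'}]
kept, dropped = filter_songs(songs, exclude_filters=[('album', r'(?i)live')])
print(kept, dropped)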