def wr_dat_files(self, expanded=False, write_dir=''):
"""
Write each of the specified dat files
"""
# Get the set of dat files to be written, and
# the channels to be written to each file.
file_names, dat_channels = describe_list_indices(self.file_name)
# Get the fmt and byte offset corresponding to each dat file
DAT_FMTS = {}
dat_offsets = {}
for fn in file_names:
DAT_FMTS[fn] = self.fmt[dat_channels[fn][0]]
# byte_offset may not be present
if self.byte_offset is None:
dat_offsets[fn] = 0
else:
dat_offsets[fn] = self.byte_offset[dat_channels[fn][0]]
# Write the dat files
if expanded:
for fn in file_names:
wr_dat_file(fn, DAT_FMTS[fn], None, dat_offsets[fn], True,
[self.e_d_signal[ch] for ch in dat_channels[fn]],
self.samps_per_frame, write_dir=write_dir)
else:
# Create a copy to prevent overwrite
dsig = self.d_signal.copy()
for fn in file_names:
wr_dat_file(fn, DAT_FMTS[fn],
dsig[:, dat_channels[fn][0]:dat_channels[fn][-1]+1],
dat_offsets[fn], write_dir=write_dir)
def rl_wordglue(x):
"""
Glue short words to the word before/after them with a non-breaking space
"""
patterns = (
# glue particles to the preceding word
(re.compile(u'(\\s+)(же|ли|ль|бы|б|ж|ка)([\\.,!\\?:;]?\\s+)', re.UNICODE), u'\u202f\\2\\3'),
# glue short words to the following word
(re.compile(u'\\b([a-zA-ZА-Яа-я]{1,3})(\\s+)', re.UNICODE), u'\\1\u202f'),
# glue a dash to the preceding word
(re.compile(u'(\\s+)([\u2014\\-]+)(\\s+)', re.UNICODE), u'\u202f\\2\\3'),
# glue the last two words of a paragraph together
# (it is assumed that paragraphs are passed as separate lines)
(re.compile(u'([^\\s]+)\\s+([^\\s]+)$', re.UNICODE), u'\\1\u202f\\2'),
)
return _sub_patterns(patterns, x)
def build_flags(library, type_, path):
"""Return separated build flags from pkg-config output"""
pkg_config_path = [path]
if "PKG_CONFIG_PATH" in os.environ:
pkg_config_path.append(os.environ['PKG_CONFIG_PATH'])
if "LIB_DIR" in os.environ:
pkg_config_path.append(os.environ['LIB_DIR'])
pkg_config_path.append(os.path.join(os.environ['LIB_DIR'], "pkgconfig"))
options = [
"--static",
{
'I': "--cflags-only-I",
'L': "--libs-only-L",
'l': "--libs-only-l"
}[type_]
]
return [
flag.strip("-{}".format(type_))
for flag
in subprocess.check_output(
["pkg-config"] + options + [library],
env=dict(os.environ, PKG_CONFIG_PATH=":".join(pkg_config_path))
).decode("UTF-8").split()
]
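# Added usage sketch (not part of the original source): how the three flag
# types might be collected for a build. "libfoo" and the pkg-config path are
# placeholder assumptions; pkg-config must be installed for this to work.
def _example_collect_build_flags(pkgconfig_dir="/usr/local/lib/pkgconfig"):
    include_dirs = build_flags("libfoo", "I", pkgconfig_dir)  # stripped -I flags
    library_dirs = build_flags("libfoo", "L", pkgconfig_dir)  # stripped -L flags
    libraries = build_flags("libfoo", "l", pkgconfig_dir)     # stripped -l flags
    return include_dirs, library_dirs, libraries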
def add_user(
self, user, first_name=None, last_name=None, email=None, password=None):
"""Add a new user.
Args:
user (string): User name.
first_name (optional[string]): User's first name. Defaults to None.
last_name (optional[string]): User's last name. Defaults to None.
email: (optional[string]): User's email address. Defaults to None.
password: (optional[string]): User's password. Defaults to None.
Raises:
requests.HTTPError on failure.
"""
self.service.add_user(
user, first_name, last_name, email, password,
self.url_prefix, self.auth, self.session, self.session_send_opts)
def collection(self, collection_name):
"""
implements Requirement 15 (/req/core/sfc-md-op)
@type collection_name: string
@param collection_name: name of collection
@returns: feature collection metadata
"""
path = 'collections/{}'.format(collection_name)
url = self._build_url(path)
LOGGER.debug('Request: {}'.format(url))
response = requests.get(url, headers=REQUEST_HEADERS).json()
return response
def easeInOutExpo(n):
"""An exponential tween function that accelerates, reaches the midpoint, and then decelerates.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
"""
_checkRange(n)
if n == 0:
return 0
elif n == 1:
return 1
else:
n = n * 2
if n < 1:
return 0.5 * 2**(10 * (n - 1))
else:
n -= 1
# 0.5 * (-(2 ** (-10 * n)) + 2)
return 0.5 * (-1 * (2 ** (-10 * n)) + 2)
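# Added example (not in the original source): a quick self-check of the tween's
# key values, computed directly from the formula above; _checkRange is assumed
# to only validate that 0.0 <= n <= 1.0.
def _demo_easeInOutExpo():
    assert easeInOutExpo(0.25) == 0.015625  # 0.5 * 2 ** (10 * (0.5 - 1))
    assert easeInOutExpo(0.5) == 0.5        # exact midpoint
    assert easeInOutExpo(0.75) == 0.984375  # 0.5 * (-(2 ** -5) + 2)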
def max_pulse_sp(self):
"""
Used to set the pulse size in milliseconds for the signal that tells the
servo to drive to the maximum (clockwise) position_sp. Default value is 2400.
Valid values are 2300 to 2700. You must write to the position_sp attribute for
changes to this attribute to take effect.
"""
self._max_pulse_sp, value = self.get_attr_int(self._max_pulse_sp, 'max_pulse_sp')
return value
def QA_indicator_ASI(DataFrame, M1=26, M2=10):
"""
LC=REF(CLOSE,1);
AA=ABS(HIGH-LC);
BB=ABS(LOW-LC);
CC=ABS(HIGH-REF(LOW,1));
DD=ABS(LC-REF(OPEN,1));
R=IF(AA>BB AND AA>CC,AA+BB/2+DD/4,IF(BB>CC AND BB>AA,BB+AA/2+DD/4,CC+DD/4));
X=(CLOSE-LC+(CLOSE-OPEN)/2+LC-REF(OPEN,1));
SI=16*X/R*MAX(AA,BB);
ASI:SUM(SI,M1);
ASIT:MA(ASI,M2);
"""
CLOSE = DataFrame['close']
HIGH = DataFrame['high']
LOW = DataFrame['low']
OPEN = DataFrame['open']
LC = REF(CLOSE, 1)
AA = ABS(HIGH - LC)
BB = ABS(LOW-LC)
CC = ABS(HIGH - REF(LOW, 1))
DD = ABS(LC - REF(OPEN, 1))
R = IFAND(AA > BB, AA > CC, AA+BB/2+DD/4,
IFAND(BB > CC, BB > AA, BB+AA/2+DD/4, CC+DD/4))
X = (CLOSE - LC + (CLOSE - OPEN) / 2 + LC - REF(OPEN, 1))
SI = 16*X/R*MAX(AA, BB)
ASI = SUM(SI, M1)
ASIT = MA(ASI, M2)
return pd.DataFrame({
'ASI': ASI, 'ASIT': ASIT
})
def explode_contact_groups_into_contacts(item, contactgroups):
"""
Get all contacts of contact_groups and put them in contacts container
:param item: item where have contact_groups property
:type item: object
:param contactgroups: all contactgroups object
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: None
"""
if not hasattr(item, 'contact_groups'):
return
# TODO : See if we can remove this if
cgnames = ''
if item.contact_groups:
if isinstance(item.contact_groups, list):
cgnames = item.contact_groups
else:
cgnames = item.contact_groups.split(',')
cgnames = strip_and_uniq(cgnames)
for cgname in cgnames:
contactgroup = contactgroups.find_by_name(cgname)
if not contactgroup:
item.add_error("The contact group '%s' defined on the %s '%s' do not exist"
% (cgname, item.__class__.my_type, item.get_name()))
continue
cnames = contactgroups.get_members_of_group(cgname)
# We add contacts into our contacts
if cnames:
if hasattr(item, 'contacts'):
# Fix #1054 - bad contact explosion
# item.contacts.extend(cnames)
item.contacts = item.contacts + cnames
else:
item.contacts = cnames
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
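# Added illustration (not in the original source): the pax data block parsed
# above is a sequence of "<length> GNU.sparse.offset=<n>\n" and
# "<length> GNU.sparse.numbytes=<n>\n" records that the two regexes pair up.
def _demo_gnusparse_00():
    import re
    buf = (b"20 GNU.sparse.offset=512\n"
           b"22 GNU.sparse.numbytes=1024\n"
           b"21 GNU.sparse.offset=4096\n"
           b"22 GNU.sparse.numbytes=2048\n")
    offsets = [int(m.group(1)) for m in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf)]
    numbytes = [int(m.group(1)) for m in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf)]
    return list(zip(offsets, numbytes))  # [(512, 1024), (4096, 2048)]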
def search(self, start_ts, end_ts):
"""Query Elasticsearch for documents in a time range.
This method is used to find documents that may be in conflict during
a rollback event in MongoDB.
"""
return self._stream_search(
index=self.meta_index_name,
body={"query": {"range": {"_ts": {"gte": start_ts, "lte": end_ts}}}},
)
def run_file_or_script (script=None, file_name='script_from_command_line',
argv=None, params=None, **kwargs):
"""Main entry point for bin/ayrton and unittests."""
runner= Ayrton (**kwargs)
if params is None:
params= ExecParams ()
if script is None:
v= runner.run_file (file_name, argv, params)
else:
if not isinstance(script, bytes):
# the problem here is that the parser expects the source as bytes
# so we must encode the source before passing it
script = script.encode()
v= runner.run_script (script, file_name, argv, params)
return v
def sweepYfiltered(self):
"""
Get the filtered sweepY of the current sweep.
Only works if self.kernel has been generated.
"""
assert self.kernel is not None
return swhlab.common.convolve(self.sweepY,self.kernel)
def upload_cart(cart, collection):
"""
Connect to mongo and store your cart in the specified collection.
"""
cart_cols = cart_db()
cart_json = read_json_document(cart.cart_file())
try:
cart_id = cart_cols[collection].save(cart_json)
except MongoErrors.AutoReconnect:
raise JuicerConfigError("Error saving cart to `cart_host`. Ensure that this node is the master.")
return cart_id
def _safe_sparse_mask(tensor: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
"""
In PyTorch 1.0, Tensor._sparse_mask was changed to Tensor.sparse_mask.
This wrapper allows AllenNLP to (temporarily) work with both 1.0 and 0.4.1.
"""
# pylint: disable=protected-access
try:
return tensor.sparse_mask(mask)
except AttributeError:
# TODO(joelgrus): remove this and/or warn at some point
return tensor._sparse_mask(mask)
def download_sample(job, sample, inputs):
"""
Download the input sample
:param JobFunctionWrappingJob job: passed by Toil automatically
:param tuple sample: Tuple containing (UUID,URL) of a sample
:param Namespace inputs: Stores input arguments (see main)
"""
uuid, url = sample
job.fileStore.logToMaster('Downloading sample: {}'.format(uuid))
# Download sample
tar_id = job.addChildJobFn(download_url_job, url, s3_key_path=inputs.ssec, disk='30G').rv()
# Create copy of inputs for each sample
sample_inputs = argparse.Namespace(**vars(inputs))
sample_inputs.uuid = uuid
sample_inputs.cores = multiprocessing.cpu_count()
# Call children and follow-on jobs
job.addFollowOnJobFn(process_sample, sample_inputs, tar_id, cores=2, disk='60G')
def _verify_cartesian(s, t):
"""Verifies that a point is in the reference triangle.
I.e., checks that they sum to <= one and are each non-negative.
Args:
s (float): Parameter along the reference triangle.
t (float): Parameter along the reference triangle.
Raises:
ValueError: If the point lies outside the reference triangle.
"""
if s < 0.0 or t < 0.0 or s + t > 1.0:
raise ValueError("Point lies outside reference triangle", s, t)
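# Added usage sketch (not in the original source): points inside the reference
# triangle pass silently, points outside raise ValueError.
def _demo_verify_cartesian():
    _verify_cartesian(0.25, 0.5)  # fine: both non-negative and 0.25 + 0.5 <= 1
    try:
        _verify_cartesian(0.75, 0.5)  # 0.75 + 0.5 > 1, outside the triangle
    except ValueError:
        pass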
def read_atsplib(filename):
"basic function for reading a ATSP problem on the TSPLIB format"
"NOTE: only works for explicit matrices"
if filename[-3:] == ".gz":
f = gzip.open(filename, 'r')
data = f.readlines()
else:
f = open(filename, 'r')
data = f.readlines()
for line in data:
if line.find("DIMENSION") >= 0:
n = int(line.split()[1])
break
else:
raise IOError("'DIMENSION' keyword not found in file '%s'" % filename)
for line in data:
if line.find("EDGE_WEIGHT_TYPE") >= 0:
if line.split()[1] == "EXPLICIT":
break
else:
raise IOError("'EDGE_WEIGHT_TYPE' is not 'EXPLICIT' in file '%s'" % filename)
for k,line in enumerate(data):
if line.find("EDGE_WEIGHT_SECTION") >= 0:
break
else:
raise IOError("'EDGE_WEIGHT_SECTION' not found in file '%s'" % filename)
c = {}
# flatten list of distances
dist = []
for line in data[k+1:]:
if line.find("EOF") >= 0:
break
for val in line.split():
dist.append(int(val))
k = 0
for i in range(n):
for j in range(n):
c[i+1,j+1] = dist[k]
k += 1
return n,c
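# Added illustration (not from the original source): the minimal file layout the
# parser above expects -- an explicit full distance matrix. The keyword names
# come straight from the parsing code; the concrete numbers are made up.
_EXAMPLE_ATSP = """\
NAME: tiny3
TYPE: ATSP
DIMENSION: 3
EDGE_WEIGHT_TYPE: EXPLICIT
EDGE_WEIGHT_FORMAT: FULL_MATRIX
EDGE_WEIGHT_SECTION
0 5 9
4 0 7
8 6 0
EOF
"""
# Saving this text to a file and calling read_atsplib(path) should yield
# n == 3 with c[1, 2] == 5 and c[2, 1] == 4.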
def upload(self, stop_at=None):
"""
Perform file upload.
Performs continous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size.
"""
self.stop_at = stop_at or self.file_size
while self.offset < self.stop_at:
self.upload_chunk()
else:
if self.log_func:
self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at))
def getStrips(self, maxstrips=None):
"""Get comic strips."""
if maxstrips:
word = u"strip" if maxstrips == 1 else "strips"
msg = u'Retrieving %d %s' % (maxstrips, word)
else:
msg = u'Retrieving all strips'
if self.indexes:
if len(self.indexes) == 1:
msg += u" for index %s" % self.indexes[0]
else:
msg += u" for indexes %s" % self.indexes
# Always call starter() since it might initialize cookies.
# See for example Oglaf comic.
self.starter()
urls = [self.getIndexStripUrl(index) for index in self.indexes]
else:
urls = [self.getLatestUrl()]
if self.adult:
msg += u" (including adult content)"
out.info(msg)
for url in urls:
for strip in self.getStripsFor(url, maxstrips):
yield strip
def read(parser, stream):
"""
Return an AST from the input ES5 stream.
Arguments
parser
A parser instance.
stream
Either a stream object or a callable that produces one. The
stream object to read from; its 'read' method will be invoked.
If a callable was provided, the 'close' method on its return
value will be called to close the stream.
"""
source = stream() if callable(stream) else stream
try:
text = source.read()
stream_name = getattr(source, 'name', None)
try:
result = parser(text)
except ECMASyntaxError as e:
error_name = repr_compat(stream_name or source)
raise type(e)('%s in %s' % (str(e), error_name))
finally:
if callable(stream):
source.close()
result.sourcepath = stream_name
return result
def push_peer(self, peer):
"""Push a new peer into the heap"""
self.order += 1
peer.order = self.order + random.randint(0, self.size())
heap.push(self, peer)
def set_default_verify_paths(self):
"""
Specify that the platform provided CA certificates are to be used for
verification purposes. This method has some caveats related to the
binary wheels that cryptography (pyOpenSSL's primary dependency) ships:
* macOS will only load certificates using this method if the user has
the ``[email protected]`` `Homebrew <https://brew.sh>`_ formula installed
in the default location.
* Windows will not work.
* manylinux1 cryptography wheels will work on most common Linux
distributions in pyOpenSSL 17.1.0 and above. pyOpenSSL detects the
manylinux1 wheel and attempts to load roots via a fallback path.
:return: None
"""
# SSL_CTX_set_default_verify_paths will attempt to load certs from
# both a cafile and capath that are set at compile time. However,
# it will first check environment variables and, if present, load
# those paths instead
set_result = _lib.SSL_CTX_set_default_verify_paths(self._context)
_openssl_assert(set_result == 1)
# After attempting to set default_verify_paths we need to know whether
# to go down the fallback path.
# First we'll check to see if any env vars have been set. If so,
# we won't try to do anything else because the user has set the path
# themselves.
dir_env_var = _ffi.string(
_lib.X509_get_default_cert_dir_env()
).decode("ascii")
file_env_var = _ffi.string(
_lib.X509_get_default_cert_file_env()
).decode("ascii")
if not self._check_env_vars_set(dir_env_var, file_env_var):
default_dir = _ffi.string(_lib.X509_get_default_cert_dir())
default_file = _ffi.string(_lib.X509_get_default_cert_file())
# Now we check to see if the default_dir and default_file are set
# to the exact values we use in our manylinux1 builds. If they are
# then we know to load the fallbacks
if (
default_dir == _CRYPTOGRAPHY_MANYLINUX1_CA_DIR and
default_file == _CRYPTOGRAPHY_MANYLINUX1_CA_FILE
):
# This is manylinux1, let's load our fallback paths
self._fallback_default_verify_paths(
_CERTIFICATE_FILE_LOCATIONS,
_CERTIFICATE_PATH_LOCATIONS
)
def sline_(self, window_size=5,
y_label="Moving average", chart_label=None):
"""
Get a moving average curve chart to smooth between points
"""
options = dict(window_size=window_size, y_label=y_label)
try:
return self._get_chart("sline", style=self.chart_style,
opts=self.chart_opts, label=chart_label,
options=options)
except Exception as e:
self.err(e, self.sline_, "Can not draw smooth curve chart")
def _get_date(data, position, dummy0, opts, dummy1):
"""Decode a BSON datetime to python datetime.datetime."""
end = position + 8
millis = _UNPACK_LONG(data[position:end])[0]
return _millis_to_datetime(millis, opts), end
def cosine_similarity(sent1: str, sent2: str) -> float:
"""
Calculates cosine similarity between 2 sentences/documents.
Thanks to @vpekar, see http://goo.gl/ykibJY
"""
WORD = re.compile(r'\w+')
def get_cosine(vec1, vec2):
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x]**2 for x in vec1.keys()])
sum2 = sum([vec2[x]**2 for x in vec2.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
def text_to_vector(text):
words = WORD.findall(text)
return Counter(words)
vector1 = text_to_vector(sent1)
vector2 = text_to_vector(sent2)
cosine = get_cosine(vector1, vector2)
return cosine
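# Added worked example (not from the original source): identical texts score
# ~1.0, texts with no shared words score 0.0, partial overlap lands in between.
def _demo_cosine_similarity():
    assert abs(cosine_similarity("red green blue", "red green blue") - 1.0) < 1e-9
    assert cosine_similarity("red green blue", "cyan magenta") == 0.0
    mixed = cosine_similarity("the cat sat on the mat", "the cat sat")
    return round(mixed, 3)  # 0.816, i.e. 4 / sqrt(8 * 3)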
def checkArgs(args):
"""Checks the arguments and options.
:param args: an object containing the options of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exists with code 1.
"""
# Check that the bed, bim and fam files exist
for fileName in [args.bfile + i for i in [".bed", ".bim", ".fam"]]:
if not os.path.isfile(fileName):
msg = "%(fileName)s: no such file" % locals()
raise ProgramError(msg)
# Check the number of markers on chromosome 23
if args.nbChr23 < 0:
msg = ("{}: number of markers on chr 23 must be "
"positive".format(args.nbChr23))
raise ProgramError(msg)
# If we ask for LRR and BAF, we need a directory
if args.lrr_baf:
if not os.path.isdir(args.lrr_baf_raw_dir):
msg = "{}: no such directory".format(args.lrr_baf_raw_dir)
raise ProgramError(msg)
if args.lrr_baf_dpi < 10:
msg = "{}: DPI too low".format(args.dpi)
raise ProgramError(msg)
return True
def matchCollapsedState( self ):
"""
Matches the collapsed state for this groupbox.
"""
collapsed = not self.isChecked()
if self._inverted:
collapsed = not collapsed
if ( not self.isCollapsible() or not collapsed ):
for child in self.children():
if ( not isinstance(child, QWidget) ):
continue
child.show()
self.setMaximumHeight(MAX_INT)
self.adjustSize()
if ( self.parent() ):
self.parent().adjustSize()
else:
self.setMaximumHeight(self.collapsedHeight())
for child in self.children():
if ( not isinstance(child, QWidget) ):
continue
child.hide()
def filter_module(module, filter_type):
""" filter functions or variables from import module
@params
module: imported module
filter_type: "function" or "variable"
"""
filter_type = ModuleUtils.is_function if filter_type == "function" else ModuleUtils.is_variable
module_functions_dict = dict(filter(filter_type, vars(module).items()))
return module_functions_dict
def alignICP(source, target, iters=100, rigid=False):
"""
Return a copy of source actor which is aligned to
target actor through the `Iterative Closest Point` algorithm.
The core of the algorithm is to match each vertex in one surface with
the closest surface point on the other, then apply the transformation
that modify one surface to best match the other (in the least-square sense).
.. hint:: |align1| |align1.py|_
|align2| |align2.py|_
"""
if isinstance(source, Actor):
source = source.polydata()
if isinstance(target, Actor):
target = target.polydata()
icp = vtk.vtkIterativeClosestPointTransform()
icp.SetSource(source)
icp.SetTarget(target)
icp.SetMaximumNumberOfIterations(iters)
if rigid:
icp.GetLandmarkTransform().SetModeToRigidBody()
icp.StartByMatchingCentroidsOn()
icp.Update()
icpTransformFilter = vtk.vtkTransformPolyDataFilter()
icpTransformFilter.SetInputData(source)
icpTransformFilter.SetTransform(icp)
icpTransformFilter.Update()
poly = icpTransformFilter.GetOutput()
actor = Actor(poly)
# actor.info['transform'] = icp.GetLandmarkTransform() # not working!
# do it manually...
sourcePoints = vtk.vtkPoints()
targetPoints = vtk.vtkPoints()
for i in range(10):
p1 = [0, 0, 0]
source.GetPoints().GetPoint(i, p1)
sourcePoints.InsertNextPoint(p1)
p2 = [0, 0, 0]
poly.GetPoints().GetPoint(i, p2)
targetPoints.InsertNextPoint(p2)
# Setup the transform
landmarkTransform = vtk.vtkLandmarkTransform()
landmarkTransform.SetSourceLandmarks(sourcePoints)
landmarkTransform.SetTargetLandmarks(targetPoints)
if rigid:
landmarkTransform.SetModeToRigidBody()
actor.info["transform"] = landmarkTransform
return actor
def get_asset_content_lookup_session(self):
"""Pass through to provider get_asset_content_lookup_session"""
return getattr(sessions, 'AssetContentLookupSession')(
provider_session=self._provider_manager.get_asset_content_lookup_session(),
authz_session=self._authz_session)
def _asciify_dict(data):
""" Ascii-fies dict keys and values """
ret = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = _remove_accents(key)
key = key.encode('utf-8')
# # note new if
if isinstance(value, unicode):
value = _remove_accents(value)
value = value.encode('utf-8')
elif isinstance(value, list):
value = _asciify_list(value)
elif isinstance(value, dict):
value = _asciify_dict(value)
ret[key] = value
return ret
def _compute_mean(self, C, rup, dists, sites, imt):
"""
Returns the mean ground motion acceleration and velocity
"""
mean = (self._get_magnitude_scaling_term(C, rup.mag) +
self._get_distance_scaling_term(C, rup.mag, dists.rrup) +
self._get_site_amplification_term(C, sites.vs30))
# convert from cm/s**2 to g for SA and from m/s**2 to g for PGA (PGV
# is already in cm/s) and also convert from base 10 to base e.
if imt.name == "PGA":
mean = np.log((10 ** mean) * ((2 * np.pi / 0.01) ** 2) *
1e-2 / g)
elif imt.name == "SA":
mean = np.log((10 ** mean) * ((2 * np.pi / imt.period) ** 2) *
1e-2 / g)
else:
mean = np.log(10 ** mean)
return mean
def main():
'''
Sets up command line parser for Toil/ADAM based k-mer counter, and launches
k-mer counter with optional Spark cluster.
'''
parser = argparse.ArgumentParser()
# add parser arguments
parser.add_argument('--input_path',
help='The full path to the input SAM/BAM/ADAM/FASTQ file.')
parser.add_argument('--output-path',
help='full path where final results will be output.')
parser.add_argument('--kmer-length',
help='Length to use for k-mer counting. Defaults to 20.',
default=20,
type=int)
parser.add_argument('--spark-conf',
help='Optional configuration to pass to Spark commands. Either this or --workers must be specified.',
default=None)
parser.add_argument('--memory',
help='Optional memory configuration for Spark workers/driver. This must be specified if --workers is specified.',
default=None,
type=int)
parser.add_argument('--cores',
help='Optional core configuration for Spark workers/driver. This must be specified if --workers is specified.',
default=None,
type=int)
parser.add_argument('--workers',
help='Number of workers to spin up in Toil. Either this or --spark-conf must be specified. If this is specified, --memory and --cores must be specified.',
default=None,
type=int)
parser.add_argument('--sudo',
help='Run docker containers with sudo. Defaults to False.',
default=False,
action='store_true')
Job.Runner.addToilOptions(parser)
args = parser.parse_args()
Job.Runner.startToil(Job.wrapJobFn(kmer_dag,
args.kmer_length,
args.input_path,
args.output_path,
args.spark_conf,
args.workers,
args.cores,
args.memory,
args.sudo,
checkpoint=True), args)
def GetFileSystemTypeIndicators(cls, path_spec, resolver_context=None):
"""Determines if a file contains a supported file system types.
Args:
path_spec (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built-in context which is not multi process safe.
Returns:
list[str]: supported format type indicators.
"""
if (cls._file_system_remainder_list is None or
cls._file_system_store is None):
specification_store, remainder_list = cls._GetSpecificationStore(
definitions.FORMAT_CATEGORY_FILE_SYSTEM)
cls._file_system_remainder_list = remainder_list
cls._file_system_store = specification_store
if cls._file_system_scanner is None:
cls._file_system_scanner = cls._GetSignatureScanner(
cls._file_system_store)
return cls._GetTypeIndicators(
cls._file_system_scanner, cls._file_system_store,
cls._file_system_remainder_list, path_spec,
resolver_context=resolver_context)
def get_label_idx(self, label):
""" Returns the index of a label.
Returns None if not found.
"""
for i in range(len(self)):
if self.mem[i].is_label and self.mem[i].inst == label:
return i
return None
def _convert_hdxobjects(self, hdxobjects):
# type: (List[HDXObjectUpperBound]) -> List[HDXObjectUpperBound]
"""Helper function to convert supplied list of HDX objects to a list of dict
Args:
hdxobjects (List[T <= HDXObject]): List of HDX objects to convert
Returns:
List[Dict]: List of HDX objects converted to simple dictionaries
"""
newhdxobjects = list()
for hdxobject in hdxobjects:
newhdxobjects.append(hdxobject.data)
return newhdxobjects
def find(self, query):
"""Finds the first cell matching the query.
:param query: A literal string to match or compiled regular expression.
:type query: str, :py:class:`re.RegexObject`
"""
try:
return self._finder(finditem, query)
except StopIteration:
raise CellNotFound(query)
def clear_first_angle_projection(self):
"""stub"""
if (self.get_first_angle_projection_metadata().is_read_only() or
self.get_first_angle_projection_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['firstAngle'] = \
self._first_angle_metadata['default_boolean_values'][0]
def start(self):
""" Start running the interactive session (blocking) """
self.running = True
while self.running:
self.update_prompt()
try:
self.cmdloop()
except KeyboardInterrupt:
print()
except botocore.exceptions.BotoCoreError as e:
print(e)
except ParseException as e:
print(self.engine.pformat_exc(e))
except Exception:
traceback.print_exc()
self.engine.reset()
def t_ID(self, token):
r'[a-zA-Z_][a-zA-Z0-9_-]*'
if token.value in self.KEYWORDS:
token.type = self.KEYWORDS[token.value]
return token
else:
return token
def spread(df, key, values, convert=False):
"""
Transforms a "long" DataFrame into a "wide" format using a key and value
column.
If you have a mixed datatype column in your long-format DataFrame then the
default behavior is for the spread columns to be of type `object`, or
string. If you want to try to convert dtypes when spreading, you can set
the convert keyword argument in spread to True.
Args:
key (str, int, or symbolic): Label for the key column.
values (str, int, or symbolic): Label for the values column.
Kwargs:
convert (bool): Boolean indicating whether or not to try and convert
the spread columns to more appropriate data types.
Example:
widened = elongated >> spread(X.variable, X.value)
widened >> head(5)
_ID carat clarity color cut depth price table x y z
0 0 0.23 SI2 E Ideal 61.5 326 55 3.95 3.98 2.43
1 1 0.21 SI1 E Premium 59.8 326 61 3.89 3.84 2.31
2 10 0.3 SI1 J Good 64 339 55 4.25 4.28 2.73
3 100 0.75 SI1 D Very Good 63.2 2760 56 5.8 5.75 3.65
4 1000 0.75 SI1 D Ideal 62.3 2898 55 5.83 5.8 3.62
"""
# Taken mostly from dplython package
columns = df.columns.tolist()
id_cols = [col for col in columns if col not in [key, values]]
temp_index = ['' for i in range(len(df))]
for id_col in id_cols:
temp_index += df[id_col].map(str)
out_df = df.assign(temp_index=temp_index)
out_df = out_df.set_index('temp_index')
spread_data = out_df[[key, values]]
if not all(spread_data.groupby([spread_data.index, key]).agg(
'count').reset_index()[values] < 2):
raise ValueError('Duplicate identifiers')
spread_data = spread_data.pivot(columns=key, values=values)
if convert and (out_df[values].dtype.kind in 'OSaU'):
columns_to_convert = [col for col in spread_data if col not in columns]
spread_data = convert_type(spread_data, columns_to_convert)
out_df = out_df[id_cols].drop_duplicates()
out_df = out_df.merge(spread_data, left_index=True, right_index=True).reset_index(drop=True)
out_df = (out_df >> arrange(id_cols)).reset_index(drop=True)
return out_df
def build_resource_dependency_graph(resource_classes,
include_backrefs=False):
"""
Builds a graph of dependencies among the given resource classes.
The dependency graph is a directed graph with member resource classes as
nodes. An edge between two nodes represents a member or collection
attribute.
:param resource_classes: resource classes to determine interdependencies
of.
:type resource_classes: sequence of registered resources.
:param bool include_backrefs: flag indicating if dependencies
introduced by back-references (e.g., a child resource referencing its
parent) should be included in the dependency graph.
"""
def visit(mb_cls, grph, path, incl_backrefs):
for attr_name in get_resource_class_attribute_names(mb_cls):
if is_resource_class_terminal_attribute(mb_cls, attr_name):
continue
child_descr = getattr(mb_cls, attr_name)
child_mb_cls = get_member_class(child_descr.attr_type)
# We do not follow cyclic references back to a resource class
# that is last in the path.
if len(path) > 0 and child_mb_cls is path[-1] \
and not incl_backrefs:
continue
if not grph.has_node(child_mb_cls):
grph.add_node(child_mb_cls)
path.append(mb_cls)
visit(child_mb_cls, grph, path, incl_backrefs)
path.pop()
if not grph.has_edge((mb_cls, child_mb_cls)):
grph.add_edge((mb_cls, child_mb_cls))
dep_grph = digraph()
for resource_class in resource_classes:
mb_cls = get_member_class(resource_class)
if not dep_grph.has_node(mb_cls):
dep_grph.add_node(mb_cls)
visit(mb_cls, dep_grph, [], include_backrefs)
return dep_grph
def pack(self):
"""
The `CodeAttribute` in packed byte string form.
"""
with io.BytesIO() as file_out:
file_out.write(pack(
'>HHI',
self.max_stack,
self.max_locals,
len(self._code)
))
file_out.write(self._code)
file_out.write(pack('>H', len(self.exception_table)))
for exception in self.exception_table:
file_out.write(pack('>HHHH', *exception))
self.attributes.pack(file_out)
return file_out.getvalue()
async def _load_all_nodes(self):
"""Load all nodes via API."""
get_all_nodes_information = GetAllNodesInformation(pyvlx=self.pyvlx)
await get_all_nodes_information.do_api_call()
if not get_all_nodes_information.success:
raise PyVLXException("Unable to retrieve node information")
self.clear()
for notification_frame in get_all_nodes_information.notification_frames:
node = convert_frame_to_node(self.pyvlx, notification_frame)
if node is not None:
self.add(node)
def agent():
"""Run the agent, connecting to a (remote) host started independently."""
agent_module, agent_name = FLAGS.agent.rsplit(".", 1)
agent_cls = getattr(importlib.import_module(agent_module), agent_name)
logging.info("Starting agent:")
with remote_sc2_env.RemoteSC2Env(
map_name=FLAGS.map,
host=FLAGS.host,
host_port=FLAGS.host_port,
lan_port=FLAGS.lan_port,
name=FLAGS.agent_name or agent_name,
race=sc2_env.Race[FLAGS.agent_race],
step_mul=FLAGS.step_mul,
agent_interface_format=sc2_env.parse_agent_interface_format(
feature_screen=FLAGS.feature_screen_size,
feature_minimap=FLAGS.feature_minimap_size,
rgb_screen=FLAGS.rgb_screen_size,
rgb_minimap=FLAGS.rgb_minimap_size,
action_space=FLAGS.action_space,
use_feature_units=FLAGS.use_feature_units),
visualize=FLAGS.render) as env:
agents = [agent_cls()]
logging.info("Connected, starting run_loop.")
try:
run_loop.run_loop(agents, env)
except remote_sc2_env.RestartException:
pass
logging.info("Done.")
def parse_transcripts(transcript_lines):
"""Parse and massage the transcript information
There could be multiple lines with information about the same transcript.
This is why it is necessary to parse the transcripts first and then return a dictionary
where all information has been merged.
Args:
transcript_lines(): This could be an iterable with strings or a pandas.DataFrame
Returns:
parsed_transcripts(dict): Map from enstid -> transcript info
"""
LOG.info("Parsing transcripts")
# Parse the transcripts, we need to check if it is a request or a file handle
if isinstance(transcript_lines, DataFrame):
transcripts = parse_ensembl_transcript_request(transcript_lines)
else:
transcripts = parse_ensembl_transcripts(transcript_lines)
# Since there can be multiple lines with information about the same transcript
# we store transcript information in a dictionary for now
parsed_transcripts = {}
# Loop over the parsed transcripts
for tx in transcripts:
tx_id = tx['ensembl_transcript_id']
ens_gene_id = tx['ensembl_gene_id']
# Check if the transcript has been added
# If not, create a new transcript
if tx_id not in parsed_transcripts:
tx_info = {
'chrom': tx['chrom'],
'transcript_start': tx['transcript_start'],
'transcript_end': tx['transcript_end'],
'mrna': set(),
'mrna_predicted': set(),
'nc_rna': set(),
'ensembl_gene_id': ens_gene_id,
'ensembl_transcript_id': tx_id,
}
parsed_transcripts[tx_id] = tx_info
tx_info = parsed_transcripts[tx_id]
# Add the ref seq information
if tx.get('refseq_mrna_predicted'):
tx_info['mrna_predicted'].add(tx['refseq_mrna_predicted'])
if tx.get('refseq_mrna'):
tx_info['mrna'].add(tx['refseq_mrna'])
if tx.get('refseq_ncrna'):
tx_info['nc_rna'].add(tx['refseq_ncrna'])
return parsed_transcripts
def _bundle_models(custom_models):
""" Create a JavaScript bundle with selected `models`. """
exports = []
modules = []
def read_json(name):
with io.open(join(bokehjs_dir, "js", name + ".json"), encoding="utf-8") as f:
return json.loads(f.read())
bundles = ["bokeh", "bokeh-api", "bokeh-widgets", "bokeh-tables", "bokeh-gl"]
known_modules = set(sum([ read_json(name) for name in bundles ], []))
custom_impls = _compile_models(custom_models)
extra_modules = {}
def resolve_modules(to_resolve, root):
resolved = {}
for module in to_resolve:
if module.startswith(("./", "../")):
def mkpath(module, ext=""):
return abspath(join(root, *module.split("/")) + ext)
if module.endswith(exts):
path = mkpath(module)
if not exists(path):
raise RuntimeError("no such module: %s" % module)
else:
for ext in exts:
path = mkpath(module, ext)
if exists(path):
break
else:
raise RuntimeError("no such module: %s" % module)
impl = FromFile(path)
compiled = nodejs_compile(impl.code, lang=impl.lang, file=impl.file)
if "error" in compiled:
raise CompilationError(compiled.error)
if impl.lang == "less":
code = _style_template % dict(css=json.dumps(compiled.code))
deps = []
else:
code = compiled.code
deps = compiled.deps
sig = hashlib.sha256(code.encode('utf-8')).hexdigest()
resolved[module] = sig
deps_map = resolve_deps(deps, dirname(path))
if sig not in extra_modules:
extra_modules[sig] = True
modules.append((sig, code, deps_map))
else:
index = module + ("" if module.endswith("/") else "/") + "index"
if index not in known_modules:
raise RuntimeError("no such module: %s" % module)
return resolved
def resolve_deps(deps, root):
custom_modules = set(model.module for model in custom_models.values())
missing = set(deps) - known_modules - custom_modules
return resolve_modules(missing, root)
for model in custom_models.values():
compiled = custom_impls[model.full_name]
deps_map = resolve_deps(compiled.deps, model.path)
exports.append((model.name, model.module))
modules.append((model.module, compiled.code, deps_map))
# sort everything by module name
exports = sorted(exports, key=lambda spec: spec[1])
modules = sorted(modules, key=lambda spec: spec[0])
for i, (module, code, deps) in enumerate(modules):
for name, ref in deps.items():
code = code.replace("""require("%s")""" % name, """require("%s")""" % ref)
code = code.replace("""require('%s')""" % name, """require('%s')""" % ref)
modules[i] = (module, code)
sep = ",\n"
exports = sep.join(_export_template % dict(name=name, module=module) for (name, module) in exports)
modules = sep.join(_module_template % dict(module=module, source=code) for (module, code) in modules)
content = _plugin_template % dict(prelude=_plugin_prelude, exports=exports, modules=modules)
return _plugin_umd % dict(content=content)
def get_resource(self, resource_key, repository_type, location,
**variables):
"""Get a resource.
Attempts to get and return a cached version of the resource if
available, otherwise a new resource object is created and returned.
Args:
resource_key (`str`): Name of the type of `Resources` to find
repository_type (`str`): What sort of repository to look for the
resource in
location (`str`): location for the repository
variables: data to identify / store on the resource
Returns:
`PackageRepositoryResource` instance.
"""
path = "%s@%s" % (repository_type, location)
repo = self.get_repository(path)
resource = repo.get_resource(**variables)
return resource
def predict_leaf_node_assignment(self, test_data, type="Path"):
"""
Predict on a dataset and return the leaf node assignment (only for tree-based models).
:param H2OFrame test_data: Data on which to make predictions.
:param Enum type: How to identify the leaf node. Nodes can be either identified by a path from to the root node
of the tree to the node or by H2O's internal node id. One of: ``"Path"``, ``"Node_ID"`` (default: ``"Path"``).
:returns: A new H2OFrame of predictions.
"""
if not isinstance(test_data, h2o.H2OFrame): raise ValueError("test_data must be an instance of H2OFrame")
assert_is_type(type, None, Enum("Path", "Node_ID"))
j = h2o.api("POST /3/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
data={"leaf_node_assignment": True, "leaf_node_assignment_type": type})
return h2o.get_frame(j["predictions_frame"]["name"])
def update_subnet(subnet, name, profile=None):
'''
Updates a subnet
CLI Example:
.. code-block:: bash
salt '*' neutron.update_subnet subnet-name new-subnet-name
:param subnet: ID or name of subnet to update
:param name: Name of this subnet
:param profile: Profile to build on (Optional)
:return: Value of updated subnet information
'''
conn = _auth(profile)
return conn.update_subnet(subnet, name)
def add_safety_checks(meta, members):
"""
Iterate through each member of the class being created and add a
safety check to every method that isn't marked as read-only.
"""
for member_name, member_value in members.items():
members[member_name] = meta.add_safety_check(
member_name, member_value)
def command_ack_send(self, command, result, force_mavlink1=False):
'''
Report status of a command. Includes feedback wether the command was
executed.
command : Command ID, as defined by MAV_CMD enum. (uint16_t)
result : See MAV_RESULT enum (uint8_t)
'''
return self.send(self.command_ack_encode(command, result), force_mavlink1=force_mavlink1)
def as_dict(self):
"""
Returns:
dict
"""
dict_fw = {
'id': self.id,
'name': self.name,
'description': self.description,
'rules_direction': self.rules_direction,
'rules_ip_protocol': self.rules_ip_protocol,
'rules_from_port': self.rules_from_port,
'rules_to_port': self.rules_to_port,
'rules_grants_group_id': self.rules_grants_group_id,
'rules_grants_name': self.rules_grants_name,
'rules_grants_cidr_ip': self.rules_grants_cidr_ip,
'rules_description': self.rules_description
}
return dict_fw
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='bfill')"""
return self.fillna(
method="bfill", axis=axis, limit=limit, downcast=downcast, inplace=inplace
)
def send_command_return(self, command, *arguments):
""" Send command and wait for single line output. """
return self.api.send_command_return(self, command, *arguments)
def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
regardless of the callable's behavior
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for Pearson
and Spearman correlation.
Returns
-------
DataFrame
Correlation matrix.
See Also
--------
DataFrame.corrwith
Series.corr
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == 'pearson':
correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods)
elif method == 'spearman':
correl = libalgos.nancorr_spearman(ensure_float64(mat),
minp=min_periods)
elif method == 'kendall' or callable(method):
if min_periods is None:
min_periods = 1
mat = ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
"'{method}' was supplied".format(method=method))
return self._constructor(correl, index=idx, columns=cols)
def __find_handles(self, model, **spec):
""" find model instances based on given filter (spec)
The filter is based on available server-calls, so some values might not be available for filtering.
Multiple filter-values is going to do multiple server-calls.
For complex filters in small datasets, it might be faster to fetch all and do your own in-memory filter.
Empty filter will fetch all.
:param model: subclass of EConomicsModel
:param spec: mapping of values to filter by
:return: a list of EConomicsModel instances
"""
server_calls = []
filter_names = dict([(f['name'], f['method'],) for f in model.get_filters()])
if not spec:
server_calls.append({'method': "%s_GetAll" % model.__name__, 'args': []})
else:
for key, value in spec.items():
if key not in filter_names:
raise ValueError("no server-method exists for filtering by '%s'" % key)
args = []
if not hasattr(value, '__iter__'):
value = [value]
if key.endswith('_list'):
vtype = type(value[0]).__name__
# TODO: this surely does not cover all cases of data types
array = self.soap_factory.create('ArrayOf%s' % vtype.capitalize())
getattr(array, "%s" % vtype).extend(value)
args.append(array)
else:
args.extend(value)
method = "%s_%s" % (model.__name__, filter_names[key])
if filter_names[key].startswith('GetAll'):
args = []
server_calls.append({'method': method, 'args': args, 'expect': "%sHandle" % model.__name__})
handles = [
map(Handle, self.fetch_list(scall['method'], scall.get('expect'), *scall['args']))
for scall in server_calls
]
return [h.wsdl for h in reduce(set.intersection, map(set, handles))]
def is_event_loop_running_qt4(app=None):
"""Is the qt4 event loop running."""
if app is None:
app = get_app_qt4([''])
if hasattr(app, '_in_event_loop'):
return app._in_event_loop
else:
# Does qt4 provide a other way to detect this?
return False
def authenticateRequest(self, service_request, username, password, **kwargs):
"""
Processes an authentication request. If no authenticator is supplied,
then authentication succeeds.
@return: Returns a C{bool} based on the result of authorization. A
value of C{False} will stop processing the request and return an
error to the client.
@rtype: C{bool}
"""
authenticator = self.getAuthenticator(service_request)
if authenticator is None:
return True
args = (username, password)
if hasattr(authenticator, '_pyamf_expose_request'):
http_request = kwargs.get('http_request', None)
args = (http_request,) + args
return authenticator(*args) == True
def get_diag(code, command):
""" Generate diagramm and return data """
import tempfile
import shutil
code = code + u'\n'
try:
tmpdir = tempfile.mkdtemp()
fd, diag_name = tempfile.mkstemp(dir=tmpdir)
f = os.fdopen(fd, "w")
f.write(code.encode('utf-8'))
f.close()
format = _draw_mode.lower()
draw_name = diag_name + '.' + format
saved_argv = sys.argv
argv = [diag_name, '-T', format, '-o', draw_name]
if _draw_mode == 'SVG':
argv += ['--ignore-pil']
# Run command
command.main(argv)
# Read image data from file
file_name = diag_name + '.' + _publish_mode.lower()
with io.open(file_name, 'rb') as f:
data = f.read()
f.close()
finally:
for file in os.listdir(tmpdir):
os.unlink(tmpdir + "/" + file)
# os.rmdir will fail -> use shutil
shutil.rmtree(tmpdir)
return data
def Datacenters(alias=None, session=None):
"""Return all cloud locations available to the calling alias.
>>> clc.v2.Datacenter.Datacenters(alias=None)
[<clc.APIv2.datacenter.Datacenter instance at 0x101462fc8>, <clc.APIv2.datacenter.Datacenter instance at 0x101464320>]
"""
if not alias: alias = clc.v2.Account.GetAlias(session=session)
datacenters = []
for r in clc.v2.API.Call('GET','datacenters/%s' % alias,{}, session=session):
datacenters.append(Datacenter(location=r['id'],name=r['name'],alias=alias,session=session))
return(datacenters)
def create_binary_array(self, key, value):
"""Create method of CRUD operation for binary array data.
Args:
key (string): The variable to write to the DB.
value (any): The data to write to the DB.
Returns:
(string): Result of DB write.
"""
data = None
if key is not None and value is not None:
value_encoded = []
for v in value:
try:
# py2
# convert to bytes as required for b64encode
# decode bytes for json serialization as required for json dumps
value_encoded.append(base64.b64encode(bytes(v)).decode('utf-8'))
except TypeError:
# py3
# set encoding on string and convert to bytes as required for b64encode
# decode bytes for json serialization as required for json dumps
value_encoded.append(base64.b64encode(bytes(v, 'utf-8')).decode('utf-8'))
data = self.db.create(key.strip(), json.dumps(value_encoded))
else:
self.tcex.log.warning(u'The key or value field was None.')
return data
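# Added illustration (not from the original source): the encode/decode steps in
# the py2/py3 branches above reduce to this round trip -- raw bytes become a
# JSON-safe base64 string.
def _demo_b64_for_json():
    import base64
    import json
    raw = bytes('hello', 'utf-8')                    # mirrors the py3 branch
    encoded = base64.b64encode(raw).decode('utf-8')  # 'aGVsbG8='
    return json.dumps([encoded])                     # '["aGVsbG8="]'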
def unbuild(self):
"""
Iterates through the views pointed to by self.detail_views, runs
unbuild_object with `self`, and calls _build_extra()
and _build_related().
"""
for detail_view in self.detail_views:
view = self._get_view(detail_view)
view().unbuild_object(self)
self._unbuild_extra()
# _build_related again to kill the object from RSS etc.
self._build_related()
def delete(self, network):
"""
Wraps the standard delete() method to catch expected exceptions and
raise the appropriate pyrax exceptions.
"""
try:
return super(CloudNetworkClient, self).delete(network)
except exc.Forbidden as e:
# Network is in use
raise exc.NetworkInUse("Cannot delete a network in use by a server.")
def get(self, id, domain='messages'):
"""
Gets a message translation.
@rtype: str
@return: The message translation
"""
assert isinstance(id, (str, unicode))
assert isinstance(domain, (str, unicode))
if self.defines(id, domain):
return self.messages[domain][id]
if self.fallback_catalogue is not None:
return self.fallback_catalogue.get(id, domain)
return id
def ginput(self, data_set=0, **kwargs):
"""
Pops up the figure for the specified data set.
Returns value from pylab.ginput().
kwargs are sent to pylab.ginput()
"""
# this will temporarily fix the deprecation warning
import warnings
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
_s.tweaks.raise_figure_window(data_set+self['first_figure'])
return _p.ginput(**kwargs)
def getitem(source, index):
"""Forward one or several items from an asynchronous sequence.
The argument can either be a slice or an integer.
See the slice and item operators for more information.
"""
if isinstance(index, builtins.slice):
return slice.raw(source, index.start, index.stop, index.step)
if isinstance(index, int):
return item.raw(source, index)
raise TypeError("Not a valid index (int or slice)")
def wr_row_mergeall(self, worksheet, txtstr, fmt, row_idx):
"""Merge all columns and place text string in widened cell."""
hdridxval = len(self.hdrs) - 1
worksheet.merge_range(row_idx, 0, row_idx, hdridxval, txtstr, fmt)
return row_idx + 1
def semantic_similarity(go_id1, go_id2, godag, branch_dist=None):
'''
Finds the semantic similarity (inverse of the semantic distance)
between two GO terms.
'''
dist = semantic_distance(go_id1, go_id2, godag, branch_dist)
if dist is not None:
return 1.0 / float(dist)
def _dim_attribute(self, attr, *args, **kwargs):
"""
Returns a list of dimension attribute attr, for the
dimensions specified as strings in args.
.. code-block:: python
ntime, nbl, nchan = cube._dim_attribute('global_size', 'ntime', 'nbl', 'nchan')
or
.. code-block:: python
ntime, nbl, nchan, nsrc = cube._dim_attribute('global_size', 'ntime,nbl:nchan nsrc')
"""
import re
# If we got a single string argument, try splitting it by separators
if len(args) == 1 and isinstance(args[0], str):
args = (s.strip() for s in re.split(',|:|;| ', args[0]))
# Now get the specific attribute for each string dimension
# Integers are returned as is
result = [d if isinstance(d, (int, np.integer))
else getattr(self._dims[d], attr) for d in args]
# Return single element if length one and single else entire list
return (result[0] if kwargs.get('single', True)
and len(result) == 1 else result)
def write_cache_decorator(self, node_or_pagetag, name,
args, buffered, identifiers,
inline=False, toplevel=False):
"""write a post-function decorator to replace a rendering
callable with a cached version of itself."""
self.printer.writeline("__M_%s = %s" % (name, name))
cachekey = node_or_pagetag.parsed_attributes.get('cache_key',
repr(name))
cache_args = {}
if self.compiler.pagetag is not None:
cache_args.update(
(
pa[6:],
self.compiler.pagetag.parsed_attributes[pa]
)
for pa in self.compiler.pagetag.parsed_attributes
if pa.startswith('cache_') and pa != 'cache_key'
)
cache_args.update(
(
pa[6:],
node_or_pagetag.parsed_attributes[pa]
) for pa in node_or_pagetag.parsed_attributes
if pa.startswith('cache_') and pa != 'cache_key'
)
if 'timeout' in cache_args:
cache_args['timeout'] = int(eval(cache_args['timeout']))
self.printer.writeline("def %s(%s):" % (name, ','.join(args)))
# form "arg1, arg2, arg3=arg3, arg4=arg4", etc.
pass_args = [
"%s=%s" % ((a.split('=')[0],) * 2) if '=' in a else a
for a in args
]
self.write_variable_declares(
identifiers,
toplevel=toplevel,
limit=node_or_pagetag.undeclared_identifiers()
)
if buffered:
s = "context.get('local')."\
"cache._ctx_get_or_create("\
"%s, lambda:__M_%s(%s), context, %s__M_defname=%r)" % (
cachekey, name, ','.join(pass_args),
''.join(["%s=%s, " % (k, v)
for k, v in cache_args.items()]),
name
)
# apply buffer_filters
s = self.create_filter_callable(self.compiler.buffer_filters, s,
False)
self.printer.writelines("return " + s, None)
else:
self.printer.writelines(
"__M_writer(context.get('local')."
"cache._ctx_get_or_create("
"%s, lambda:__M_%s(%s), context, %s__M_defname=%r))" %
(
cachekey, name, ','.join(pass_args),
''.join(["%s=%s, " % (k, v)
for k, v in cache_args.items()]),
name,
),
"return ''",
None
)
def date_to_epoch(year, month, day):
""" Converts a date to epoch in UTC
Args:
year: int between 1 and 9999.
month: int between 1 and 12.
day: int between 1 and 31.
Returns:
Int epoch in UTC from date.
"""
return int(date_to_delorean(year, month, day).epoch)
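# Added usage sketch (not from the original source): assuming date_to_delorean
# builds a midnight-UTC Delorean object, the date below maps to the well-known
# epoch value for 2015-01-01T00:00:00Z.
def _demo_date_to_epoch():
    return date_to_epoch(2015, 1, 1)  # -> 1420070400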
def augment_detections(hyper_params, feature, label):
"""
Augment the detection dataset.
In your hyper_parameters.problem.augmentation add configurations to enable features.
Supports "enable_horizontal_flip", "enable_micro_translation", "random_crop" : {"shape": { "width", "height" }}
and "enable_texture_augmentation". Make sure to also set the "steps" otherwise this method will not be used properly.
Random crop ensures at least one detection is in the crop region.
Sample configuration
"problem": {
"augmentation": {
"steps": 40,
"enable_texture_augmentation": true,
"enable_micro_translation": true,
"enable_horizontal_flip": true,
"random_crop": {
"shape": {
"width": 256,
"height": 256
}
}
}
}
:param hyper_params: The hyper parameters object
:param feature: A dict containing all features, must be in the style created by detection datasets.
:param label: A label dict in the detection dataset style.
:return: Modified feature and label dict (copied & modified).
"""
# Do not augment these ways:
# 1) Rotation is not possible
# 3) Scaling is not possible, because it ruins depth perception
# However, random crops can improve performance. (Training speed and accuracy)
if hyper_params.problem.get("augmentation", None) is None:
return feature, label
img_h, img_w, img_c = feature["image"].shape
augmented_feature = {}
augmented_label = {}
augmented_feature["image"] = feature["image"].copy()
if "depth" in feature:
augmented_feature["depth"] = feature["depth"].copy()
if "calibration" in feature:
augmented_feature["calibration"] = feature["calibration"]
augmented_feature["hflipped"] = np.array([0], dtype=np.uint8)
augmented_feature["crop_offset"] = np.array([0, 0], dtype=np.int8)
for k in label.keys():
augmented_label[k] = [detection.copy() for detection in label[k]]
if hyper_params.problem.augmentation.get("enable_horizontal_flip", False):
if random.random() < 0.5:
img_h, img_w, img_c = augmented_feature["image"].shape
augmented_feature["image"] = np.fliplr(augmented_feature["image"])
if "depth" in feature:
augmented_feature["depth"] = np.fliplr(augmented_feature["depth"])
augmented_feature["hflipped"][0] = 1
hflip_detections(augmented_label, img_w)
if hyper_params.problem.augmentation.get("enable_micro_translation", False):
img_h, img_w, img_c = augmented_feature["image"].shape
dx = int(random.random() * 3)
dy = int(random.random() * 3)
augmented_feature["image"] = crop_image(augmented_feature["image"], dy, dx, img_h - dy, img_w - dx)
if "depth" in feature:
augmented_feature["depth"] = crop_image(augmented_feature["depth"], dy, dx, img_h - dy, img_w - dx)
augmented_feature["crop_offset"][0] += dy
augmented_feature["crop_offset"][1] += dx
move_detections(augmented_label, -dy, -dx)
if hyper_params.problem.augmentation.get("random_crop", None) is not None:
img_h, img_w, img_c = augmented_feature["image"].shape
target_w = hyper_params.problem.augmentation.random_crop.shape.width
target_h = hyper_params.problem.augmentation.random_crop.shape.height
delta_x = max(int(math.ceil((target_w + 1 - img_w) / 2)), 0)
delta_y = max(int(math.ceil((target_h + 1 - img_h) / 2)), 0)
move_detections(augmented_label, delta_y, delta_x)
augmented_feature["image"] = cv2.copyMakeBorder(augmented_feature["image"],
delta_y, delta_y, delta_x, delta_x,
cv2.BORDER_CONSTANT)
img_h, img_w, img_c = augmented_feature["image"].shape
start_x = 0
start_y = 0
if len(augmented_label["detections_2d"]) != 0:
idx = random.randint(0, len(augmented_label["detections_2d"]) - 1)
detection = augmented_label["detections_2d"][idx]
start_x = int(detection.cx - random.random() * (target_w - 20) / 2.0 - 10)
start_y = int(detection.cy - random.random() * (target_h - 20) / 2.0 - 10)
else:
start_x = int(img_w * random.random())
start_y = int(img_h * random.random())
# Compute start point so that crop fit's into image and random crop contains detection
if start_x < 0:
start_x = 0
if start_y < 0:
start_y = 0
if start_x >= img_w - target_w:
start_x = img_w - target_w - 1
if start_y >= img_h - target_h:
start_y = img_h - target_h - 1
# Crop image
augmented_feature["image"] = crop_image(augmented_feature["image"], start_y, start_x, target_h, target_w)
if "depth" in feature:
augmented_feature["depth"] = crop_image(augmented_feature["depth"], start_y, start_x, target_h, target_w)
augmented_feature["crop_offset"][0] += start_y
augmented_feature["crop_offset"][1] += start_x
# Crop labels
move_detections(augmented_label, -start_y, -start_x)
if hyper_params.problem.augmentation.get("enable_texture_augmentation", False):
if random.random() < 0.5:
augmented_feature["image"] = full_texture_augmentation(augmented_feature["image"])
return augmented_feature, augmented_label
def process(c, request, name=None):
"""
process uses the current request to determine which menus
should be visible, which are selected, etc.
"""
# make sure we're loaded & sorted
c.load_menus()
c.sort_menus()
if name is None:
# special case, process all menus
items = {}
for name in c.items:
items[name] = c.process(request, name)
return items
if name not in c.items:
return []
items = copy.deepcopy(c.items[name])
curitem = None
for item in items:
item.process(request)
if item.visible:
item.selected = False
if item.match_url(request):
if curitem is None or len(curitem.url) < len(item.url):
curitem = item
if curitem is not None:
curitem.selected = True
# return only visible items
visible = [
item
for item in items
if item.visible
]
# determine if we should apply 'selected' to parents when one of their
# children is the 'selected' menu
if getattr(settings, 'MENU_SELECT_PARENTS', False):
def is_child_selected(item):
for child in item.children:
if child.selected or is_child_selected(child):
return True
for item in visible:
if is_child_selected(item):
item.selected = True
return visible | 0.001244 |
def follow(self, delay=1.0):
"""\
Iterator generator that returns lines as data is added to the file.
Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035
"""
trailing = True
while 1:
where = self.file.tell()
line = self.file.readline()
if line:
if trailing and line in self.line_terminators:
# This is just the line terminator added to the end of the file
# before a new line, ignore.
trailing = False
continue
if line[-1] in self.line_terminators:
line = line[:-1]
                    if line[-1:] == '\r' and '\r\n' in self.line_terminators:
# found crlf
line = line[:-1]
trailing = False
yield line
else:
trailing = True
self.seek(where)
time.sleep(delay) | 0.003842 |
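A self-contained sketch of the same tail-follow idea on a plain file handle (illustrative only, not the class the method above belongs to):

import time

def follow_lines(path, delay=1.0):
    # Yield completed lines as they are appended to the file, like `tail -f`.
    with open(path) as f:
        f.seek(0, 2)  # start at the current end of the file
        while True:
            line = f.readline()
            if not line:
                time.sleep(delay)
                continue
            yield line.rstrip("\r\n")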
def does_database_exist(self, database_name):
"""
Checks if a database exists in CosmosDB.
"""
if database_name is None:
raise AirflowBadRequest("Database name cannot be None.")
existing_database = list(self.get_conn().QueryDatabases({
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [
{"name": "@id", "value": database_name}
]
}))
        return len(existing_database) > 0 | 0.003711 |
def stop_instance(self, instance_id):
"""Stops the instance gracefully.
:param str instance_id: instance identifier
"""
instance = self._load_instance(instance_id)
instance.delete()
del self._instances[instance_id] | 0.007605 |
async def parse_request(req):
"""
Parses and validates request
    :param req: the incoming request, either a raw HTTP request or an already-deserialized Activity
    :return: the validated Activity
"""
async def validate_activity(activity: Activity):
if not isinstance(activity.type, str):
raise TypeError('BotFrameworkAdapter.parse_request(): invalid or missing activity type.')
return True
if not isinstance(req, Activity):
# If the req is a raw HTTP Request, try to deserialize it into an Activity and return the Activity.
if hasattr(req, 'body'):
try:
activity = Activity().deserialize(req.body)
is_valid_activity = await validate_activity(activity)
if is_valid_activity:
return activity
except Exception as e:
raise e
elif 'body' in req:
try:
activity = Activity().deserialize(req['body'])
is_valid_activity = await validate_activity(activity)
if is_valid_activity:
return activity
except Exception as e:
raise e
else:
raise TypeError('BotFrameworkAdapter.parse_request(): received invalid request')
else:
# The `req` has already been deserialized to an Activity, so verify the Activity.type and return it.
is_valid_activity = await validate_activity(req)
if is_valid_activity:
return req | 0.003814 |
def path(self):
"""
Return the path to this directory.
"""
p = ''
if self._parent and self._parent.path:
p = os.path.join(p, self._parent.path)
if self._base:
p = os.path.join(p, self._base)
if self._path:
p = os.path.join(p, self._path)
return p | 0.005747 |
def limit_result_set(self, start, end):
"""By default, searches return all matching results.
This method restricts the number of results by setting the start
and end of the result set, starting from 1. The starting and
ending results can be used for paging results when a certain
ordering is requested. The ending position must be greater than
the starting position.
arg: start (cardinal): the start of the result set
arg: end (cardinal): the end of the result set
raise: InvalidArgument - ``end`` is less than or equal to
``start``
*compliance: mandatory -- This method must be implemented.*
"""
if not isinstance(start, int) or not isinstance(end, int):
raise errors.InvalidArgument('start and end arguments must be integers.')
if end <= start:
raise errors.InvalidArgument('End must be greater than start.')
# because Python is 0 indexed
# Spec says that passing in (1, 25) should include 25 entries (1 - 25)
# Python indices 0 - 24
# Python [#:##] stops before the last index, but does not include it
self._limit_result_set_start = start - 1
self._limit_result_set_end = end | 0.002342 |
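A worked example of the off-by-one arithmetic the comments describe (plain Python, independent of the runtime object this method belongs to):

results = list(range(100))      # pretend these are the matching results
start, end = 1, 25              # the spec counts from 1 and includes both ends
page = results[start - 1:end]   # Python slice [0:25]
assert len(page) == 25          # exactly 25 entries, as the spec requires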
def nice_pair(pair):
"""Make a nice string representation of a pair of numbers.
If the numbers are equal, just return the number, otherwise return the pair
with a dash between them, indicating the range.
"""
start, end = pair
if start == end:
return "%d" % start
else:
return "%d-%d" % (start, end) | 0.002907 |
def generate(self, x, **kwargs):
"""
Returns the graph for Fast Gradient Method adversarial examples.
:param x: The model's symbolic inputs.
:param kwargs: See `parse_params`
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
labels, _nb_classes = self.get_or_guess_labels(x, kwargs)
return fgm(
x,
self.model.get_logits(x),
y=labels,
eps=self.eps,
ord=self.ord,
clip_min=self.clip_min,
clip_max=self.clip_max,
targeted=(self.y_target is not None),
sanity_checks=self.sanity_checks) | 0.0016 |
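A minimal NumPy sketch of the infinity-norm update that the fgm graph encodes, written outside TensorFlow for clarity (this is not the cleverhans implementation itself):

import numpy as np

def fgsm_step(x, grad, eps, clip_min=0.0, clip_max=1.0):
    # Move each input element by eps in the sign of the loss gradient,
    # then clip back into the valid input range.
    return np.clip(x + eps * np.sign(grad), clip_min, clip_max)

x = np.array([0.2, 0.5, 0.9])
grad = np.array([0.3, -0.1, 0.0])
print(fgsm_step(x, grad, eps=0.1))  # [0.3 0.4 0.9]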
def _sample_row(self):
"""Generate a single sampled row from vine model.
Returns:
numpy.ndarray
"""
unis = np.random.uniform(0, 1, self.n_var)
# randomly select a node to start with
first_ind = np.random.randint(0, self.n_var)
adj = self.trees[0].get_adjacent_matrix()
visited = []
explore = [first_ind]
sampled = np.zeros(self.n_var)
itr = 0
while explore:
current = explore.pop(0)
neighbors = np.where(adj[current, :] == 1)[0].tolist()
if itr == 0:
new_x = self.ppfs[current](unis[current])
else:
for i in range(itr - 1, -1, -1):
current_ind = -1
if i >= self.truncated:
continue
current_tree = self.trees[i].edges
# get index of edge to retrieve
for edge in current_tree:
if i == 0:
if (edge.L == current and edge.R == visited[0]) or\
(edge.R == current and edge.L == visited[0]):
current_ind = edge.index
break
else:
if edge.L == current or edge.R == current:
condition = set(edge.D)
condition.add(edge.L)
condition.add(edge.R)
visit_set = set(visited)
visit_set.add(current)
if condition.issubset(visit_set):
current_ind = edge.index
break
if current_ind != -1:
                        # the node is not independent conditional on the visited nodes
copula_type = current_tree[current_ind].name
copula = Bivariate(CopulaTypes(copula_type))
copula.theta = current_tree[current_ind].theta
derivative = copula.partial_derivative_scalar
if i == itr - 1:
tmp = optimize.fminbound(
derivative, EPSILON, 1.0,
args=(unis[visited[0]], unis[current])
)
else:
tmp = optimize.fminbound(
derivative, EPSILON, 1.0,
args=(unis[visited[0]], tmp)
)
tmp = min(max(tmp, EPSILON), 0.99)
new_x = self.ppfs[current](tmp)
sampled[current] = new_x
for s in neighbors:
if s not in visited:
explore.insert(0, s)
itr += 1
visited.insert(0, current)
return sampled | 0.000655 |
def is_connected(T, directed=True):
r"""Check connectivity of the transition matrix.
Return true, if the input matrix is completely connected,
effectively checking if the number of connected components equals one.
Parameters
----------
T : scipy.sparse matrix
Transition matrix
directed : bool, optional
Whether to compute connected components for a directed or
undirected graph. Default is True.
Returns
-------
connected : boolean, returning true only if T is connected.
"""
nc = connected_components(T, directed=directed, connection='strong', \
return_labels=False)
return nc == 1 | 0.002886 |
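A quick check of the same idea on a toy 3-state chain, with the import the snippet assumes (connected_components from scipy.sparse.csgraph):

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components

T = csr_matrix(np.array([[0.5, 0.5, 0.0],
                         [0.5, 0.0, 0.5],
                         [0.0, 0.5, 0.5]]))
nc = connected_components(T, directed=True, connection='strong', return_labels=False)
print(nc == 1)  # True: every state can reach every other state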
def safestr(str_):
''' get back an alphanumeric only version of source '''
str_ = str_ or ""
return "".join(x for x in str_ if x.isalnum()) | 0.006623 |
def fit_mle(self,
init_vals,
num_draws,
seed=None,
constrained_pos=None,
print_res=True,
method="BFGS",
loss_tol=1e-06,
gradient_tol=1e-06,
maxiter=1000,
ridge=None,
just_point=False,
**kwargs):
"""
Parameters
----------
init_vals : 1D ndarray.
Should contain the initial values to start the optimization process
with. There should be one value for each utility coefficient and
shape parameter being estimated.
num_draws : int.
Should be greater than zero. Denotes the number of draws that we
are making from each normal distribution.
seed : int or None, optional.
If an int is passed, it should be greater than zero. Denotes the
value to be used in seeding the random generator used to generate
the draws from the normal distribution. Default == None.
constrained_pos : list or None, optional.
Denotes the positions of the array of estimated parameters that are
not to change from their initial values. If a list is passed, the
elements are to be integers where no such integer is greater than
`init_values.size.` Default == None.
print_res : bool, optional.
Determines whether the timing and initial and final log likelihood
            results will be printed as they are determined.
method : str, optional.
Should be a valid string which can be passed to
scipy.optimize.minimize. Determines the optimization algorithm
that is used for this problem.
loss_tol : float, optional.
Determines the tolerance on the difference in objective function
values from one iteration to the next which is needed to determine
convergence. Default = 1e-06.
gradient_tol : float, optional.
Determines the tolerance on the difference in gradient values from
one iteration to the next which is needed to determine convergence.
Default = 1e-06.
maxiter : int, optional.
Denotes the maximum number of iterations of the algorithm specified
by `method` that will be used to estimate the parameters of the
given model. Default == 1000.
ridge : int, float, long, or None, optional.
Determines whether or not ridge regression is performed. If a float
is passed, then that float determines the ridge penalty for the
optimization. Default = None.
just_point : bool, optional.
Determines whether (True) or not (False) calculations that are non-
critical for obtaining the maximum likelihood point estimate will
be performed. If True, this function will return the results
dictionary from scipy.optimize. Default == False.
Returns
-------
None. Estimation results are saved to the model instance.
"""
# Check integrity of passed arguments
kwargs_to_be_ignored = ["init_shapes", "init_intercepts", "init_coefs"]
if any([x in kwargs for x in kwargs_to_be_ignored]):
            msg = "MNL model does not use any of the following kwargs:\n{}"
msg_2 = "Remove such kwargs and pass a single init_vals argument"
raise ValueError(msg.format(kwargs_to_be_ignored) + msg_2)
# Store the optimization method
self.optimization_method = method
# Store the ridge parameter
self.ridge_param = ridge
if ridge is not None:
warnings.warn(_ridge_warning_msg)
# Construct the mappings from alternatives to observations and from
# chosen alternatives to observations
mapping_res = self.get_mappings_for_fit()
rows_to_mixers = mapping_res["rows_to_mixers"]
# Get the draws for each random coefficient
num_mixing_units = rows_to_mixers.shape[1]
draw_list = mlc.get_normal_draws(num_mixing_units,
num_draws,
len(self.mixing_pos),
seed=seed)
# Create the 3D design matrix
self.design_3d = mlc.create_expanded_design_for_mixing(self.design,
draw_list,
self.mixing_pos,
rows_to_mixers)
# Create the estimation object
zero_vector = np.zeros(init_vals.shape)
mixl_estimator = MixedEstimator(self,
mapping_res,
ridge,
zero_vector,
split_param_vec,
constrained_pos=constrained_pos)
# Perform one final check on the length of the initial values
mixl_estimator.check_length_of_initial_values(init_vals)
# Get the estimation results
estimation_res = estimate(init_vals,
mixl_estimator,
method,
loss_tol,
gradient_tol,
maxiter,
print_res,
use_hessian=True,
just_point=just_point)
if not just_point:
# Store the mixed logit specific estimation results
args = [mixl_estimator, estimation_res]
estimation_res = add_mixl_specific_results_to_estimation_res(*args)
# Store the estimation results
self.store_fit_results(estimation_res)
return None
else:
return estimation_res | 0.002273 |
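A rough NumPy sketch of the array shapes involved in the mixing step above; the names and sizes are illustrative, and the real expansion (which also multiplies the mixed columns by their draws) lives in pylogit's helper functions:

import numpy as np

num_rows, num_vars = 6, 3       # long-format rows and design columns
num_mixers, num_draws = 2, 4    # decision makers and draws per mixed coefficient
design = np.random.rand(num_rows, num_vars)
draws = np.random.randn(num_mixers, num_draws)   # one set of normal draws per mixer
# Repeating the design across draws gives the 3D shape used for simulation:
design_3d = np.repeat(design[:, None, :], num_draws, axis=1)
print(design_3d.shape)          # (6, 4, 3): (rows, draws, variables)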
def isdir(self, relpath):
"""Returns True if path is a directory and is not ignored."""
if self._isdir_raw(relpath):
if not self.isignored(relpath, directory=True):
return True
return False | 0.009259 |
def make_put_request(url, data, params, headers, connection):
"""
Helper function that makes an HTTP PUT request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`data`: JSON serializable dict that will be stored in the remote storage.
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
    The return value is a Python dict deserialized by the JSON decoder. However,
    if the status code is not 2xx or 403, a requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
response = make_put_request('http://firebase.localhost/users',
'{"1": "Ozgur Vatansever"}',
{'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {'1': 'Ozgur Vatansever'} or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.put(url, data=data, params=params, headers=headers,
timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status() | 0.001411 |
def overlap(self, max_hang=100):
"""
Determine the type of overlap given query, ref alignment coordinates
Consider the following alignment between sequence a and b:
aLhang \ / aRhang
\------------/
/------------\
bLhang / \ bRhang
Terminal overlap: a before b, b before a
Contain overlap: a in b, b in a
"""
aL, aR = 1, self.reflen
bL, bR = 1, self.querylen
aLhang, aRhang = self.start1 - aL, aR - self.end1
bLhang, bRhang = self.start2 - bL, bR - self.end2
if self.orientation == '-':
bLhang, bRhang = bRhang, bLhang
s1 = aLhang + bRhang
s2 = aRhang + bLhang
s3 = aLhang + aRhang
s4 = bLhang + bRhang
# Dovetail (terminal) overlap
if s1 < max_hang:
type = 2 # b ~ a
elif s2 < max_hang:
type = 1 # a ~ b
# Containment overlap
elif s3 < max_hang:
type = 3 # a in b
elif s4 < max_hang:
type = 4 # b in a
else:
type = 0
return type | 0.004284 |
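A worked example of the hang arithmetic above, with made-up coordinates for a dovetail where a comes before b:

aL, aR, bL, bR = 1, 100, 1, 80               # reference a is 100 bp, query b is 80 bp
start1, end1, start2, end2 = 61, 100, 1, 40  # alignment covers a[61:100] and b[1:40], forward strand
aLhang, aRhang = start1 - aL, aR - end1      # 60, 0
bLhang, bRhang = start2 - bL, bR - end2      # 0, 40
print(aLhang + bRhang, aRhang + bLhang)      # 100 0 -> s2 < max_hang, so type 1 (a ~ b)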
def _normalize_params(image, width, height, crop):
"""
Normalize params and calculate aspect.
"""
if width is None and height is None:
raise ValueError("Either width or height must be set. Otherwise "
"resizing is useless.")
if width is None or height is None:
aspect = float(image.width) / float(image.height)
if crop:
            raise ValueError("Cropping the image would be useless since only "
                             "one dimension is given to resize along.")
if width is None:
width = int(round(height * aspect))
else:
height = int(round(width / aspect))
return (width, height, crop) | 0.001406 |
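A usage example of the helper above; the image argument only needs .width and .height attributes, so a tiny stand-in object is used here:

class _Img:
    width, height = 800, 600

print(_normalize_params(_Img(), width=400, height=None, crop=False))  # (400, 300, False)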
def getN21PG(rates, ver, lamb, br, reactfn):
with h5py.File(str(reactfn), 'r', libver='latest') as fid:
A = fid['/N2_1PG/A'].value
lambnew = fid['/N2_1PG/lambda'].value.ravel(order='F')
franckcondon = fid['/N2_1PG/fc'].value
tau1PG = 1 / np.nansum(A, axis=1)
"""
solve for base concentration
confac=[1.66;1.56;1.31;1.07;.77;.5;.33;.17;.08;.04;.02;.004;.001]; %Cartwright, 1973b, stop at nuprime==12
Gattinger and Vallance Jones 1974
confac=array([1.66,1.86,1.57,1.07,.76,.45,.25,.14,.07,.03,.01,.004,.001])
"""
consfac = franckcondon/franckcondon.sum() # normalize
losscoef = (consfac / tau1PG).sum()
N01pg = rates.loc[..., 'p1pg'] / losscoef
scalevec = (A * consfac[:, None]).ravel(order='F') # for clarity (verified with matlab)
vnew = scalevec[None, None, :] * N01pg.values[..., None]
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br) | 0.003215 |
def mount(self, mount_point=None):
"""
mount container filesystem
:return: str, the location of the mounted file system
"""
cmd = ["podman", "mount", self._id or self.get_id()]
output = run_cmd(cmd, return_output=True).rstrip("\n\r")
return output | 0.006579 |
def absent(name, **connection_args):
'''
Ensure that the named database is absent
name
The name of the database to remove
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
#check if db exists and remove it
if __salt__['mysql.db_exists'](name, **connection_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = \
'Database {0} is present and needs to be removed'.format(name)
return ret
if __salt__['mysql.db_remove'](name, **connection_args):
ret['comment'] = 'Database {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
else:
err = _get_mysql_error()
if err is not None:
ret['comment'] = 'Unable to remove database {0} ' \
'({1})'.format(name, err)
ret['result'] = False
return ret
else:
err = _get_mysql_error()
if err is not None:
ret['comment'] = err
ret['result'] = False
return ret
# fallback
ret['comment'] = ('Database {0} is not present, so it cannot be removed'
).format(name)
return ret | 0.002262 |
def get_fileservice_dir():
"""
example settings file
FILESERVICE_CONFIG = {
'store_dir': '/var/lib/geoserver_data/fileservice_store'
}
"""
conf = getattr(settings, 'FILESERVICE_CONFIG', {})
dir = conf.get('store_dir', './fileservice_store')
return os.path.normpath(dir) + os.sep | 0.003145 |
def take_along_axis(large_array, indexes):
    """Gather values from large_array along axis 0 at the given indexes."""
# Reshape indexes into the right shape
if len(large_array.shape) > len(indexes.shape):
indexes = indexes.reshape(indexes.shape + tuple([1] * (len(large_array.shape) - len(indexes.shape))))
return np.take_along_axis(large_array, indexes, axis=0) | 0.005952 |
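A usage sketch of the helper above: the index array gets trailing singleton axes so NumPy can broadcast it, and whole rows are then gathered along axis 0:

import numpy as np

large_array = np.array([[1, 9],
                        [4, 2],
                        [7, 5]])
indexes = np.array([2, 0])                    # shape (2,) -> reshaped to (2, 1)
print(take_along_axis(large_array, indexes))  # [[7 5]
                                              #  [1 9]]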
def running_under_virtualenv():
"""
Return True if we're running inside a virtualenv, False otherwise.
"""
if hasattr(sys, 'real_prefix'):
return True
elif sys.prefix != getattr(sys, "base_prefix", sys.prefix):
return True
return False | 0.00361 |
def annotate(node):
"""Annotate a node with the stack frame describing the
SConscript file and line number that created it."""
tb = sys.exc_info()[2]
while tb and stack_bottom not in tb.tb_frame.f_locals:
tb = tb.tb_next
if not tb:
# We did not find any exec of an SConscript file: what?!
raise SCons.Errors.InternalError("could not find SConscript stack frame")
node.creator = traceback.extract_stack(tb)[0] | 0.004386 |
def haversine(px, py, r=r_mm):
'''
Calculate the haversine distance between two points
defined by (lat,lon) tuples.
Args:
px ((float,float)): lat/long position 1
py ((float,float)): lat/long position 2
r (float): Radius of sphere
Returns:
(int): Distance in mm.
'''
lat1, lon1 = px
lat2, lon2 = py
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
lat1 = math.radians(lat1)
lat2 = math.radians(lat2)
a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
c = 2 * math.asin(math.sqrt(a))
return c * r | 0.003096 |
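A usage example of the function above; the Earth radius is passed explicitly in millimetres because the default r_mm constant is defined elsewhere in the source module:

paris = (48.8566, 2.3522)
london = (51.5074, -0.1278)
d_mm = haversine(paris, london, r=6371008.8 * 1000)  # mean Earth radius in mm
print(round(d_mm / 1e6), 'km')                        # roughly 344 km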
def get_variables(self):
"""
Returns list of variables of the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_variables()
['light-on', 'bowel-problem', 'dog-out', 'hear-bark', 'family-out']
"""
variables = [variable.find('NAME').text for variable in self.network.findall('VARIABLE')]
return variables | 0.007042 |