| text (string, lengths 78–104k) | score (float64, 0–0.18) |
|---|---|
def run(cmd):
"""Run the given command.
Raises OSError if the command returns a non-zero exit status.
"""
log.debug("running '%s'", cmd)
fixed_cmd = cmd
if sys.platform == "win32" and cmd.count('"') > 2:
fixed_cmd = '"' + cmd + '"'
retval = os.system(fixed_cmd)
if hasattr(os, "WEXITSTATUS"):
status = os.WEXITSTATUS(retval)
else:
status = retval
if status:
raise OSError(status, "error running '%s'" % cmd) | 0.002083 |
def exerciseOptions(self, tickerId, contract, exerciseAction, exerciseQuantity, account, override):
"""exerciseOptions(EClientSocketBase self, TickerId tickerId, Contract contract, int exerciseAction, int exerciseQuantity, IBString const & account, int override)"""
return _swigibpy.EClientSocketBase_exerciseOptions(self, tickerId, contract, exerciseAction, exerciseQuantity, account, override) | 0.012165 |
def get_intern_pattern (url):
"""Return intern pattern for given URL. Redirections to the same
domain with or without "www." prepended are allowed."""
parts = strformat.url_unicode_split(url)
scheme = parts[0].lower()
domain = parts[1].lower()
domain, is_idn = urlutil.idna_encode(domain)
# allow redirection www.example.com -> example.com and vice versa
if domain.startswith('www.'):
domain = domain[4:]
if not (domain and scheme):
return None
path = urlutil.splitparams(parts[2])[0]
segments = path.split('/')[:-1]
path = "/".join(segments)
if url.endswith('/'):
path += '/'
args = list(re.escape(x) for x in (scheme, domain, path))
if args[0] in ('http', 'https'):
args[0] = 'https?'
args[1] = r"(www\.|)%s" % args[1]
return "^%s://%s%s" % tuple(args) | 0.002342 |
def clear_all_cookies(self, path: str = "/", domain: str = None) -> None:
"""Deletes all the cookies the user sent with this request.
See `clear_cookie` for more information on the path and domain
parameters.
Similar to `set_cookie`, the effect of this method will not be
seen until the following request.
.. versionchanged:: 3.2
Added the ``path`` and ``domain`` parameters.
"""
for name in self.request.cookies:
self.clear_cookie(name, path=path, domain=domain) | 0.00361 |
def main(forward=26944, host='127.0.0.1', listen=5555):
'''
Args:
- forward(int): local forward port
- host(string): local forward host
- listen(int): listen port
'''
# HTTP->HTTP: On your computer, browse to "http://127.0.0.1:81/" and you'll get http://www.google.com
server = maproxy.proxyserver.ProxyServer(host, forward)
server.listen(listen)
print("Local IP:", socket.gethostbyname(socket.gethostname()))
print("0.0.0.0:{} -> {}:{}".format(listen, host, forward))
tornado.ioloop.IOLoop.instance().start() | 0.003472 |
def run(configobj=None):
"""TEAL interface for :func:`destripe_plus`."""
destripe_plus(
configobj['input'],
suffix=configobj['suffix'],
stat=configobj['stat'],
maxiter=configobj['maxiter'],
sigrej=configobj['sigrej'],
lower=configobj['lower'],
upper=configobj['upper'],
binwidth=configobj['binwidth'],
scimask1=configobj['scimask1'],
scimask2=configobj['scimask2'],
dqbits=configobj['dqbits'],
rpt_clean=configobj['rpt_clean'],
atol=configobj['atol'],
cte_correct=configobj['cte_correct'],
clobber=configobj['clobber'],
verbose=configobj['verbose']) | 0.00146 |
def blame(self, committer=True, by='repository', ignore_globs=None, include_globs=None):
"""
Returns the blame from the current HEAD of the repositories as a DataFrame. The DataFrame is grouped by committer
name, so it will be the sum of all contributions to all repositories by each committer. As with the commit history
method, extensions and ignore_dirs parameters can be passed to exclude certain directories, or focus on certain
file extensions. The DataFrame will have the columns:
* committer
* loc
:param committer: (optional, default=True) true if committer should be reported, false if author
:param by: (optional, default=repository) whether to group by repository or by file
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
"""
df = None
for repo in self.repos:
try:
if df is None:
df = repo.blame(committer=committer, by=by, ignore_globs=ignore_globs, include_globs=include_globs)
else:
df = df.append(repo.blame(committer=committer, by=by, ignore_globs=ignore_globs, include_globs=include_globs))
except GitCommandError as err:
print("Warning! Repo: %s couldn't be blamed" % (repo, ))
pass
df = df.reset_index(level=1)
df = df.reset_index(level=1)
if committer:
if by == 'repository':
df = df.groupby('committer').agg({'loc': np.sum})
elif by == 'file':
df = df.groupby(['committer', 'file']).agg({'loc': np.sum})
else:
if by == 'repository':
df = df.groupby('author').agg({'loc': np.sum})
elif by == 'file':
df = df.groupby(['author', 'file']).agg({'loc': np.sum})
df = df.sort_values(by=['loc'], ascending=False)
return df | 0.00561 |
def get_linked_properties(cli_ctx, app, resource_group, read_properties=None, write_properties=None):
"""Maps user-facing role names to strings used to identify them on resources."""
roles = {
"ReadTelemetry": "api",
"WriteAnnotations": "annotations",
"AuthenticateSDKControlChannel": "agentconfig"
}
sub_id = get_subscription_id(cli_ctx)
tmpl = '/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/components/{}'.format(
sub_id,
resource_group,
app
)
linked_read_properties, linked_write_properties = [], []
if isinstance(read_properties, list):
propLen = len(read_properties)
linked_read_properties = ['{}/{}'.format(tmpl, roles[read_properties[i]]) for i in range(propLen)]
else:
linked_read_properties = ['{}/{}'.format(tmpl, roles[read_properties])]
if isinstance(write_properties, list):
propLen = len(write_properties)
linked_write_properties = ['{}/{}'.format(tmpl, roles[write_properties[i]]) for i in range(propLen)]
else:
linked_write_properties = ['{}/{}'.format(tmpl, roles[write_properties])]
return linked_read_properties, linked_write_properties | 0.005738 |
def get_actions(self):
"""
Returns a list of Action objects
These actions can be used to check the droplet's status
"""
answer = self.get_data("droplets/%s/actions/" % self.id, type=GET)
actions = []
for action_dict in answer['actions']:
action = Action(**action_dict)
action.token = self.token
action.droplet_id = self.id
action.load()
actions.append(action)
return actions | 0.003953 |
async def save_session( # type: ignore
self, app: 'Quart', session: SecureCookieSession, response: Response,
) -> None:
"""Saves the session to the response in a secure cookie."""
domain = self.get_cookie_domain(app)
path = self.get_cookie_path(app)
if not session:
if session.modified:
response.delete_cookie(app.session_cookie_name, domain=domain, path=path)
return
if session.accessed:
response.vary.add('Cookie')
if not self.should_set_cookie(app, session):
return
data = self.get_signing_serializer(app).dumps(dict(session))
response.set_cookie(
app.session_cookie_name,
data,
expires=self.get_expiration_time(app, session),
httponly=self.get_cookie_httponly(app),
domain=domain,
path=path,
secure=self.get_cookie_secure(app),
) | 0.005139 |
def header_name(name):
"""Convert header name like HTTP_XXXX_XXX to Xxxx-Xxx:"""
words = name[5:].split('_')
for i in range(len(words)):
words[i] = words[i][0].upper() + words[i][1:].lower()
result = '-'.join(words)
return result | 0.003876 |
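A short self-contained check of the conversion above; the helper is re-declared here so the snippet runs on its own, and the sample WSGI-style names are illustrative:

```python
def header_name(name):
    """Convert a header name like HTTP_XXXX_XXX to Xxxx-Xxx."""
    words = name[5:].split('_')
    for i in range(len(words)):
        words[i] = words[i][0].upper() + words[i][1:].lower()
    return '-'.join(words)

print(header_name('HTTP_ACCEPT_ENCODING'))   # Accept-Encoding
print(header_name('HTTP_X_FORWARDED_FOR'))   # X-Forwarded-For
```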
def read(self, from_item=None, to_item=None,
from_time=None, to_time=None):
"""Retrieve requested data coordinates from the h5features index.
:param str from_item: Optional. Read the data starting from
this item. (defaults to the first stored item)
:param str to_item: Optional. Read the data until reaching the
item. (defaults to from_item if it was specified and to
the last stored item otherwise).
:param float from_time: Optional. (defaults to the beginning
time in from_item) The specified times are included in the
output.
:param float to_time: Optional. (defaults to the ending time
in to_item) the specified times are included in the
output.
:return: An instance of h5features.Data read from the file.
"""
# handling default arguments
if to_item is None:
to_item = self.items.data[-1] if from_item is None else from_item
if from_item is None:
from_item = self.items.data[0]
# index coordinates of from/to_item. TODO optimize because we
# have 4 accesses to list.index() where 2 are enough.
if not self.items.is_valid_interval(from_item, to_item):
raise IOError('cannot read items: not a valid interval')
from_idx = self.items.data.index(from_item)
to_idx = self.items.data.index(to_item)
from_pos = self._get_item_position(from_idx)
to_pos = self._get_item_position(to_idx)
lower = self._get_from_time(from_time, from_pos)
# upper included with +1
upper = self._get_to_time(to_time, to_pos) + 1
# Step 2: access actual data
if self.dformat == 'sparse':
raise NotImplementedError(
'Reading sparse features not implemented')
else:
features = (self.group['features'][:, lower:upper].T
if self.version == '0.1'
else self.group['features'][lower:upper, ...])
labels = self._labels_group[lower:upper]
# If we read a single item
if to_idx == from_idx:
features = [features]
labels = [labels]
# Several items case: split them from the index
else:
item_ends = self._index[from_idx:to_idx] - from_pos[0] + 1
features = np.split(features, item_ends, axis=0)
labels = np.split(labels, item_ends, axis=0)
items = self.items.data[from_idx:to_idx + 1]
if self.properties is None:
properties = None
else:
properties = self.properties[from_idx:to_idx + 1]
return Data(
items, labels, features, properties=properties, check=False) | 0.001067 |
def check_hints(self, ds):
'''
Checks for potentially mislabeled metadata and makes suggestions for how to correct it
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
'''
ret_val = []
ret_val.extend(self._check_hint_bounds(ds))
return ret_val | 0.008499 |
def decompress(self, value):
"""
Return the primary key value for the ``Select`` widget if the given
recurrence rule exists in the queryset.
"""
if value:
try:
pk = self.queryset.get(recurrence_rule=value).pk
except self.queryset.model.DoesNotExist:
pk = None
return [pk, None, value]
return [None, None, None] | 0.004695 |
def __set_unit_price(self, value):
'''
Sets the unit price
@param value:str
'''
try:
if value < 0:
raise ValueError()
self.__unit_price = Decimal(str(value))
except ValueError:
raise ValueError("Unit Price must be a positive number") | 0.005988 |
def find_credentials(host):
'''
Cycle through all the possible credentials and return the first one that
works.
'''
user_names = [__pillar__['proxy'].get('username', 'root')]
passwords = __pillar__['proxy']['passwords']
for user in user_names:
for password in passwords:
try:
# Try to authenticate with the given user/password combination
ret = __salt__['vsphere.system_info'](host=host,
username=user,
password=password)
except SaltSystemExit:
# If we can't authenticate, continue on to try the next password.
continue
# If we have data returned from above, we've successfully authenticated.
if ret:
DETAILS['username'] = user
DETAILS['password'] = password
return user, password
# We've reached the end of the list without successfully authenticating.
raise SaltSystemExit('Cannot complete login due to an incorrect user name or password.') | 0.003466 |
def _set_packages(self, node):
'''
Set packages and collections.
:param node:
:return:
'''
pkgs = etree.SubElement(node, 'packages')
for pkg_name, pkg_version in sorted(self._data.software.get('packages', {}).items()):
pkg = etree.SubElement(pkgs, 'package')
pkg.set('name', pkg_name)
# Add collections (SUSE)
if self.__grains__.get('os_family', '') == 'Suse':
for ptn_id, ptn_data in self._data.software.get('patterns', {}).items():
if ptn_data.get('installed'):
ptn = etree.SubElement(pkgs, 'namedCollection')
ptn.set('name', ptn_id)
return pkgs | 0.00554 |
def on_train_end(self, logs):
""" Print training time at end of training """
duration = timeit.default_timer() - self.train_start
print('done, took {:.3f} seconds'.format(duration)) | 0.009756 |
def dist_docs():
"create a documentation bundle"
dist_dir = path("dist")
html_dir = path("docs/_build/html")
docs_package = path("%s/%s-%s-docs.zip" % (dist_dir.abspath(), options.setup.name, options.setup.version))
if not html_dir.exists():
error("\n*** ERROR: Please build the HTML docs!")
sys.exit(1)
dist_dir.exists() or dist_dir.makedirs()
docs_package.exists() and docs_package.remove()
sh(r'cd %s && find . -type f \! \( -path "*/.svn*" -o -name "*~" \) | sort'
' | zip -qr -@ %s' % (html_dir, docs_package,))
print
print "Upload @ http://pypi.python.org/pypi?:action=pkg_edit&name=%s" % ( options.setup.name,)
print docs_package | 0.005658 |
def load_with_datetime(pairs):
"""Deserialize JSON into python datetime objects."""
d = {}
for k, v in pairs:
if isinstance(v, basestring):
try:
d[k] = dateutil.parser.parse(v)
except ValueError:
d[k] = v
else:
d[k] = v
return d | 0.005865 |
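A runnable Python 3 sketch of how such a hook plugs into `json.loads` via `object_pairs_hook`; `basestring` is swapped for `str` and the sample document is made up (requires `python-dateutil`):

```python
import json
import dateutil.parser

def load_with_datetime(pairs):
    """Deserialize JSON objects, turning parseable date strings into datetimes."""
    d = {}
    for k, v in pairs:
        if isinstance(v, str):
            try:
                d[k] = dateutil.parser.parse(v)
            except ValueError:
                d[k] = v
        else:
            d[k] = v
    return d

doc = '{"created": "2021-05-01T12:00:00", "count": 3, "name": "widget"}'
print(json.loads(doc, object_pairs_hook=load_with_datetime))
# {'created': datetime.datetime(2021, 5, 1, 12, 0), 'count': 3, 'name': 'widget'}
```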
def _wrapper(self):
"""
Wraps around a few calls which need to be made in the same thread.
"""
try:
res = self.func(*self.args, **self.kw)
except Exception as e:
self.mediator.set_error(e)
else:
self.mediator.set_result(res) | 0.006494 |
def get_credential(self, service, username):
"""Gets the username and password for the service.
Returns a Credential instance.
The *username* argument is optional and may be omitted by
the caller or ignored by the backend. Callers must use the
returned username.
"""
# The default implementation requires a username here.
if username is not None:
password = self.get_password(service, username)
if password is not None:
return credentials.SimpleCredential(
username,
password,
)
return None | 0.003035 |
def compress_encoder_1d(x, hparams, name=None):
"""Encoder that compresses 1-D inputs by 2**num_compress_steps.
Args:
x: Tensor of shape [batch, length, channels].
hparams: HParams.
name: string, variable scope.
Returns:
Tensor of shape [batch, latent_length, hparams.hidden_size], where
latent_length is
hparams.num_latents * length / 2**hparams.num_compress_steps.
"""
x = tf.expand_dims(x, axis=2)
return compress_encoder(x,
hparams,
strides=(2, 1),
kernel_size=(hparams.kernel_size, 1),
name=name) | 0.006182 |
def update(self, request, *args, **kwargs):
"""
See the *Annotator* documentation regarding the
`update <http://docs.annotatorjs.org/en/v1.2.x/storage.html#update>`_
endpoint.
:param request:
incoming :class:`rest_framework.request.Request`.
:return:
303 :class:`rest_framework.response.Response`.
"""
response = super(AnnotationViewSet, self).update(request,
*args,
**kwargs)
for h, v in self.get_success_headers(response.data).items():
response[h] = v
response.data = None
response.status_code = status.HTTP_303_SEE_OTHER
return response | 0.002545 |
def round(self, decimals=0):
"""
Wrapper around numpy.round to ensure object
of same type is returned
Args:
decimals :Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns (Tensor):
rounded tensor of same type
"""
return self.__class__(np.round(self, decimals=decimals)) | 0.004115 |
def ping():
'''
Ping CozyDB with existing credentials
'''
try:
curl_couchdb('/cozy/')
ping = True
except requests.exceptions.ConnectionError, error:
print error
ping = False
return ping | 0.004082 |
def min_rank(series, ascending=True):
"""
Equivalent to `series.rank(method='min', ascending=ascending)`.
Args:
series: column to rank.
Kwargs:
ascending (bool): whether to rank in ascending order (default is `True`).
"""
ranks = series.rank(method='min', ascending=ascending)
return ranks | 0.005952 |
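For example, with pandas installed (the values are illustrative):

```python
import pandas as pd

s = pd.Series([30, 10, 10, 20])
# Same call min_rank() makes; tied values share the lowest rank.
print(s.rank(method='min').tolist())                    # [4.0, 1.0, 1.0, 3.0]
print(s.rank(method='min', ascending=False).tolist())   # [1.0, 3.0, 3.0, 2.0]
```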
def run(self):
"""Run install process."""
try:
self.linux.verify_system_status()
except InstallSkipError:
Log.info('Install skipped.')
return
work_dir = tempfile.mkdtemp(suffix='-rpm-py-installer')
Log.info("Created working directory '{0}'".format(work_dir))
with Cmd.pushd(work_dir):
self.rpm_py.download_and_install()
if not self.python.is_python_binding_installed():
message = (
'RPM Python binding failed to install '
'with unknown reason.'
)
raise InstallError(message)
# TODO: Print installed module name and version as INFO.
if self.is_work_dir_removed:
shutil.rmtree(work_dir)
Log.info("Removed working directory '{0}'".format(work_dir))
else:
Log.info("Saved working directory '{0}'".format(work_dir)) | 0.002053 |
def delete_one_letter(self, letter=RIGHT):
"""Delete one letter the right or the the left of the cursor."""
assert letter in (self.RIGHT, self.LEFT)
if letter == self.LEFT:
papy = self.cursor
self.text = self.text[:self.cursor - 1] + self.text[self.cursor:]
self.cursor = papy - 1
else:
self.text = self.text[:self.cursor] + self.text[self.cursor + 1:] | 0.004598 |
def new():
"""
NAME
aniso_magic.py
DESCRIPTION
plots anisotropy data with either bootstrap or hext ellipses
SYNTAX
aniso_magic.py [-h] [command line options]
OPTIONS
-h plots help message and quits
-f AFILE, specify specimens.txt formatted file for input
-fsa SAMPFILE, specify samples.txt file (required to plot by site)
-fsi SITEFILE, specify site file (required to include location information)
-x Hext [1963] and bootstrap
-B DON'T do bootstrap, do Hext
-par Tauxe [1998] parametric bootstrap
-v plot bootstrap eigenvectors instead of ellipses
-sit plot by site instead of entire file
-crd [s,g,t] coordinate system, default is specimen (g=geographic, t=tilt corrected)
-P don't make any plots - just fill in the specimens, samples, sites tables
-sav don't make the tables - just save all the plots
-fmt [svg, jpg, eps] format for output images, png default
-gtc DEC INC dec,inc of pole to great circle [down(up) in green (cyan)
-d Vi DEC INC; Vi (1,2,3) to compare to direction DEC INC
-n N; specifies the number of bootstraps - default is 1000
DEFAULTS
AFILE: specimens.txt
plot bootstrap ellipses of Constable & Tauxe [1987]
NOTES
minor axis: circles
major axis: triangles
principal axis: squares
directions are plotted on the lower hemisphere
for bootstrapped eigenvector components: Xs: blue, Ys: red, Zs: black
"""
args = sys.argv
if '-h' in args:
print(new.__doc__)
return
dir_path = pmag.get_named_arg("-WD", ".")
if '-ID' in args and dir_path == '.':
dir_path = pmag.get_named_arg("-ID", ".")
iboot, vec = 1, 0
num_bootstraps = pmag.get_named_arg("-n", 1000)
ipar = pmag.get_flag_arg_from_sys("-par", true=1, false=0)
ihext = pmag.get_flag_arg_from_sys("-x", true=1, false=0)
ivec = pmag.get_flag_arg_from_sys("-v", true=1, false=0)
if ivec:
vec = 3
#iplot = pmag.get_flag_arg_from_sys("-P", true=0, false=1)
isite = pmag.get_flag_arg_from_sys("-sit", true=1, false=0)
infile = pmag.get_named_arg('-f', 'specimens.txt')
samp_file = pmag.get_named_arg('-fsa', 'samples.txt')
site_file = pmag.get_named_arg('-fsi', 'sites.txt')
#outfile = pmag.get_named_arg("-F", "rmag_results.txt")
fmt = pmag.get_named_arg("-fmt", "png")
crd = pmag.get_named_arg("-crd", "s")
comp, Dir, PDir = 0, [], []
user = pmag.get_named_arg("-usr", "")
if '-B' in args:
iboot, ihext = 0, 1
save_plots, verbose, interactive = False, True, True
if '-sav' in args:
save_plots = True
verbose = False
interactive = False
if '-gtc' in args:
ind = args.index('-gtc')
d, i = float(args[ind+1]), float(args[ind+2])
PDir.append(d)
PDir.append(i)
if '-d' in args:
comp = 1
ind = args.index('-d')
vec = int(args[ind+1])-1
Dir = [float(args[ind+2]), float(args[ind+3])]
ipmag.aniso_magic_nb(infile, samp_file, site_file, verbose,
ipar, ihext, ivec, isite, False, iboot,
vec, Dir, PDir, crd, num_bootstraps,
dir_path, save_plots=save_plots, interactive=interactive,
fmt=fmt) | 0.002052 |
def nla_put_u16(msg, attrtype, value):
"""Add 16 bit integer attribute to Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L588
Positional arguments:
msg -- Netlink message (nl_msg class instance).
attrtype -- attribute type (integer).
value -- numeric value to store as payload (int() or c_uint16()).
Returns:
0 on success or a negative error code.
"""
data = bytearray(value if isinstance(value, c_uint16) else c_uint16(value))
return nla_put(msg, attrtype, SIZEOF_U16, data) | 0.001808 |
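For reference, a small sketch of the payload construction step only; it relies on ctypes objects exposing the buffer protocol and does not touch `nla_put` itself:

```python
from ctypes import c_uint16

# bytearray() reads the raw 2-byte, host-endian representation of c_uint16,
# which is what gets stored as the attribute payload.
payload = bytearray(c_uint16(80))
print(len(payload), list(payload))   # 2 [80, 0] on little-endian hosts
```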
def from_bytes(cls, bitstream):
'''
Parse the given packet and update properties accordingly
'''
packet = cls()
# Convert to ConstBitStream (if not already provided)
if not isinstance(bitstream, ConstBitStream):
if isinstance(bitstream, Bits):
bitstream = ConstBitStream(auto=bitstream)
else:
bitstream = ConstBitStream(bytes=bitstream)
# Read the type
type_nr = bitstream.read('uint:4')
if type_nr != packet.message_type:
msg = 'Invalid bitstream for a {0} packet'
class_name = packet.__class__.__name__
raise ValueError(msg.format(class_name))
# Read the flags
packet.proxy_map_reply = bitstream.read('bool')
# Skip reserved bits
packet._reserved1 = bitstream.read(1)
# NATT bits
has_xtr_site_id = bitstream.read('bool')
packet.for_rtr = bitstream.read('bool')
# Skip reserved bits
packet._reserved2 = bitstream.read(15)
# Read the rest of the flags
packet.want_map_notify = bitstream.read('bool')
# Store the record count until we need it
record_count = bitstream.read('uint:8')
# Read the nonce
packet.nonce = bitstream.read('bytes:8')
# Read the key id
packet.key_id = bitstream.read('uint:16')
# Read the authentication data
data_length = bitstream.read('uint:16')
packet.authentication_data = bitstream.read('bytes:%d' % data_length)
# Read the records
for dummy in range(record_count):
record = MapRegisterRecord.from_bytes(bitstream)
packet.records.append(record)
# Read the xtr-id and site-id
if has_xtr_site_id:
packet.xtr_id = bitstream.read('uint:128')
packet.site_id = bitstream.read('uint:64')
# Verify that the properties make sense
packet.sanitize()
return packet | 0.000989 |
def get_secret(self, secure_data_path, key, version=None):
"""
(Deprecated)Return the secret based on the secure data path and key
This method is deprecated because it misleads users into thinking they're only getting one value from Cerberus
when in reality they're getting all values, from which a single value is returned.
Use get_secrets_data(secure_data_path)[key] instead.
(See https://github.com/Nike-Inc/cerberus-python-client/issues/18)
"""
warnings.warn(
"get_secret is deprecated, use get_secrets_data instead",
DeprecationWarning
)
secret_resp_json = self._get_secrets(secure_data_path, version)
if key in secret_resp_json['data']:
return secret_resp_json['data'][key]
else:
raise CerberusClientException("Key '%s' not found" % key) | 0.004499 |
def _format_to_fixed_precision(self, precision):
""" Format 'self' to a given number of digits after the decimal point.
Returns a triple (negative, digits, exp) where:
- negative is a boolean, True for a negative number, else False
- digits is a string giving the digits of the output
- exp represents the exponent of the output
The normalization of the exponent is such that <digits>E<exp>
represents the decimal approximation to self.
"""
# MPFR only provides functions to format to a given number of
# significant digits. So we must:
#
# (1) Identify an e such that 10**(e-1) <= abs(x) < 10**e.
#
# (2) Determine the number of significant digits required, and format
# to that number of significant digits.
#
# (3) Adjust output if necessary if it's been rounded up to 10**e.
# Zeros
if is_zero(self):
return is_negative(self), '0', -precision
# Specials
if is_inf(self):
return is_negative(self), 'inf', None
if is_nan(self):
return is_negative(self), 'nan', None
# Figure out the exponent by making a call to get_str2. exp satisfies
# 10**(exp-1) <= self < 10**exp
_, _, exp = _mpfr_get_str2(
10,
2,
self,
ROUND_TOWARD_ZERO,
)
sig_figs = exp + precision
if sig_figs < 0:
sign = self._sign()
return sign, '0', -precision
elif sig_figs == 0:
# Ex: 0.1 <= x < 1.0, rounding x to nearest multiple of 1.0.
# Or: 100.0 <= x < 1000.0, rounding x to nearest multiple of 1000.0
sign, digits, new_exp = _mpfr_get_str2(
10,
2,
self,
ROUND_TOWARD_NEGATIVE,
)
if int(digits) == 50:
# Halfway case
sign, digits, new_exp = _mpfr_get_str2(
10,
2,
self,
ROUND_TOWARD_POSITIVE,
)
digits = '1' if int(digits) > 50 or new_exp == exp + 1 else '0'
return sign, digits, -precision
negative, digits, new_exp = self._format_to_floating_precision(
sig_figs
)
# It's possible that the rounding up involved changes the exponent;
# in that case we have to adjust the digits accordingly. The only
# possibility should be that new_exp == exp + 1.
if new_exp + len(digits) != exp:
assert new_exp + len(digits) == exp + 1
digits += '0'
return negative, digits, -precision | 0.000718 |
def UInt32():
"""Returns a pseudo-random 32-bit unsigned integer."""
with _mutex:
try:
return _random_buffer.pop()
except IndexError:
data = os.urandom(struct.calcsize("=L") * _random_buffer_size)
_random_buffer.extend(
struct.unpack("=" + "L" * _random_buffer_size, data))
return _random_buffer.pop() | 0.020173 |
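A self-contained sketch of the same batching idea with the module-level state spelled out; the names `_mutex`, `_random_buffer`, and `_random_buffer_size` are assumptions about what the original module defines:

```python
import os
import struct
import threading

_mutex = threading.Lock()
_random_buffer = []
_random_buffer_size = 1024   # assumed batch size

def uint32():
    """Return a pseudo-random 32-bit unsigned integer, refilling in batches."""
    with _mutex:
        try:
            return _random_buffer.pop()
        except IndexError:
            data = os.urandom(struct.calcsize("=L") * _random_buffer_size)
            _random_buffer.extend(
                struct.unpack("=" + "L" * _random_buffer_size, data))
            return _random_buffer.pop()

print(uint32(), uint32())
```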
def container_rename(name, newname, remote_addr=None,
cert=None, key=None, verify_cert=True):
'''
Rename a container
name :
Name of the container to Rename
newname :
The new name of the container
remote_addr :
A URL to a remote Server, you also have to give cert and key if
you provide remote_addr and it's a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert; this is True by default,
but in most cases you want to turn it off as LXD
normally uses self-signed certificates.
'''
container = container_get(
name, remote_addr, cert, key, verify_cert, _raw=True
)
if container.status_code == CONTAINER_STATUS_RUNNING:
raise SaltInvocationError(
"Can't rename the running container '{0}'.".format(name)
)
container.rename(newname, wait=True)
return _pylxd_model_to_dict(container) | 0.0008 |
def _get_tol(tol, dtype, validate_args):
"""Gets a Tensor of type `dtype`, 0 if `tol` is None, validation optional."""
if tol is None:
return tf.convert_to_tensor(value=0, dtype=dtype)
tol = tf.convert_to_tensor(value=tol, dtype=dtype)
if validate_args:
tol = distribution_util.with_dependencies([
assert_util.assert_non_negative(
tol, message="Argument 'tol' must be non-negative")
], tol)
return tol | 0.013514 |
def optional_actions(encrypt, path, compress_file, **kwargs):
'''
Optional actions for AWS S3 upload and file encryption.
'''
yes = ('y', 'Y')
file_to_upload = normalize_path(path) + compress_file[1]
if encrypt in yes:
encrypt_file(compress_file[1], compress_file[0])
file_to_upload = compress_file[0]
if kwargs.get('s3') in yes:
factory_uploader('S3', name_backup=file_to_upload,
bucket_name=AWS_BUCKET_NAME, action='upload')
if kwargs.get('glacier') in yes:
factory_uploader('Glacier', name_backup=file_to_upload,
vault_name=AWS_VAULT_NAME,
path=os.path.join(os.path.expanduser('~'),
'.zoort.db'),
action='upload')
if kwargs.get('dropbox') in yes:
factory_uploader('Dropbox', name_backup=file_to_upload,
action='upload')
if kwargs.get('swift') in yes:
factory_uploader('Swift', name_backup=file_to_upload,
action='upload') | 0.000907 |
async def query(
self,
q: AnyStr,
*,
epoch: str = 'ns',
chunked: bool = False,
chunk_size: Optional[int] = None,
db: Optional[str] = None,
use_cache: bool = False,
) -> Union[AsyncGenerator[ResultType, None], ResultType]:
"""Sends a query to InfluxDB.
Please refer to the InfluxDB documentation for all the possible queries:
https://docs.influxdata.com/influxdb/latest/query_language/
:param q: Raw query string
:param db: Database to be queried. Defaults to `self.db`.
:param epoch: Precision level of response timestamps.
Valid values: ``{'ns', 'u', 'µ', 'ms', 's', 'm', 'h'}``.
:param chunked: If ``True``, makes InfluxDB return results in streamed batches
rather than as a single response.
Returns an AsyncGenerator which yields responses
in the same format as non-chunked queries.
:param chunk_size: Max number of points for each chunk. By default, InfluxDB chunks
responses by series or by every 10,000 points, whichever occurs first.
:param use_cache:
:return: Response in the format specified by the combination of
:attr:`.InfluxDBClient.output` and ``chunked``
"""
async def _chunked_generator(url, data):
async with self._session.post(url, data=data) as resp:
logger.debug(f'{resp.status} (CHUNKED): {q}')
# Hack to avoid aiohttp raising ValueError('Line is too long')
# The number 16 is arbitrary (may be too large/small).
resp.content._high_water *= 16
async for chunk in resp.content:
chunk = json.loads(chunk)
self._check_error(chunk)
yield chunk
if not self._session:
await self.create_session()
# InfluxDB documentation is wrong regarding `/query` parameters
# See https://github.com/influxdata/docs.influxdata.com/issues/1807
if not isinstance(chunked, bool):
raise ValueError("'chunked' must be a boolean")
data = dict(q=q, db=db or self.db, chunked=str(chunked).lower(), epoch=epoch)
if chunked and chunk_size:
data['chunk_size'] = chunk_size
url = self.url.format(endpoint='query')
if chunked:
if use_cache:
raise ValueError("Can't use cache w/ chunked queries")
if self.mode != 'async':
raise ValueError("Can't use 'chunked' with non-async mode")
if self.output == 'json':
return _chunked_generator(url, data)
raise ValueError(f"Chunked queries are not support with {self.output!r} output")
key = f'aioinflux:{q}'
if use_cache and self._redis and await self._redis.exists(key):
logger.debug(f'Cache HIT: {q}')
data = lz4.decompress(await self._redis.get(key))
else:
async with self._session.post(url, data=data) as resp:
data = await resp.read()
if use_cache and self._redis:
logger.debug(f'Cache MISS ({resp.status}): {q}')
if resp.status == 200:
await self._redis.set(key, lz4.compress(data))
await self._redis.expire(key, self.cache_expiry)
else:
logger.debug(f'{resp.status}: {q}')
data = json.loads(data)
self._check_error(data)
if self.output == 'json':
return data
elif self.output == 'dataframe':
return serialization.dataframe.parse(data)
else:
raise ValueError('Invalid output format') | 0.00237 |
def unique_combs(df):
"""
Return data frame with all possible combinations
of the values in the columns
"""
# List of unique values from every column
lst = (x.unique() for x in (df[c] for c in df))
rows = list(itertools.product(*lst))
_df = pd.DataFrame(rows, columns=df.columns)
# preserve the column dtypes
for col in df:
_df[col] = _df[col].astype(df[col].dtype, copy=False)
return _df | 0.002268 |
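A small worked example of the cross product being built (the frame below is made up):

```python
import itertools
import pandas as pd

df = pd.DataFrame({'size': ['S', 'M', 'S'], 'qty': [1, 2, 2]})

# Every pairing of the unique values per column, as unique_combs builds it.
rows = list(itertools.product(df['size'].unique(), df['qty'].unique()))
print(pd.DataFrame(rows, columns=df.columns))
#   size  qty
# 0    S    1
# 1    S    2
# 2    M    1
# 3    M    2
```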
def betabin_like(x, alpha, beta, n):
R"""
Beta-binomial log-likelihood. Equivalent to binomial random
variables with probabilities drawn from a
:math:`\texttt{Beta}(\alpha,\beta)` distribution.
.. math::
f(x \mid \alpha, \beta, n) = \frac{\Gamma(\alpha + \beta)}{\Gamma(\alpha)} \frac{\Gamma(n+1)}{\Gamma(x+1)\Gamma(n-x+1)} \frac{\Gamma(\alpha + x)\Gamma(n+\beta-x)}{\Gamma(\alpha+\beta+n)}
:Parameters:
- `x` : x=0,1,\ldots,n
- `alpha` : alpha > 0
- `beta` : beta > 0
- `n` : n=x,x+1,\ldots
:Example:
>>> betabin_like(3,1,1,10)
-2.3978952727989
.. note::
- :math:`E(X)=n\frac{\alpha}{\alpha+\beta}`
- :math:`Var(X)=n\frac{\alpha \beta}{(\alpha+\beta)^2(\alpha+\beta+1)}`
"""
return flib.betabin_like(x, alpha, beta, n) | 0.002439 |
def labels(self, leaves=True, internal=True):
'''Generator over the (non-``None``) ``Node`` labels of this ``Tree``
Args:
``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
'''
if not isinstance(leaves, bool):
raise TypeError("leaves must be a bool")
if not isinstance(internal, bool):
raise TypeError("internal must be a bool")
for node in self.traverse_preorder():
if node.label is not None and ((leaves and node.is_leaf()) or (internal and not node.is_leaf())):
yield node.label | 0.007062 |
def network_expansion(network, method = 'rel', ext_min=0.1,
ext_width=False, filename=None, boundaries=[]):
"""Plot relative or absolute network extension of AC- and DC-lines.
Parameters
----------
network: PyPSA network container
Holds topology of grid including results from powerflow analysis
method: str
Choose 'rel' for extension relative to s_nom and 'abs' for
absolute extensions.
ext_min: float
Choose minimum relative line extension shown in plot in p.u..
ext_width: float or bool
Choose if line_width respects line extension. Turn off with 'False' or
set a linear factor to decrease the extension line_width.
filename: str or None
Save figure in this direction
boundaries: array
Set boundaries of heatmap axis
"""
cmap = plt.cm.jet
overlay_network = network.copy()
overlay_network.lines = overlay_network.lines[
overlay_network.lines.s_nom_extendable & ((
overlay_network.lines.s_nom_opt -
overlay_network.lines.s_nom_min) /
overlay_network.lines.s_nom >= ext_min)]
overlay_network.links = overlay_network.links[
overlay_network.links.p_nom_extendable & ((
overlay_network.links.p_nom_opt -
overlay_network.links.p_nom_min)/
overlay_network.links.p_nom >= ext_min)]
for i, row in overlay_network.links.iterrows():
linked = overlay_network.links[(row['bus1'] ==
overlay_network.links.bus0) & (
row['bus0'] == overlay_network.links.bus1)]
if not linked.empty:
if row['p_nom_opt'] < linked.p_nom_opt.values[0]:
overlay_network.links.p_nom_opt[i] = linked.p_nom_opt.values[0]
array_line = [['Line'] * len(overlay_network.lines),
overlay_network.lines.index]
array_link = [['Link'] * len(overlay_network.links),
overlay_network.links.index]
if method == 'rel':
extension_lines = pd.Series((100 *
(overlay_network.lines.s_nom_opt -
overlay_network.lines.s_nom_min) /
overlay_network.lines.s_nom).data,
index=array_line)
extension_links = pd.Series((100 *
(overlay_network.links.p_nom_opt -
overlay_network.links.p_nom_min)/
(overlay_network.links.p_nom)).data,
index=array_link)
if method == 'abs':
extension_lines = pd.Series(
(overlay_network.lines.s_nom_opt -
overlay_network.lines.s_nom_min).data,
index=array_line)
extension_links = pd.Series(
(overlay_network.links.p_nom_opt -
overlay_network.links.p_nom_min).data,
index=array_link)
extension = extension_lines.append(extension_links)
# Plot whole network in backgroud of plot
network.plot(
line_colors=pd.Series("grey", index = [['Line'] * len(
network.lines), network.lines.index]).append(
pd.Series("grey", index = [['Link'] * len(network.links),
network.links.index])),
bus_sizes=0,
line_widths=pd.Series(0.5, index = [['Line'] * len(network.lines),
network.lines.index]).append(
pd.Series(0.55, index = [['Link'] * len(network.links),
network.links.index])))
if not ext_width:
line_widths= pd.Series(0.8, index = array_line).append(
pd.Series(0.8, index = array_link))
else:
line_widths= 0.5 + (extension / ext_width)
ll = overlay_network.plot(
line_colors=extension,
line_cmap=cmap,
bus_sizes=0,
title="Optimized AC- and DC-line expansion",
line_widths=line_widths)
if not boundaries:
v = np.linspace(min(extension), max(extension), 101)
boundaries = [min(extension), max(extension)]
else:
v = np.linspace(boundaries[0], boundaries[1], 101)
if not extension_links.empty:
cb_Link = plt.colorbar(ll[2], boundaries=v,
ticks=v[0:101:10])
cb_Link.set_clim(vmin=boundaries[0], vmax=boundaries[1])
cb_Link.remove()
cb = plt.colorbar(ll[1], boundaries=v,
ticks=v[0:101:10], fraction=0.046, pad=0.04)
cb.set_clim(vmin=boundaries[0], vmax=boundaries[1])
if method == 'rel':
cb.set_label('line expansion relative to s_nom in %')
if method == 'abs':
cb.set_label('line expansion in MW')
if filename is None:
plt.show()
else:
plt.savefig(filename)
plt.close() | 0.009524 |
def rlmf_eval():
"""Eval set of hparams for model-free PPO."""
hparams = rlmf_original()
hparams.batch_size = 8
hparams.eval_sampling_temps = [0.0, 0.5, 1.0]
hparams.eval_rl_env_max_episode_steps = -1
hparams.add_hparam("ppo_epoch_length", 128)
hparams.add_hparam("ppo_optimization_batch_size", 32)
hparams.add_hparam("ppo_epochs_num", 10000)
hparams.add_hparam("ppo_eval_every_epochs", 500)
hparams.add_hparam("attempt", 0)
hparams.add_hparam("moe_loss_coef", 0)
return hparams | 0.025896 |
def reactivate(self):
"""
Reactivates this subscription.
If a customer's subscription is canceled with ``at_period_end`` set to True and it has not yet reached the end
of the billing period, it can be reactivated. Subscriptions canceled immediately cannot be reactivated.
(Source: https://stripe.com/docs/subscriptions/canceling-pausing)
.. warning:: Reactivating a fully canceled Subscription will fail silently. Be sure to check the returned \
Subscription's status.
"""
stripe_subscription = self.api_retrieve()
stripe_subscription.plan = self.plan.id
stripe_subscription.cancel_at_period_end = False
return Subscription.sync_from_stripe_data(stripe_subscription.save()) | 0.024286 |
def _get_key_value(string):
"""Return the (key, value) as a tuple from a string."""
# Normally all properties look like this:
# Unique Identifier: 600508B1001CE4ACF473EE9C826230FF
# Disk Name: /dev/sda
# Mount Points: None
key = ''
value = ''
try:
key, value = string.split(': ')
except ValueError:
# This handles the case when the property of a logical drive
# returned is as follows. Here we cannot split by ':' because
# the disk id has colon in it. So if this is about disk,
# then strip it accordingly.
# Mirror Group 0: physicaldrive 6I:1:5
string = string.lstrip(' ')
if string.startswith('physicaldrive'):
fields = string.split(' ')
# Include fields[1] to key to avoid duplicate pairs
# with the same 'physicaldrive' key
key = fields[0] + " " + fields[1]
value = fields[1]
else:
# TODO(rameshg87): Check if this ever occurs.
return string.strip(' '), None
return key.strip(' '), value.strip(' ') | 0.000903 |
def run_in_transaction(self, func, *args, **kw):
"""Perform a unit of work in a transaction, retrying on abort.
:type func: callable
:param func: takes a required positional argument, the transaction,
and additional positional / keyword arguments as supplied
by the caller.
:type args: tuple
:param args: additional positional arguments to be passed to ``func``.
:type kw: dict
:param kw: optional keyword arguments to be passed to ``func``.
If passed, "timeout_secs" will be removed and used to
override the default timeout.
:rtype: :class:`datetime.datetime`
:returns: timestamp of committed transaction
"""
# Sanity check: Is there a transaction already running?
# If there is, then raise a red flag. Otherwise, mark that this one
# is running.
if getattr(self._local, "transaction_running", False):
raise RuntimeError("Spanner does not support nested transactions.")
self._local.transaction_running = True
# Check out a session and run the function in a transaction; once
# done, flip the sanity check bit back.
try:
with SessionCheckout(self._pool) as session:
return session.run_in_transaction(func, *args, **kw)
finally:
self._local.transaction_running = False | 0.001374 |
def tooltip_query(self, widget, x, y, keyboard_mode, tooltip):
"""
Set the tooltip that appears when you hover the mouse cursor over the icon in the system panel.
"""
tooltip.set_text(subprocess.getoutput("acpi"))
return True | 0.012 |
def change(img):
"""Set the wallpaper."""
if not os.path.isfile(img):
return
desktop = get_desktop_env()
if OS == "Darwin":
set_mac_wallpaper(img)
elif OS == "Windows":
set_win_wallpaper(img)
else:
set_desktop_wallpaper(desktop, img)
logging.info("Set the new wallpaper.") | 0.002967 |
def save(self, file):
""" Saves the :class:`~pypot.primitive.move.Move` to a json file.
.. note:: The format used to store the :class:`~pypot.primitive.move.Move` is extremely verbose and should obviously be optimized for long moves.
"""
d = {
'framerate': self.framerate,
'positions': self._timed_positions,
}
json.dump(d, file, indent=2) | 0.007282 |
async def add_line(self, *args, **kwargs):
"""
A proxy function that allows this PaginatorInterface to remain locked to the last page
if it is already on it.
"""
display_page = self.display_page
page_count = self.page_count
self.paginator.add_line(*args, **kwargs)
new_page_count = self.page_count
if display_page + 1 == page_count:
# To keep position fixed on the end, update position to new last page and update message.
self._display_page = new_page_count
self.bot.loop.create_task(self.update()) | 0.006547 |
def uninstall(app, opts=[]):
"""
Uninstall app from target
:param app: app name to uninstall from target (e.g. "com.example.android.valid")
:param opts: list command options (e.g. ["-r", "-a"])
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_UNINSTALL, _convert_opts(opts), app]
return _exec_command(adb_full_cmd) | 0.007538 |
def unzip_to_temp_dir(zip_file_name):
"""Unzip zipfile to a temporary directory.
The directory of the unzipped files is returned on success,
otherwise None is returned. """
if not zip_file_name or not os.path.exists(zip_file_name):
return None
zf = zipfile.ZipFile(zip_file_name)
if zf.testzip() is not None:
return None
# Unzip the files into a temporary directory
LOGGER.info("Extracting zipped file: %s" % zip_file_name)
tempdir = tempfile.mkdtemp()
try:
# Create directories that don't exist
for zip_name in zf.namelist():
# We have no knowledge of the OS where the zipped file was
# created, so we restrict to zip files with paths without
# the characters "\" and "/".
name = (zip_name.replace("\\", os.path.sep).
replace("/", os.path.sep))
dest = os.path.join(tempdir, name)
if (name.endswith(os.path.sep) and not os.path.exists(dest)):
os.mkdir(dest)
LOGGER.debug("Directory %s created." % dest)
# Copy files
for zip_name in zf.namelist():
# We have no knowledge of the OS where the zipped file was
# created, so we restrict to zip files with paths without
# the characters "\" and "/".
name = (zip_name.replace("\\", os.path.sep).
replace("/", os.path.sep))
dest = os.path.join(tempdir, name)
if not (name.endswith(os.path.sep)):
LOGGER.debug("Copying file %s......" % dest)
outfile = open(dest, 'wb')
outfile.write(zf.read(zip_name))
outfile.close()
LOGGER.debug("File %s copied." % dest)
LOGGER.info("Unzipped file can be found at %s" % tempdir)
return tempdir
except IOError as err:
LOGGER.error("Error in extracting webdriver.xpi: %s" % err)
return None | 0.000504 |
def release(self, connection: Connection, reuse: bool=True):
'''Unregister a connection.
Args:
connection: Connection instance returned from :meth:`acquire`.
reuse: If True, the connection is made available for reuse.
Coroutine.
'''
yield from self._condition.acquire()
self.busy.remove(connection)
if reuse:
self.ready.add(connection)
self._condition.notify()
self._condition.release() | 0.008032 |
def query_one(cls, *args, **kwargs):
""" Same as collection.find_one, but return Document then dict """
doc = cls._coll.find_one(*args, **kwargs)
if doc:
return cls.from_storage(doc) | 0.009174 |
def get_object(self, queryset=None):
"""
Assign the language for the retrieved object.
"""
object = super(LanguageChoiceMixin, self).get_object(queryset)
if isinstance(object, TranslatableModelMixin):
object.set_current_language(self.get_language(), initialize=True)
return object | 0.005882 |
def stream(self, from_=values.unset, to=values.unset,
date_created_on_or_before=values.unset,
date_created_after=values.unset, limit=None, page_size=None):
"""
Streams FaxInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode from_: Retrieve only those faxes sent from this phone number
:param unicode to: Retrieve only those faxes sent to this phone number
:param datetime date_created_on_or_before: Retrieve only faxes created on or before this date
:param datetime date_created_after: Retrieve only faxes created after this date
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.fax.v1.fax.FaxInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
from_=from_,
to=to,
date_created_on_or_before=date_created_on_or_before,
date_created_after=date_created_after,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit']) | 0.008538 |
def split_string(self, string):
""" Yields substrings for which the same escape code applies.
"""
self.actions = []
start = 0
# strings ending with \r are assumed to be ending in \r\n since
# \n is appended to output strings automatically. Accounting
# for that, here.
last_char = '\n' if len(string) > 0 and string[-1] == '\n' else None
string = string[:-1] if last_char is not None else string
for match in ANSI_OR_SPECIAL_PATTERN.finditer(string):
raw = string[start:match.start()]
substring = SPECIAL_PATTERN.sub(self._replace_special, raw)
if substring or self.actions:
yield substring
self.actions = []
start = match.end()
groups = filter(lambda x: x is not None, match.groups())
g0 = groups[0]
if g0 == '\a':
self.actions.append(BeepAction('beep'))
yield None
self.actions = []
elif g0 == '\r':
self.actions.append(CarriageReturnAction('carriage-return'))
yield None
self.actions = []
elif g0 == '\b':
self.actions.append(BackSpaceAction('backspace'))
yield None
self.actions = []
elif g0 == '\n' or g0 == '\r\n':
self.actions.append(NewLineAction('newline'))
yield g0
self.actions = []
else:
params = [ param for param in groups[1].split(';') if param ]
if g0.startswith('['):
# Case 1: CSI code.
try:
params = map(int, params)
except ValueError:
# Silently discard badly formed codes.
pass
else:
self.set_csi_code(groups[2], params)
elif g0.startswith(']'):
# Case 2: OSC code.
self.set_osc_code(params)
raw = string[start:]
substring = SPECIAL_PATTERN.sub(self._replace_special, raw)
if substring or self.actions:
yield substring
if last_char is not None:
self.actions.append(NewLineAction('newline'))
yield last_char | 0.001665 |
def set_acceleration(self, settings):
'''
Sets the acceleration (mm/sec^2) that a given axis will move
settings
Dict with axes as keys (e.g.: 'X', 'Y', 'Z', 'A', 'B', or 'C')
and floating point number for mm-per-second-squared (mm/sec^2)
'''
self._acceleration.update(settings)
values = ['{}{}'.format(axis.upper(), value)
for axis, value in sorted(settings.items())]
command = '{} {}'.format(
GCODES['ACCELERATION'],
' '.join(values)
)
log.debug("set_acceleration: {}".format(command))
self._send_command(command) | 0.003017 |
def get_config_parameter_multiline(config: ConfigParser,
section: str,
param: str,
default: List[str]) -> List[str]:
"""
Get multi-line string parameter from ``configparser`` ``.INI`` file,
as a list of strings (one per line, ignoring blank lines).
Args:
config: :class:`ConfigParser` object
section: section name within config file
param: name of parameter within section
default: default value
Returns:
parameter value, or default
"""
try:
multiline = config.get(section, param)
lines = [x.strip() for x in multiline.splitlines()]
return [line for line in lines if line]
except (TypeError, ValueError, NoOptionError):
log.warning(
"Configuration variable {} not found or improper in section [{}]; "
"using default of {!r}", param, section, default)
return default | 0.000993 |
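A runnable sketch of the splitting behavior on a stock `ConfigParser`; the section, option, and values below are made up:

```python
from configparser import ConfigParser
from io import StringIO

cfg_text = """
[export]
filenames =
    alpha.txt
    beta.txt

    gamma.txt
"""

config = ConfigParser()
config.read_file(StringIO(cfg_text))

multiline = config.get("export", "filenames")
lines = [x.strip() for x in multiline.splitlines()]
print([line for line in lines if line])   # ['alpha.txt', 'beta.txt', 'gamma.txt']
```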
def _print(self, *args):
""" internal print to self.fobj """
string = u" ".join(args) + '\n'
self.fobj.write(string) | 0.014286 |
def process_response(self, request, response):
"""
Create the logging message.
"""
try:
log_dict = create_log_dict(request, response)
# add the request time to the log_dict; if no start time is
# available, use -1 as NA value
request_time = (
time.time() - self.start_time if hasattr(self, 'start_time')
and self.start_time else -1)
log_dict.update({'request_time': request_time})
is_request_time_too_high = (
request_time > float(settings.LOGUTILS_REQUEST_TIME_THRESHOLD))
use_sql_info = settings.DEBUG or is_request_time_too_high
log_msg = create_log_message(log_dict, use_sql_info, fmt=False)
if is_request_time_too_high:
logger.warning(log_msg, log_dict, extra=log_dict)
else:
logger.info(log_msg, log_dict, extra=log_dict)
except Exception as e:
logger.exception(e)
return response | 0.001901 |
def read_ascii_series(input_, array_type=Series, unpack=True, **kwargs):
"""Read a `Series` from an ASCII file
Parameters
----------
input : `str`, `file`
file to read
array_type : `type`
desired return type
"""
xarr, yarr = loadtxt(input_, unpack=unpack, **kwargs)
return array_type(yarr, xindex=xarr) | 0.002841 |
def get_hash(self, handle):
"""
Get the associated hash for the given handle, the hash file must
exist (``handle + '.hash'``).
Args:
handle (str): Path to the template to get the hash from
Returns:
str: Hash for the given handle
"""
response = self.open_url(url=handle, suffix='.hash')
try:
return response.read()
finally:
response.close() | 0.004338 |
def _short_chrom(self, chrom):
"""Plot standard chromosomes + X, sorted numerically.
Allows specification from a list of chromosomes via config
for non-standard genomes.
"""
default_allowed = set(["X"])
allowed_chroms = set(getattr(config, "goleft_indexcov_config", {}).get("chromosomes", []))
chrom_clean = chrom.replace("chr", "")
try:
chrom_clean = int(chrom_clean)
except ValueError:
if chrom_clean not in default_allowed and chrom_clean not in allowed_chroms:
chrom_clean = None
if allowed_chroms:
if chrom in allowed_chroms or chrom_clean in allowed_chroms:
return chrom_clean
elif isinstance(chrom_clean, int) or chrom_clean in default_allowed:
return chrom_clean | 0.004756 |
def _CollectArguments(function, args, kwargs):
"""Merges positional and keyword arguments into a single dict."""
all_args = dict(kwargs)
arg_names = inspect.getargspec(function)[0]
for position, arg in enumerate(args):
if position < len(arg_names):
all_args[arg_names[position]] = arg
return all_args | 0.021875 |
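A self-contained sketch of the same merging logic; `inspect.getfullargspec` stands in for `getargspec` (removed in Python 3.11), and the target function is made up:

```python
import inspect

def collect_arguments(function, args, kwargs):
    """Merge positional and keyword arguments into a single dict."""
    all_args = dict(kwargs)
    arg_names = inspect.getfullargspec(function)[0]
    for position, arg in enumerate(args):
        if position < len(arg_names):
            all_args[arg_names[position]] = arg
    return all_args

def connect(host, port, timeout=10):
    pass

print(collect_arguments(connect, ("db.local", 5432), {"timeout": 3}))
# {'timeout': 3, 'host': 'db.local', 'port': 5432}
```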
def render(self, template=None):
"""Render the plot using a template.
Once the plot is complete, it needs to be rendered. Artist uses
the Jinja2 templating engine. The default template results in a
LaTeX file which can be included in your document.
:param template: a user-supplied template or None.
:type template: string or None.
:returns: the rendered template as string.
"""
if not template:
template = self.template
for subplot in self.subplots:
subplot._prepare_data()
response = template.render(rows=self.rows, columns=self.columns,
xmode=self.xmode, ymode=self.ymode,
width=self.width, height=self.height,
xlabel=self.xlabel, ylabel=self.ylabel,
limits=self.limits, ticks=self.ticks,
colorbar=self.colorbar,
colormap=self.colormap,
external_filename=self.external_filename,
font_options=self.font_options,
axis_options=self.axis_options,
subplots=self.subplots,
plot_template=self.template)
return response | 0.001403 |
def do_GEOHASHTOGEOJSON(self, geoh):
"""Build GeoJSON corresponding to geohash given as parameter.
GEOHASHTOGEOJSON u09vej04 [NEIGHBORS 0|1|2]"""
geoh, with_neighbors = self._match_option('NEIGHBORS', geoh)
bbox = geohash.bbox(geoh)
try:
with_neighbors = int(with_neighbors)
except TypeError:
with_neighbors = 0
def expand(bbox, geoh, depth):
neighbors = geohash.neighbors(geoh)
for neighbor in neighbors:
other = geohash.bbox(neighbor)
if with_neighbors > depth:
expand(bbox, neighbor, depth + 1)
else:
if other['n'] > bbox['n']:
bbox['n'] = other['n']
if other['s'] < bbox['s']:
bbox['s'] = other['s']
if other['e'] > bbox['e']:
bbox['e'] = other['e']
if other['w'] < bbox['w']:
bbox['w'] = other['w']
if with_neighbors > 0:
expand(bbox, geoh, 0)
geojson = {
"type": "Polygon",
"coordinates": [[
[bbox['w'], bbox['n']],
[bbox['e'], bbox['n']],
[bbox['e'], bbox['s']],
[bbox['w'], bbox['s']],
[bbox['w'], bbox['n']]
]]
}
print(white(json.dumps(geojson))) | 0.001364 |
def add_pool_member(lb, name, port, pool_name):
'''
Add a node to a pool
CLI Examples:
.. code-block:: bash
salt-run f5.add_pool_member load_balancer 10.0.0.1 80 my_pool
'''
if __opts__['load_balancers'].get(lb, None):
(username, password) = list(__opts__['load_balancers'][lb].values())
else:
raise Exception('Unable to find `{0}` load balancer'.format(lb))
F5 = F5Mgmt(lb, username, password)
F5.add_pool_member(name, port, pool_name)
return True | 0.001942 |
def list(self, body, ordered=True):
"""Rendering list tags like ``<ul>`` and ``<ol>``.
:param body: body contents of the list.
:param ordered: whether this list is ordered or not.
"""
mark = '#. ' if ordered else '* '
lines = body.splitlines()
for i, line in enumerate(lines):
if line and not line.startswith(self.list_marker):
lines[i] = ' ' * len(mark) + line
return '\n{}\n'.format(
'\n'.join(lines)).replace(self.list_marker, mark) | 0.003704 |
def _recv_nack(self, method_frame):
'''Receive a nack from the broker.'''
if self._nack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple, requeue = method_frame.args.read_bits(2)
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
self._nack_listener(self._last_ack_id, requeue)
else:
self._last_ack_id = delivery_tag
self._nack_listener(self._last_ack_id, requeue) | 0.003571 |
def filepaths_in_dir(path):
"""Find all files in a directory, and return the relative paths to those files.
Args:
path (str): the directory path to walk
Returns:
list: the list of relative paths to all files inside of ``path`` or its
subdirectories.
"""
filepaths = []
for root, directories, filenames in os.walk(path):
for filename in filenames:
filepath = os.path.join(root, filename)
filepath = filepath.replace(path, '').lstrip('/')
filepaths.append(filepath)
return filepaths | 0.003431 |
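A quick self-contained check on a throwaway directory; the helper is re-declared so the snippet runs on its own, and the `'/'` stripping assumes a POSIX path separator as in the original:

```python
import os
import tempfile

def filepaths_in_dir(path):
    """Return paths of all files under `path`, relative to `path`."""
    filepaths = []
    for root, directories, filenames in os.walk(path):
        for filename in filenames:
            filepath = os.path.join(root, filename)
            filepath = filepath.replace(path, '').lstrip('/')
            filepaths.append(filepath)
    return filepaths

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'docs'))
for name in ('README.md', os.path.join('docs', 'index.rst')):
    open(os.path.join(root, name), 'w').close()

print(sorted(filepaths_in_dir(root)))   # ['README.md', 'docs/index.rst']
```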
def get_content_children(self, content_id, expand=None, parent_version=None, callback=None):
"""
Returns a map of the direct children of a piece of Content. Content can have multiple types of children -
for example a Page can have children that are also Pages, but it can also have Comments and Attachments.
The {@link ContentType}(s) of the children returned is specified by the "expand" query parameter in the request
- this parameter can include expands for multiple child types.
If no types are included in the expand parameter, the map returned will just list the child types that
are available to be expanded for the {@link Content} referenced by the "content_id" parameter.
:param content_id (string): A string containing the id of the content to retrieve children for.
:param expand (string): OPTIONAL :A comma separated list of properties to expand on the children.
Default: None.
:param parent_version (int): OPTIONAL: An integer representing the version of the content to retrieve
children for. Default: 0 (Latest)
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the content/{id}/child endpoint, or the results of the callback.
Will raise requests.HTTPError on bad input, potentially.
"""
params = {}
if expand:
params["expand"] = expand
if parent_version:
params["parentVersion"] = parent_version
return self._service_get_request("rest/api/content/{id}/child".format(id=content_id), params=params,
callback=callback) | 0.007447 |
def relabel(image):
"""Given a labeled image, relabel each of the objects consecutively
image - a labeled 2-d integer array
returns - (labeled image, object count)
"""
#
# Build a label table that converts an old label # into
# labels using the new numbering scheme
#
unique_labels = np.unique(image[image!=0])
if len(unique_labels) == 0:
return (image,0)
consecutive_labels = np.arange(len(unique_labels))+1
label_table = np.zeros(unique_labels.max()+1, int)
label_table[unique_labels] = consecutive_labels
#
# Use the label table to remap all of the labels
#
new_image = label_table[image]
return (new_image,len(unique_labels)) | 0.008392 |
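A small worked example of the label-table trick used above (the array is illustrative):

```python
import numpy as np

image = np.array([[0, 4, 4],
                  [0, 0, 9],
                  [2, 0, 9]])

unique_labels = np.unique(image[image != 0])      # [2 4 9]
consecutive = np.arange(len(unique_labels)) + 1   # [1 2 3]
label_table = np.zeros(unique_labels.max() + 1, int)
label_table[unique_labels] = consecutive          # old label -> new label

print(label_table[image])
# [[0 2 2]
#  [0 0 3]
#  [1 0 3]]
```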
def print_ast(ast, indent=' ', initlevel=0, newline='\n', file=sys.stdout):
'''
Pretty print an ast node.
:param ast: the ast to print.
:param indent: how far to indent a newline.
:param initlevel: starting indent level
:param newline: The newline character.
:param file: file object to print to
To print a short ast you may want to use::
node = ast.parse(source)
print_ast(node, indent='', newline='')
'''
visitor = ASTPrinter(indent=indent, level=initlevel, newline=newline)
visitor.visit(ast)
visitor.dump(file=file) | 0.008224 |
def __extract_directory(self, path, files, destination):
"""Extracts a single directory to the specified directory on disk.
Args:
path (str):
Relative (to the root of the archive) path of the directory
to extract.
files (dict):
A dictionary of files from a *.asar file header.
destination (str):
The path to extract the files to.
"""
# assures the destination directory exists
destination_path = os.path.join(destination, path)
if not os.path.exists(destination_path):
os.makedirs(destination_path)
for name, contents in files.items():
item_path = os.path.join(path, name)
# objects that have a 'files' member are directories,
# recurse into them
if 'files' in contents:
self.__extract_directory(
item_path,
contents['files'],
destination
)
continue
self.__extract_file(item_path, contents, destination) | 0.001744 |
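For context, a rough sketch of the nested header layout __extract_directory walks; the exact asar field names here are an assumption, not taken from the original:

# Entries that carry a 'files' key are directories; everything else is a file.
example_files = {
    "package.json": {"size": 42, "offset": "0"},
    "src": {
        "files": {
            "index.js": {"size": 120, "offset": "42"},
        },
    },
}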
def skip(self):
"""
Advance the internal pointer to the end of the data
area in the stream. This allows the next call to
:meth:`Reader.read` to succeed, as though all the
data had been read by the application.
"""
self.stream.seek(self.bytes_remaining(), os.SEEK_CUR)
self._pos = self.length | 0.005634 |
def save(self):
"""
Save the state to a file.
"""
with open(self.path, 'w') as f:
f.write(yaml.dump(dict(self.d))) | 0.012658 |
def _share_project(self, destination, project, to_user, force_send, auth_role='', user_message='',
share_users=None):
"""
Send message to remote service to email/share project with to_user.
:param destination: str which type of sharing we are doing (SHARE_DESTINATION or DELIVER_DESTINATION)
:param project: RemoteProject project we are sharing
:param to_user: RemoteUser user we are sharing with
:param auth_role: str project role eg 'project_admin' email is customized based on this setting.
:param user_message: str message to be sent with the share
:param share_users: [RemoteUser] users to have this project shared with after delivery (delivery only)
        :return: the email address at which the user should soon receive a message
"""
from_user = self.remote_store.get_current_user()
share_user_ids = None
if share_users:
share_user_ids = [share_user.id for share_user in share_users]
item = D4S2Item(destination=destination,
from_user_id=from_user.id,
to_user_id=to_user.id,
project_id=project.id,
project_name=project.name,
auth_role=auth_role,
user_message=user_message,
share_user_ids=share_user_ids)
item.send(self.api, force_send)
return to_user.email | 0.004749 |
def _verify(self, valid_subscriptions, fix):
"""Check if `self` is valid roster item.
        A valid item must have a proper `subscription` value and a valid 'ask' value.
:Parameters:
- `valid_subscriptions`: sequence of valid subscription values
- `fix`: if `True` than replace invalid 'subscription' and 'ask'
values with the defaults
:Types:
- `fix`: `bool`
:Raise: `ValueError` if the item is invalid.
"""
if self.subscription not in valid_subscriptions:
if fix:
logger.debug("RosterItem.from_xml: got unknown 'subscription':"
" {0!r}, changing to None".format(self.subscription))
self.subscription = None
else:
raise ValueError("Bad 'subscription'")
if self.ask not in (None, u"subscribe"):
if fix:
logger.debug("RosterItem.from_xml: got unknown 'ask':"
" {0!r}, changing to None".format(self.ask))
self.ask = None
else:
raise ValueError("Bad 'ask'") | 0.003457 |
def Substitute(self, pattern):
"""Formats given pattern with this substitution environment.
    A pattern can contain placeholders for variables (`%%foo%%`) and scopes
    (`%%bar.baz%%`) that are replaced with concrete values in this substitution
    environment (specified in the constructor).
Args:
pattern: A pattern with placeholders to substitute.
Returns:
A pattern with placeholders substituted with concrete values.
"""
if isinstance(pattern, bytes):
substs = [re.escape(subst.encode("ascii")) for subst in self._substs]
regex = re.compile(b"|".join(substs))
def Replacement(match):
key = match.group(0).decode("ascii")
return self._substs[key].encode("utf-8")
elif isinstance(pattern, Text):
substs = [re.escape(subst) for subst in self._substs]
regex = re.compile("|".join(substs))
def Replacement(match):
key = match.group(0)
return self._substs[key]
else:
raise TypeError("Unexpected pattern type '{}'".format(type(pattern)))
if not substs:
return pattern
else:
return regex.sub(Replacement, pattern) | 0.008666 |
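A self-contained sketch of the single-regex substitution technique Substitute relies on; the placeholder names and values are illustrative, not the class's real state:

import re

substs = {"%%foo%%": "bar", "%%users.home%%": "/home/alice"}
regex = re.compile("|".join(re.escape(key) for key in substs))
text = "copy %%users.home%%/%%foo%%.txt"
print(regex.sub(lambda match: substs[match.group(0)], text))
# copy /home/alice/bar.txt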
def get_environment_paths(config, env):
"""
Get environment paths from given environment variable.
"""
if env is None:
return config.get(Config.DEFAULTS, 'environment')
# Config option takes precedence over environment key.
if config.has_option(Config.ENVIRONMENTS, env):
env = config.get(Config.ENVIRONMENTS, env).replace(' ', '').split(';')
else:
env = os.getenv(env)
if env:
env = env.split(os.pathsep)
return [i for i in env if i] | 0.001905 |
def _sample_names(files, kwargs):
"""
Make sample (or other) names.
Parameters:
-----------
files : list of string
Typically a list of file paths although could be any list of strings
that you want to make names for. If neither names nor define_sample_name
are provided, then files is returned as is.
kwargs : dict
kwargs from another function. Can include the following keys with
appropriate arguments.
names : list of strings
Names to use. Overrides define_sample_name if provided.
define_sample_name : function that takes string as input
Function mapping string to name. For instance, you may have a sample
name in a file path and use a regex to extract it.
"""
if 'define_sample_name' not in kwargs.keys():
define_sample_name = lambda x: x
else:
define_sample_name = kwargs['define_sample_name']
if 'names' in kwargs.keys():
names = kwargs['names']
else:
names = [define_sample_name(f) for f in files]
assert len(names) == len(files)
return names | 0.004456 |
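For illustration, a hypothetical define_sample_name that extracts a sample id from a file path; the regex and file paths are assumptions, not part of the original code:

import re

files = ['/data/runA/sample_S1.bam', '/data/runB/sample_S2.bam']
kwargs = {'define_sample_name': lambda p: re.search(r'sample_(S\d+)', p).group(1)}
print(_sample_names(files, kwargs))
# ['S1', 'S2']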
def use_theme(theme):
"""Make the given theme current.
There are two included themes: light_theme, dark_theme.
"""
global current
current = theme
import scene
if scene.current is not None:
scene.current.stylize() | 0.004016 |
def getbydatatype(data_type, besteffort=True):
"""Get schema class by data type.
:param type data_type: data type from where get schema class.
    :param bool besteffort: if True and data_type is not registered, iterate
        over all registered data_types and stop as soon as one is a subclass
        of the input data_type.
:return: sub class of Schema.
:rtype: type
"""
return _REGISTRY.getbydatatype(data_type=data_type, besteffort=besteffort) | 0.002146 |
def project_data_dir(self, *args) -> str:
""" Directory where to store data """
return os.path.normpath(os.path.join(self.project_dir, 'data', *args)) | 0.012048 |
def plot_latent_scatter(self, labels=None,
which_indices=None,
legend=True,
plot_limits=None,
marker='<>^vsd',
num_samples=1000,
projection='2d',
**kwargs):
"""
Plot a scatter plot of the latent space.
:param array-like labels: a label for each data point (row) of the inputs
:param (int, int) which_indices: which input dimensions to plot against each other
:param bool legend: whether to plot the legend on the figure
:param plot_limits: the plot limits for the plot
:type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax))
:param str marker: markers to use - cycle if more labels then markers are given
:param kwargs: the kwargs for the scatter plots
"""
canvas, projection, kwargs, sig_dims = _new_canvas(self, projection, kwargs, which_indices)
X, _, _ = get_x_y_var(self)
if labels is None:
labels = np.ones(self.num_data)
legend = False
else:
legend = find_best_layout_for_subplots(len(np.unique(labels)))[1]
scatters = _plot_latent_scatter(canvas, X, sig_dims, labels, marker, num_samples, projection=projection, **kwargs)
return pl().add_to_canvas(canvas, dict(scatter=scatters), legend=legend) | 0.003623 |
def get_default_target_names(estimator, num_targets=None):
"""
Return a vector of target names: "y" if there is only one target,
and "y0", "y1", ... if there are multiple targets.
"""
if num_targets is None:
if len(estimator.coef_.shape) <= 1:
num_targets = 1
else:
num_targets, _ = estimator.coef_.shape
if num_targets == 1:
target_names = ['y']
else:
target_names = ['y%d' % i for i in range(num_targets)]
return np.array(target_names) | 0.001905 |
def parse_partlist(str):
    '''Parse partlist text delivered by Eagle.
    The header is converted to lowercase.
:param str: input string
:rtype: tuple of header list and dict list: (['part','value',..], [{'part':'C1', 'value':'1n'}, ..])
'''
lines = str.strip().splitlines()
    lines = [line for line in lines if line]  # a list (not a filter object) so it stays indexable on Python 3
hind = header_index(lines)
if hind is None:
log.debug('empty partlist found')
return ([], [])
header_line = lines[hind]
header = header_line.split(' ')
    header = [h for h in header if h]
positions = [header_line.index(x) for x in header]
header = [x.strip().split()[0].lower() for x in header]
data_lines = lines[hind + 1:]
def parse_data_line(line):
y = [(h, line[pos1:pos2].strip()) for h, pos1, pos2 in zip(
header, positions, positions[1:] + [1000])]
return dict(y)
data = [parse_data_line(x) for x in data_lines]
return (header, data) | 0.002112 |
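A worked example of the fixed-width column slicing parse_partlist performs; the partlist fragment below is made up, and header_index/log from the original module are not needed here:

header_line = 'Part     Value    Package'
data_line   = 'C1       100n     0805'
header = [h for h in header_line.split(' ') if h]        # ['Part', 'Value', 'Package']
positions = [header_line.index(h) for h in header]       # [0, 9, 18]
row = {h.lower(): data_line[p1:p2].strip()
       for h, p1, p2 in zip(header, positions, positions[1:] + [1000])}
print(row)
# {'part': 'C1', 'value': '100n', 'package': '0805'}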
def coalescence_waiting_times(self, backward=True):
'''Generator over the waiting times of successive coalescence events
Args:
``backward`` (``bool``): ``True`` to go backward in time (i.e., leaves to root), otherwise ``False``
'''
if not isinstance(backward, bool):
raise TypeError("backward must be a bool")
times = list(); lowest_leaf_dist = float('-inf')
for n,d in self.distances_from_root():
if len(n.children) > 1:
times.append(d)
elif len(n.children) == 0 and d > lowest_leaf_dist:
lowest_leaf_dist = d
times.append(lowest_leaf_dist)
times.sort(reverse=backward)
for i in range(len(times)-1):
yield abs(times[i]-times[i+1]) | 0.006281 |
def _add_parser_arguments_analyze(self, subparsers):
"""Create a parser for the 'analyze' subcommand.
"""
lyze_pars = subparsers.add_parser(
"analyze",
help="Perform basic analysis on this catalog.")
lyze_pars.add_argument(
'--count', '-c', dest='count',
default=False, action='store_true',
help='Determine counts of entries, files, etc.')
return lyze_pars | 0.004367 |
def get_display_name(self):
"""Creates a display name"""
return DisplayText(text=self.id_.get_identifier(),
language_type=DEFAULT_LANGUAGE_TYPE,
script_type=DEFAULT_SCRIPT_TYPE,
format_type=DEFAULT_FORMAT_TYPE,) | 0.006494 |
def _recurse(data, obj):
"""Iterates over all children of the current object, gathers the contents
contributing to the resulting PGFPlots file, and returns those.
"""
content = _ContentManager()
for child in obj.get_children():
# Some patches are Spines, too; skip those entirely.
# See <https://github.com/nschloe/matplotlib2tikz/issues/277>.
if isinstance(child, mpl.spines.Spine):
continue
if isinstance(child, mpl.axes.Axes):
ax = axes.Axes(data, child)
if ax.is_colorbar:
continue
# add extra axis options
if data["extra axis options [base]"]:
ax.axis_options.extend(data["extra axis options [base]"])
data["current mpl axes obj"] = child
data["current axes"] = ax
# Run through the child objects, gather the content.
data, children_content = _recurse(data, child)
# populate content and add axis environment if desired
if data["add axis environment"]:
content.extend(
ax.get_begin_code() + children_content + [ax.get_end_code(data)], 0
)
else:
content.extend(children_content, 0)
# print axis environment options, if told to show infos
if data["show_info"]:
print("=========================================================")
print("These would have been the properties of the environment:")
print("".join(ax.get_begin_code()[1:]))
print("=========================================================")
elif isinstance(child, mpl.lines.Line2D):
data, cont = line2d.draw_line2d(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.image.AxesImage):
data, cont = img.draw_image(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.patches.Patch):
data, cont = patch.draw_patch(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(
child, (mpl.collections.PatchCollection, mpl.collections.PolyCollection)
):
data, cont = patch.draw_patchcollection(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.collections.PathCollection):
data, cont = path.draw_pathcollection(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.collections.LineCollection):
data, cont = line2d.draw_linecollection(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.collections.QuadMesh):
data, cont = qmsh.draw_quadmesh(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.legend.Legend):
data = legend.draw_legend(data, child)
if data["legend colors"]:
content.extend(data["legend colors"], 0)
elif isinstance(child, (mpl.text.Text, mpl.text.Annotation)):
data, cont = text.draw_text(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, (mpl.axis.XAxis, mpl.axis.YAxis)):
pass
else:
warnings.warn(
"matplotlib2tikz: Don't know how to handle object {}.".format(
type(child)
)
)
return data, content.flatten() | 0.001649 |
def get_compatible_generator_action(self, filename):
"""
Return the **first** compatible :class:`GeneratorAction` for a given filename or ``None`` if none is found.
Args:
filename (str): The filename of the template to process.
"""
# find first compatible generator action
for action in self.__generator_actions:
if action.act_on_file(filename):
return action
return None | 0.006397 |
def fix_remaining_type_comments(node):
"""Converts type comments in `node` to proper annotated assignments."""
assert node.type == syms.file_input
last_n = None
for n in node.post_order():
if last_n is not None:
if n.type == token.NEWLINE and is_assignment(last_n):
fix_variable_annotation_type_comment(n, last_n)
elif n.type == syms.funcdef and last_n.type == syms.suite:
fix_signature_annotation_type_comment(n, last_n, offset=1)
elif n.type == syms.async_funcdef and last_n.type == syms.suite:
fix_signature_annotation_type_comment(n, last_n, offset=2)
last_n = n | 0.001464 |
def insert_empty_columns(self, x: int, amount: int = 1) -> None:
"""Insert a number of columns after the given column."""
def transform_columns(
column: Union[int, float],
row: Union[int, float]
) -> Tuple[Union[int, float], Union[int, float]]:
return column + (amount if column >= x else 0), row
self._transform_coordinates(transform_columns) | 0.004773 |
def json(
self,
*,
include: 'SetStr' = None,
exclude: 'SetStr' = None,
by_alias: bool = False,
skip_defaults: bool = False,
encoder: Optional[Callable[[Any], Any]] = None,
**dumps_kwargs: Any,
) -> str:
"""
Generate a JSON representation of the model, `include` and `exclude` arguments as per `dict()`.
`encoder` is an optional function to supply as `default` to json.dumps(), other arguments as per `json.dumps()`.
"""
encoder = cast(Callable[[Any], Any], encoder or self._json_encoder)
return json.dumps(
self.dict(include=include, exclude=exclude, by_alias=by_alias, skip_defaults=skip_defaults),
default=encoder,
**dumps_kwargs,
) | 0.007547 |
def ret(f, *args, **kwargs):
"""Automatically log progress on function entry and exit. Default logging
value: info. The function's return value will be included in the logs.
*Logging with values contained in the parameters of the decorated function*
Message (args[0]) may be a string to be formatted with parameters passed to
the decorated function. Each '{varname}' will be replaced by the value of
the parameter of the same name.
*Keyword parameters*
- log :: integer
- Specifies a custom level of logging to pass to the active logger.
- Default: INFO
*Exceptions:*
- IndexError and ValueError
- will be returned if *args contains a string that does not correspond to
a parameter name of the decorated function, or if there are more '{}'s
than there are *args.
"""
kwargs.update({'print_return': True})
return _stump(f, *args, **kwargs) | 0.001078 |
def debug_processor(self, _type, text):
"""
Process request details.
0: CURLINFO_TEXT
1: CURLINFO_HEADER_IN
2: CURLINFO_HEADER_OUT
3: CURLINFO_DATA_IN
4: CURLINFO_DATA_OUT
5: CURLINFO_unrecognized_type
"""
if _type == pycurl.INFOTYPE_HEADER_OUT:
if isinstance(text, six.text_type):
text = text.encode('utf-8')
self.request_head += text
if _type == pycurl.INFOTYPE_DATA_OUT:
            # Until version 7.19.5.2, pycurl passed unicode in the `text`
            # variable here, so this code would probably fail or behave
            # unexpectedly with pycurl<7.19.5.2.
if isinstance(text, six.text_type):
text = text.encode('utf-8')
self.request_body += text
#if _type == pycurl.INFOTYPE_TEXT:
# if self.request_log is None:
# self.request_log = ''
# self.request_log += text
if self.verbose_logging:
if _type in (pycurl.INFOTYPE_TEXT, pycurl.INFOTYPE_HEADER_IN,
pycurl.INFOTYPE_HEADER_OUT):
marker_types = {
pycurl.INFOTYPE_TEXT: 'i',
pycurl.INFOTYPE_HEADER_IN: '<',
pycurl.INFOTYPE_HEADER_OUT: '>',
}
marker = marker_types[_type]
logger.debug('%s: %s', marker, text.rstrip()) | 0.002008 |
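A hedged sketch of how a debug callback like this is typically wired into pycurl (VERBOSE and DEBUGFUNCTION are standard pycurl options; the URL is illustrative):

import pycurl

def debug_callback(info_type, message):
    # info_type corresponds to the pycurl.INFOTYPE_* constants handled above
    print(info_type, message)

curl = pycurl.Curl()
curl.setopt(pycurl.URL, 'http://example.com/')
curl.setopt(pycurl.VERBOSE, 1)                 # DEBUGFUNCTION only fires when verbose is on
curl.setopt(pycurl.DEBUGFUNCTION, debug_callback)
curl.perform()
curl.close()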
def system_drop_column_family(self, column_family):
"""
        Drops a column family. Returns the new schema id.
Parameters:
- column_family
"""
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_system_drop_column_family(column_family)
return d | 0.003344 |