text (string, lengths 78–104k) | score (float64, 0–0.18) |
---|---|
def authorized_create_user(self, identities=None, primary=None, permissions=None):
"""Creates Vingd user (profile & account), links it with the provided
identities (to be verified later), and sets the delegate-user
permissions (creator being the delegate). Returns Vingd user's `huid`
(hashed user id).
Example::
vingd.authorized_create_user(
identities={"facebook": "12312312", "mail": "[email protected]"},
primary="facebook",
permissions=["get.account.balance", "purchase.object"]
)
If `identities` and `primary` are unspecified, a "zombie" ("headless")
account is created (i.e. account with no identities associated,
user-unreachable).
:rtype: ``dict``
:returns: ``{'huid': <huid>}``
:raises GeneralException:
:resource: ``id/users/``
:access: authorized users with ACL flag ``user.create``
"""
return self.request('post', 'id/users/', json.dumps({
'identities': identities,
'primary_identity': primary,
'delegate_permissions': permissions
})) | 0.007235 |
def rename(self, new_folder_name):
"""Renames the Folder to the provided name.
Args:
new_folder_name: A string of the replacement name.
Raises:
AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token.
Returns:
A new Folder representing the folder with the new name on Outlook.
"""
headers = self.headers
endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/' + self.id
payload = '{ "DisplayName": "' + new_folder_name + '"}'
r = requests.patch(endpoint, headers=headers, data=payload)
if check_response(r):
return_folder = r.json()
return self._json_to_folder(self.account, return_folder) | 0.005083 |
def _get_default_mapping(self, obj):
"""Return default mapping if there are no special needs."""
mapping = {v: k for k, v in obj.TYPE_MAPPING.items()}
mapping.update({
fields.Email: text_type,
fields.Dict: dict,
fields.Url: text_type,
fields.List: list,
fields.LocalDateTime: datetime.datetime,
fields.Nested: '_from_nested_schema',
})
return mapping | 0.004329 |
def between(start, delta, end=None):
"""Return an iterator between this date till given end point.
Example usage:
>>> d = datetime_tz.smartparse("5 days ago")
>>> for i in between(d, timedelta(days=1), datetime_tz.now()):
...     print(i)
2008/05/12 11:45
2008/05/13 11:45
2008/05/14 11:45
2008/05/15 11:45
2008/05/16 11:45
Args:
start: The date to start at.
delta: The interval to iterate with.
end: (Optional) Date to end at. If not given the iterator will never
terminate.
Yields:
datetime_tz objects.
"""
toyield = start
while end is None or toyield < end:
yield toyield
toyield += delta | 0.004071 |
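A runnable sketch of the generator above using standard-library datetimes (the real code yields datetime_tz objects, but only `<` and `+=` are required of the values):
from datetime import datetime, timedelta

start = datetime(2008, 5, 12, 11, 45)
for d in between(start, timedelta(days=1), start + timedelta(days=5)):
    print(d)
# prints five datetimes, 2008-05-12 11:45:00 through 2008-05-16 11:45:00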
def set(self, key, value):
"""
Sets the value for a specific requirement.
:param key: Name of requirement to be set
:param value: Value to set for requirement key
:return: Nothing, modifies requirement
"""
if key == "tags":
self._set_tag(tags=value)
else:
if isinstance(value, dict) and key in self._requirements and isinstance(
self._requirements[key], dict):
self._requirements[key] = merge(self._requirements[key], value)
else:
self._requirements[key] = value | 0.004878 |
def fields(self):
'''Return a tuple of ordered fields for this :class:`ColumnTS`.'''
key = self.id + ':fields'
encoding = self.client.encoding
return tuple(sorted((f.decode(encoding)
for f in self.client.smembers(key)))) | 0.006993 |
def replace_version_string(content, variable, new_version):
"""
Given the content of a file, finds the version string and updates it.
:param content: The file contents
:param variable: The version variable name as a string
:param new_version: The new version number as a string
:return: A string with the updated version number
"""
return re.sub(
r'({0} ?= ?["\'])\d+\.\d+(?:\.\d+)?(["\'])'.format(variable),
r'\g<1>{0}\g<2>'.format(new_version),
content
) | 0.001934 |
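A quick illustration of the regex above; the variable name and version strings are made up for the example:
content = 'version = "1.2.3"\n'
print(replace_version_string(content, 'version', '1.3.0'))
# version = "1.3.0"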
def get_abs_filename_with_sub_path(sub_path, filename):
"""
Build the absolute filename of a file located in a sub-directory one level below the current path;
:param:
* sub_path: (string) name of the sub-directory below the current path
* filename: (string) name of the file in that sub-directory
:returns:
* a tuple of two values, flag and the filename, described below
* flag: (bool) True if the file exists, False if it does not
* abs_filename: (string) the absolute, path-qualified filename for the given filename
Example::
print('--- get_abs_filename_with_sub_path demo ---')
# define sub dir
path_name = 'sub_dir'
# define a file that does not exist
filename = 'test_file.txt'
abs_filename = get_abs_filename_with_sub_path(path_name, filename)
# returns False and the abs filename
print(abs_filename)
# define a file that exists
filename = 'demo.txt'
abs_filename = get_abs_filename_with_sub_path(path_name, filename)
# returns True and the abs filename
print(abs_filename)
print('---')
Output::
--- get_abs_filename_with_sub_path demo ---
(False, '/Users/****/Documents/dev_python/fishbase/demo/sub_dir/test_file.txt')
(True, '/Users/****/Documents/dev_python/fishbase/demo/sub_dir/demo.txt')
---
"""
try:
cur_path = pathlib.Path.cwd()
abs_filename = cur_path / pathlib.Path(sub_path) / filename
flag = pathlib.Path.is_file(abs_filename)
# convert the Path object to a string
return flag, str(abs_filename)
except Exception:
flag = False
return flag, None | 0.004326 |
def FileEntryExistsByPathSpec(self, path_spec):
"""Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): a path specification.
Returns:
bool: True if the file entry exists, false otherwise.
"""
location = getattr(path_spec, 'location', None)
if location is None:
return False
is_device = False
if platform.system() == 'Windows':
# Note that os.path.exists() returns False for Windows device files so
# instead use libsmdev to do the check.
try:
is_device = pysmdev.check_device(location)
except IOError as exception:
# Since pysmdev will raise IOError when it has no access to the device
# we check if the exception message contains ' access denied ' and
# return true.
# Note that exception.message no longer works in Python 3.
exception_string = str(exception)
if not isinstance(exception_string, py2to3.UNICODE_TYPE):
exception_string = py2to3.UNICODE_TYPE(
exception_string, errors='replace')
if ' access denied ' in exception_string:
is_device = True
# Note that os.path.exists() returns False for broken symbolic links hence
# an additional check using os.path.islink() is necessary.
return is_device or os.path.exists(location) or os.path.islink(location) | 0.005768 |
def GET_name_history(self, path_info, name):
"""
Get the history of a name or subdomain.
Requires 'page' in the query string
return the history on success
return 400 on invalid start_block or end_block
return 502 on failure to query blockstack server
"""
if not check_name(name) and not check_subdomain(name):
return self._reply_json({'error': 'Invalid name or subdomain'}, status_code=400)
qs_values = path_info['qs_values']
page = qs_values.get('page', None)
if page is None:
page = "0" # compatibility
try:
assert len(page) < 10
page = int(page)
assert page >= 0
assert page <= 2**32 - 1
except:
log.error("Invalid page")
self._reply_json({'error': 'Invalid page'}, status_code=400)
return
blockstackd_url = get_blockstackd_url()
res = blockstackd_client.get_name_history_page(name, page, hostport=blockstackd_url)
if json_is_error(res):
log.error('Failed to get name history for {}: {}'.format(name, res['error']))
return self._reply_json({'error': res['error']}, status_code=res.get('http_status', 502))
return self._reply_json(res['history']) | 0.00602 |
def _check_dep_time_is_valid(self, dep_time):
"""
A simple check that connections arrive in descending order of departure time
and that no departure time has been "skipped".
Parameters
----------
dep_time
Returns
-------
None
"""
assert dep_time <= self._min_dep_time, "Labels should be entered in decreasing order of departure time."
dep_time_index = self.dep_times_to_index[dep_time]
if self._min_dep_time < float('inf'):
min_dep_index = self.dep_times_to_index[self._min_dep_time]
assert min_dep_index == dep_time_index or (min_dep_index == dep_time_index - 1), \
"dep times should be ordered sequentially"
else:
assert dep_time_index == 0, "first dep_time index should be zero (ensuring that all connections are properly handled)"
self._min_dep_time = dep_time | 0.006349 |
def delete(filename, conn=None):
"""
deletes a file
filename being a value in the "id" key
:param filename: <str>
:param conn: <rethinkdb.DefaultConnection>
:return: <dict>
"""
return RBF.filter((r.row[PRIMARY_FIELD] == filename) | (r.row[PARENT_FIELD] == filename)).delete().run(conn) | 0.006309 |
def mainloop(self):
""" The main loop.
"""
self._validate_config()
config.engine.load_config()
# Defaults for process control paths
if not self.options.no_fork and not self.options.guard_file:
self.options.guard_file = os.path.join(config.config_dir, "run/pyrotorque")
if not self.options.pid_file:
self.options.pid_file = os.path.join(config.config_dir, "run/pyrotorque.pid")
# Process control
if self.options.status or self.options.stop or self.options.restart:
if self.options.pid_file and os.path.exists(self.options.pid_file):
running, pid = osmagic.check_process(self.options.pid_file)
else:
running, pid = False, 0
if self.options.stop or self.options.restart:
if running:
os.kill(pid, signal.SIGTERM)
# Wait for termination (max. 10 secs)
for _ in range(100):
running, _ = osmagic.check_process(self.options.pid_file)
if not running:
break
time.sleep(.1)
self.LOG.info("Process #%d stopped." % (pid))
elif pid:
self.LOG.info("Process #%d NOT running anymore." % (pid))
else:
self.LOG.info("No pid file '%s'" % (self.options.pid_file or "<N/A>"))
else:
self.LOG.info("Process #%d %s running." % (pid, "UP and" if running else "NOT"))
if self.options.restart:
if self.options.pid_file:
running, pid = osmagic.check_process(self.options.pid_file)
if running:
self.return_code = error.EX_TEMPFAIL
return
else:
self.return_code = error.EX_OK if running else error.EX_UNAVAILABLE
return
# Check for guard file and running daemon, abort if not OK
try:
osmagic.guard(self.options.pid_file, self.options.guard_file)
except EnvironmentError as exc:
self.LOG.debug(str(exc))
self.return_code = error.EX_TEMPFAIL
return
# Detach, if not disabled via option
if not self.options.no_fork: # or getattr(sys.stdin, "isatty", lambda: False)():
osmagic.daemonize(pidfile=self.options.pid_file, logfile=logutil.get_logfile())
time.sleep(.05) # let things settle a little
signal.signal(signal.SIGTERM, _raise_interrupt)
# Set up services
from apscheduler.scheduler import Scheduler
self.sched = Scheduler(config.torque)
self._init_wsgi_server()
# Run services
self.sched.start()
try:
self._add_jobs()
# TODO: daemonize here, or before the scheduler starts?
self._run_forever()
finally:
self.sched.shutdown()
if self.wsgi_server:
self.wsgi_server.task_dispatcher.shutdown()
self.wsgi_server = None
if self.options.pid_file:
try:
os.remove(self.options.pid_file)
except EnvironmentError as exc:
self.LOG.warn("Failed to remove pid file '%s' (%s)" % (self.options.pid_file, exc))
self.return_code = error.EX_IOERR | 0.003705 |
def switch_delete_record_for_nic(self, userid, interface):
"""Remove userid switch record from switch table."""
with get_network_conn() as conn:
conn.execute("DELETE FROM switch WHERE userid=? and interface=?",
(userid, interface))
LOG.debug("Switch record for user %s with nic %s is removed from "
"switch table" % (userid, interface)) | 0.004728 |
async def remember(request, response, identity, **kwargs):
"""Remember identity into response.
The action is performed by identity_policy.remember()
Usually the identity is stored in user cookies somehow but may be
pushed into custom header also.
"""
assert isinstance(identity, str), identity
assert identity
identity_policy = request.config_dict.get(IDENTITY_KEY)
if identity_policy is None:
text = ("Security subsystem is not initialized, "
"call aiohttp_security.setup(...) first")
# in order to see meaningful exception message both: on console
# output and rendered page we add same message to *reason* and
# *text* arguments.
raise web.HTTPInternalServerError(reason=text, text=text)
await identity_policy.remember(request, response, identity, **kwargs) | 0.001166 |
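A hedged sketch of the typical call site, following the aiohttp_security documentation pattern; check_credentials is a hypothetical application helper:
from aiohttp import web
from aiohttp_security import remember

async def handle_login(request):
    form = await request.post()
    if await check_credentials(form['login'], form['password']):  # hypothetical helper
        response = web.HTTPFound('/')
        await remember(request, response, form['login'])
        return response
    raise web.HTTPUnauthorized()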
def compact(*args):
"""Returns a new list after removing any non-true values"""
use_comma = True
if len(args) == 1 and isinstance(args[0], List):
use_comma = args[0].use_comma
args = args[0]
return List(
[arg for arg in args if arg],
use_comma=use_comma,
) | 0.003236 |
def parse_spss_datafile(path, **kwargs):
"""
Parse an SPSS data file.
Arguments:
path {str} -- path to the header file.
**kwargs {[dict]} -- any other arguments that may be passed in
"""
data_clean = []
with codecs.open(path, 'r', kwargs.get('encoding', 'latin-1')) as file_:
raw_file = file_.read()
data_clean = raw_file.split('\r\n')
return exclude_empty_values(data_clean) | 0.002331 |
def servicegroup_exists(sg_name, sg_type=None, **connection_args):
'''
Checks if a service group exists
CLI Example:
.. code-block:: bash
salt '*' netscaler.servicegroup_exists 'serviceGroupName'
'''
sg = _servicegroup_get(sg_name, **connection_args)
if sg is None:
return False
if sg_type is not None and sg_type.upper() != sg.get_servicetype():
return False
return True | 0.002304 |
def main():
"""
NAME
basemap_magic.py
NB: this program no longer maintained - use plot_map_pts.py for greater functionality
DESCRIPTION
makes a map of locations in er_sites.txt
SYNTAX
basemap_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f SFILE, specify er_sites.txt or pmag_results.txt format file
-res [c,l,i,h] specify resolution (crude,low,intermediate,high)
-etp plot the etopo20 topographic mesh
-pad [LAT LON] pad bounding box by LAT/LON (default is [.5 .5] degrees)
-grd SPACE specify grid spacing
-prj [lcc] , specify projection (lcc=lambert conic conformable), default is mercator
-n print site names (default is not)
-l print location names (default is not)
-o color ocean blue/land green (default is not)
-R don't plot details of rivers
-B don't plot national/state boundaries, etc.
-sav save plot and quit quietly
-fmt [png,svg,eps,jpg,pdf] specify format for output, default is pdf
DEFAULTS
SFILE: 'er_sites.txt'
resolution: intermediate
saved images are in pdf
"""
dir_path = '.'
sites_file = 'er_sites.txt'
ocean = 0
res = 'i'
proj = 'merc'
prn_name = 0
prn_loc = 0
fancy = 0
rivers, boundaries = 0, 0
padlon, padlat, gridspace, details = .5, .5, .5, 1
fmt = 'pdf'
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind = sys.argv.index('-f')
sites_file = sys.argv[ind+1]
if '-res' in sys.argv:
ind = sys.argv.index('-res')
res = sys.argv[ind+1]
if '-etp' in sys.argv:
fancy = 1
if '-n' in sys.argv:
prn_name = 1
if '-l' in sys.argv:
prn_loc = 1
if '-o' in sys.argv:
ocean = 1
if '-R' in sys.argv:
rivers = 0
if '-B' in sys.argv:
boundaries = 0
if '-prj' in sys.argv:
ind = sys.argv.index('-prj')
proj = sys.argv[ind+1]
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind+1]
verbose = pmagplotlib.verbose
if '-sav' in sys.argv:
verbose = 0
if '-pad' in sys.argv:
ind = sys.argv.index('-pad')
padlat = float(sys.argv[ind+1])
padlon = float(sys.argv[ind+2])
if '-grd' in sys.argv:
ind = sys.argv.index('-grd')
gridspace = float(sys.argv[ind+1])
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
sites_file = dir_path+'/'+sites_file
location = ""
FIG = {'map': 1}
pmagplotlib.plot_init(FIG['map'], 6, 6)
# read in er_sites file
Sites, file_type = pmag.magic_read(sites_file)
if 'results' in file_type:
latkey = 'average_lat'
lonkey = 'average_lon'
namekey = 'pmag_result_name'
lockey = 'er_location_names'
else:
latkey = 'site_lat'
lonkey = 'site_lon'
namekey = 'er_site_name'
lockey = 'er_location_name'
lats, lons = [], []
slats, slons = [], []
names, locs = [], []
for site in Sites:
if prn_loc == 1 and location == "":
location = site['er_location_name']
lats.append(float(site[latkey]))
l = float(site[lonkey])
if l < 0:
l = l+360. # make positive
lons.append(l)
if prn_name == 1:
names.append(site[namekey])
if prn_loc == 1:
locs.append(site[lockey])
for lat in lats:
slats.append(lat)
for lon in lons:
slons.append(lon)
Opts = {'res': res, 'proj': proj, 'loc_name': locs, 'padlon': padlon,
        'padlat': padlat, 'latmin': numpy.min(slats) - padlat,
        'latmax': numpy.max(slats) + padlat, 'lonmin': numpy.min(slons) - padlon,
        'lonmax': numpy.max(slons) + padlon, 'sym': 'ro', 'boundinglat': 0.,
        'pltgrid': 1.}
Opts['lon_0'] = 0.5*(numpy.min(slons)+numpy.max(slons))
Opts['lat_0'] = 0.5*(numpy.min(slats)+numpy.max(slats))
Opts['names'] = names
Opts['gridspace'] = gridspace
Opts['details'] = {'coasts': 1, 'rivers': 1,
'states': 1, 'countries': 1, 'ocean': 0}
if ocean == 1:
Opts['details']['ocean'] = 1
if rivers == 1:
Opts['details']['rivers'] = 0
if boundaries == 1:
Opts['details']['states'] = 0
Opts['details']['countries'] = 0
Opts['details']['fancy'] = fancy
pmagplotlib.plot_map(FIG['map'], lats, lons, Opts)
if verbose:
pmagplotlib.draw_figs(FIG)
files = {}
for key in list(FIG.keys()):
files[key] = 'Site_map'+'.'+fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles = {}
titles['map'] = 'Site Map'
FIG = pmagplotlib.add_borders(FIG, titles, black, purple)
pmagplotlib.save_plots(FIG, files)
elif verbose:
ans = input(" S[a]ve to save plot, Return to quit: ")
if ans == "a":
pmagplotlib.save_plots(FIG, files)
else:
pmagplotlib.save_plots(FIG, files) | 0.00155 |
def get_groups(self, username):
""" Get a user's groups
:param username: 'key' attribute of the user
:type username: string
:rtype: list of groups
"""
try:
return self.users[username]['groups']
except Exception as e:
raise UserDoesntExist(username, self.backend_name) | 0.005747 |
def _json_clean(d):
"""Cleans the specified python `dict` by converting any tuple keys to
strings so that they can be serialized by JSON.
Args:
d (dict): python dictionary to clean up.
Returns:
dict: cleaned-up dictionary.
"""
result = {}
compkeys = {}
for k, v in d.items():
if not isinstance(k, tuple):
result[k] = v
else:
#v is a list of entries for instance methods/constructors on the
#UUID of the key. Instead of using the composite tuple keys, we
#switch them for a string using the
key = "c.{}".format(id(k))
result[key] = v
compkeys[key] = k
return (result, compkeys) | 0.006831 |
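A quick illustration; the exact "c.<id>" key depends on the runtime id() of the tuple:
import json

data = {"name": "demo", ("Klass", "method"): ["entry"]}
cleaned, compkeys = _json_clean(data)
json.dumps(cleaned)  # serializes now that every key is a string
print(compkeys)      # maps the generated string key back to the original tuple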
def isHcl(s):
'''
Detects whether a string is JSON or HCL
:param s: String that may contain HCL or JSON
:returns: True if HCL, False if JSON, raises ValueError
if neither
'''
for c in s:
if c.isspace():
continue
if c == '{':
return False
else:
return True
raise ValueError("No HCL object could be decoded") | 0.006757 |
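Two quick sanity checks of the detection logic (a whitespace-only string raises ValueError):
print(isHcl('{"a": 1}'))          # False -> treated as JSON
print(isHcl('variable "a" {}'))   # True  -> treated as HCL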
def _expand_alts_and_remove_duplicates_in_list(cls, vcf_records, ref_seq, indel_gap=100):
'''Input: list of VCF records, all from the same CHROM. ref_seq = sequence
of that CHROM. Expands any record in the list that has >ALT, into
one record per ALT. Removes duplicated records, where REF and ALT
are the same (at the same position!), or where there is the same
indel more than once, but written in a different way (eg indel in
homopolymer run can be put in >1 way in a VCF). Checks indels
are the same within indel_gap nucleotides of each other'''
expanded_vcf_records = VcfClusterer._expand_alts_in_vcf_record_list(vcf_records)
new_vcf_records = [x for x in expanded_vcf_records if not x.is_snp()]
for i in range(len(new_vcf_records) - 1):
j = i + 1
while j < len(new_vcf_records) and new_vcf_records[i].ref_end_pos() + indel_gap > new_vcf_records[j].POS:
if new_vcf_records[i].is_the_same_indel(new_vcf_records[j], ref_seq):
new_vcf_records.pop(j)
else:
j += 1
new_vcf_records.extend([x for x in expanded_vcf_records if x.is_snp()])
new_vcf_records.sort(key=operator.attrgetter('POS'))
return new_vcf_records | 0.006093 |
def parsedeglat (latstr):
"""Parse a latitude formatted as sexagesimal degrees into an angle.
This function converts a textual representation of a latitude, measured in
degrees, into a floating point value measured in radians. The format of
*latstr* is very limited: it may not have leading or trailing whitespace,
and the components of the sexagesimal representation must be separated by
colons. The input must therefore resemble something like
``"-00:12:34.5"``. A :exc:`ValueError` will be raised if the input does
not resemble this template. Latitudes greater than 90 or less than -90
degrees are not allowed.
"""
deg = _parsesexagesimal (latstr, 'latitude', True)
if abs (deg) > 90:
raise ValueError ('illegal latitude specification: ' + latstr)
return deg * D2R | 0.006031 |
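For example, assuming D2R is the usual degrees-to-radians factor, the docstring's sample input converts back as expected:
import math

lat = parsedeglat('-00:12:34.5')
print(math.degrees(lat))  # roughly -0.2096 degrees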
def cable_page_by_id(reference_id):
"""\
Experimental: Returns the HTML page of the cable identified by `reference_id`.
>>> cable_page_by_id('09BERLIN1167') is not None
True
>>> cable_page_by_id('22BERLIN1167') is None
True
>>> cable_page_by_id('09MOSCOW3010') is not None
True
>>> cable_page_by_id('10MADRID87') is not None
True
>>> cable_page_by_id('10MUSCAT103') is not None
True
"""
global _CABLEID2MONTH
def wikileaks_id(reference_id):
if reference_id in consts.INVALID_CABLE_IDS.values():
for k, v in consts.INVALID_CABLE_IDS.iteritems():
if v == reference_id:
return k
return reference_id
def wikileaks_url(wl_id):
m = _CABLEID2MONTH.get(wl_id)
if m is None:
return None
y = wl_id[:2]
y = u'19' + y if int(y) > 10 else u'20' + y
return u'https://wikileaks.org/cable/%s/%s/%s' % (y, m.zfill(2), wl_id)
if _CABLEID2MONTH is None:
with gzip.open(os.path.join(os.path.dirname(__file__), 'cable2month.csv.gz'), 'r') as f:
reader = csv.reader(f)
_CABLEID2MONTH = dict(reader)
wl_id = wikileaks_id(reference_id)
wl_url = wikileaks_url(wl_id)
if wl_url is None:
# The cable reference is not known, try to consult Cablegatesearch.
html = _fetch_url(_CGSN_BASE + wl_id)
m = _CGSN_WL_SOURCE_SEARCH(html)
wl_url = m.group(1) if m else None
if wl_url is None:
return None
return _fetch_url(wl_url) | 0.002548 |
def from_es(self, hit):
"""Returns a Django model instance, using a document from Elasticsearch"""
doc = hit.copy()
klass = shallow_class_factory(self.model)
# We can pass in the entire source, except when we have a non-indexable many-to-many
for field in self.model._meta.get_fields():
if not field.auto_created and field.many_to_many:
if not issubclass(field.rel.to, Indexable):
if field.name in doc["_source"]:
del doc["_source"][field.name]
# if field.one_to_many:
# if field.name in doc["_source"]:
# del doc["_source"][field.name]
# Now let's go ahead and parse all the fields
fields = self.mapping.properties.properties
for key in fields:
# TODO: What if we've mapped the property to a different name? Will we allow that?
field = fields[key]
# if isinstance(field, InnerObject):
# import pdb; pdb.set_trace()
# continue
if doc["_source"].get(key):
attribute_value = doc["_source"][key]
doc["_source"][key] = field.to_python(attribute_value)
return klass(**doc["_source"]) | 0.003876 |
async def fetch_webhook(self, webhook_id):
"""|coro|
Retrieves a :class:`.Webhook` with the specified ID.
Raises
--------
HTTPException
Retrieving the webhook failed.
NotFound
Invalid webhook ID.
Forbidden
You do not have permission to fetch this webhook.
Returns
---------
:class:`.Webhook`
The webhook you requested.
"""
data = await self.http.get_webhook(webhook_id)
return Webhook.from_state(data, state=self._connection) | 0.003448 |
def tag_and_push_image(self, image, target_image, insecure=False, force=False,
dockercfg=None):
"""
tag provided image and push it to registry
:param image: str or ImageName, image id or name
:param target_image: ImageName, img
:param insecure: bool, allow connecting to registry over plain http
:param force: bool, force the tag?
:param dockercfg: path to docker config
:return: str, image (reg.com/img:v1)
"""
logger.info("tagging and pushing image '%s' as '%s'", image, target_image)
logger.debug("image = '%s', target_image = '%s'", image, target_image)
self.tag_image(image, target_image, force=force)
if dockercfg:
self.login(registry=target_image.registry, docker_secret_path=dockercfg)
return self.push_image(target_image, insecure=insecure) | 0.005549 |
def F(self, **kwargs):
'''
Returns the Kane remote-band parameter, `F`, calculated from
`Eg_Gamma_0`, `Delta_SO`, `Ep`, and `meff_e_Gamma_0`.
'''
Eg = self.Eg_Gamma_0(**kwargs)
Delta_SO = self.Delta_SO(**kwargs)
Ep = self.Ep(**kwargs)
meff = self.meff_e_Gamma_0(**kwargs)
return (1./meff-1-(Ep*(Eg+2.*Delta_SO/3.))/(Eg*(Eg+Delta_SO)))/2 | 0.004902 |
def Cvgm(self):
r'''Gas-phase ideal-gas contant-volume heat capacity of the mixture at
its current temperature and composition, in units of [J/mol/K]. Subtracts R from
the ideal-gas heat capacity; does not include pressure-compensation
from an equation of state.
Examples
--------
>>> Mixture(['water'], ws=[1], T=520).Cvgm
27.13366316134193
'''
Cpgm = self.HeatCapacityGasMixture(self.T, self.P, self.zs, self.ws)
if Cpgm:
return Cpgm - R
return None | 0.005357 |
def _decompose_(self, qubits):
"""See base class."""
a, b = qubits
yield CNOT(a, b)
yield CNOT(b, a) ** self._exponent
yield CNOT(a, b) | 0.011429 |
def _auto_client_files(cls, client, ca_path=None, ca_contents=None, cert_path=None,
cert_contents=None, key_path=None, key_contents=None):
"""
returns a list of NetJSON extra files for automatically generated clients
produces side effects in ``client`` dictionary
"""
files = []
if ca_path and ca_contents:
client['ca'] = ca_path
files.append(dict(path=ca_path,
contents=ca_contents,
mode=DEFAULT_FILE_MODE))
if cert_path and cert_contents:
client['cert'] = cert_path
files.append(dict(path=cert_path,
contents=cert_contents,
mode=DEFAULT_FILE_MODE))
if key_path and key_contents:
client['key'] = key_path
files.append(dict(path=key_path,
contents=key_contents,
mode=DEFAULT_FILE_MODE,))
return files | 0.005709 |
def _compute_distance_scaling(self, C, mag, rrup):
"""
Compute distance scaling term (eq.3, page 319).
The distance scaling assumes the near-source effect of local site
conditions due to 50% very firm soil and soft rock and 50% firm rock.
"""
g = C['c5'] + C['c6'] * 0.5 + C['c7'] * 0.5
return (
rrup ** 2 +
(np.exp(C['c8'] * mag + C['c9'] * (8.5 - mag) ** 2) * g) ** 2
) | 0.004338 |
def version_cmp(pkg1, pkg2, **kwargs):
'''
Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
making the comparison.
CLI Example:
.. code-block:: bash
salt '*' pkg.version_cmp '0.2.4-0' '0.2.4.1-0'
'''
# ignore_epoch is not supported here, but has to be included for API
# compatibility. Rather than putting this argument into the function
# definition (and thus have it show up in the docs), we just pop it out of
# the kwargs dict and then raise an exception if any kwargs other than
# ignore_epoch were passed.
kwargs = salt.utils.args.clean_kwargs(**kwargs)
kwargs.pop('ignore_epoch', None)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
regex = r'^~?([^:\[]+):?[^\[]*\[?.*$'
ver1 = re.match(regex, pkg1)
ver2 = re.match(regex, pkg2)
if ver1 and ver2:
return portage.versions.vercmp(ver1.group(1), ver2.group(1))
return None | 0.000967 |
def _getWSAddressTypeCodes(self, **kw):
'''kw -- namespaceURI keys with sequence of element names.
'''
typecodes = []
try:
for nsuri,elements in kw.items():
for el in elements:
typecode = GED(nsuri, el)
if typecode is None:
raise WSActionException, 'Missing namespace, import "%s"' %nsuri
typecodes.append(typecode)
else:
pass
except EvaluateException, ex:
raise EvaluateException, \
'To use ws-addressing register typecodes for namespace(%s)' %self.wsAddressURI
return typecodes | 0.012931 |
def record_udp_port(self, port):
"""
Associate a reserved UDP port number with this project.
:param port: UDP port number
"""
if port not in self._used_udp_ports:
self._used_udp_ports.add(port) | 0.008097 |
def pixel_to_utm(row, column, transform):
""" Convert pixel coordinate to UTM coordinate given a transform
:param row: row pixel coordinate
:type row: int or float
:param column: column pixel coordinate
:type column: int or float
:param transform: georeferencing transform of the image, e.g. `(x_upper_left, res_x, 0, y_upper_left, 0, -res_y)`
:type transform: tuple or list
:return: east, north UTM coordinates
:rtype: float, float
"""
east = transform[0] + column * transform[1]
north = transform[3] + row * transform[5]
return east, north | 0.003367 |
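An illustrative call with a made-up GDAL-style transform (10 m pixels, upper-left corner at easting 500000 / northing 4600000):
transform = (500000, 10, 0, 4600000, 0, -10)
print(pixel_to_utm(row=3, column=2, transform=transform))
# (500020, 4599970)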
def send_media_group(self, chat_id, media, disable_notification=None, reply_to_message_id=None):
"""
Use this method to send a group of photos or videos as an album. On success, an array of the sent Messages is returned.
https://core.telegram.org/bots/api#sendmediagroup
Parameters:
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:type chat_id: int | str|unicode
:param media: An array describing photos and videos to be sent; must include 2–10 items
:type media: list of (pytgbot.api_types.sendable.input_media.InputMediaPhoto|pytgbot.api_types.sendable.input_media.InputMediaVideo)
Optional keyword parameters:
:param disable_notification: Sends the messages silently. Users will receive a notification with no sound.
:type disable_notification: bool
:param reply_to_message_id: If the messages are a reply, ID of the original message
:type reply_to_message_id: int
Returns:
:return: On success, an array of the sent Messages is returned
:rtype: Messages
"""
assert_type_or_raise(chat_id, (int, unicode_type), parameter_name="chat_id")
from .api_types.sendable.input_media import InputMediaPhoto, InputMediaVideo
files = {}
new_media = []
assert_type_or_raise(media, list, parameter_name="media")
for i, medium in enumerate(media):
assert_type_or_raise(medium, InputMediaPhoto, InputMediaVideo, parameter_name="media[{i}]".format(i=i))
assert isinstance(medium, (InputMediaPhoto, InputMediaVideo))
new_medium, file = medium.get_request_data('pytgbot{i}'.format(i=i), full_data=True)
logger.debug('InputMedia {} found.'.format(new_medium))
new_media.append(new_medium)
if file:
files.update(file)
# end if
# end for
new_media = json.dumps(new_media)
assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
assert_type_or_raise(reply_to_message_id, None, int, parameter_name="reply_to_message_id")
result = self.do(
"sendMediaGroup", chat_id=chat_id, media=new_media, files=files,
disable_notification=disable_notification, reply_to_message_id=reply_to_message_id,
)
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result))) # no valid parsing so far
if not isinstance(result, list):
raise TgApiParseException("Could not parse result as a list.")  # See debug log for details!
# end if
from .api_types.receivable.updates import Message
return [Message.from_array(msg) for msg in result] # parse them all as Message.
raise TgApiParseException("Could not parse result.") # See debug log for details!
# end if return_python_objects
return result | 0.006487 |
def _vmomentsurfaceIntegrand(vR,vT,R,az,df,n,m,sigmaR1,sigmaT1,t,initvmoment):
"""Internal function that is the integrand for the velocity moment times
surface mass integration"""
o= Orbit([R,vR*sigmaR1,vT*sigmaT1,az])
return vR**n*vT**m*df(o,t)/initvmoment | 0.058608 |
def margin_logit_loss(model_logits, label, nb_classes=10, num_classes=None):
"""Computes difference between logit for `label` and next highest logit.
The loss is high when `label` is unlikely (targeted by default).
This follows the same interface as `loss_fn` for TensorOptimizer and
projected_optimization, i.e. it returns a batch of loss values.
"""
if num_classes is not None:
warnings.warn("`num_classes` is deprecated. Switch to `nb_classes`."
" `num_classes` may be removed on or after 2019-04-23.")
nb_classes = num_classes
del num_classes
if 'int' in str(label.dtype):
logit_mask = tf.one_hot(label, depth=nb_classes, axis=-1)
else:
logit_mask = label
if 'int' in str(logit_mask.dtype):
logit_mask = tf.to_float(logit_mask)
try:
label_logits = reduce_sum(logit_mask * model_logits, axis=-1)
except TypeError:
raise TypeError("Could not take row-wise dot product between "
"logit mask, of dtype " + str(logit_mask.dtype)
+ " and model_logits, of dtype "
+ str(model_logits.dtype))
logits_with_target_label_neg_inf = model_logits - logit_mask * 99999
highest_nonlabel_logits = reduce_max(
logits_with_target_label_neg_inf, axis=-1)
loss = highest_nonlabel_logits - label_logits
return loss | 0.008935 |
def get_masked_cnv_manifest(tcga_id):
"""Get manifest for masked TCGA copy-number variation data.
Params
------
tcga_id : str
The TCGA project ID.
download_file : str
The path of the download file.
Returns
-------
`pandas.DataFrame`
The manifest.
"""
payload = {
"filters": json.dumps({
"op": "and",
"content" : [
{
"op":"in",
"content":{
"field":"cases.project.program.name",
"value":["TCGA"]}},
{
"op":"in",
"content":{
"field":"cases.project.project_id",
"value":[tcga_id]}},
{
"op":"in",
"content":{
"field":"files.data_category",
"value":["Copy Number Variation"]}},
{
"op":"in",
"content":{
"field":"files.data_type",
"value":["Masked Copy Number Segment"]}}]
}),
"return_type":"manifest",
"size":10000,
}
r = requests.get('https://gdc-api.nci.nih.gov/files', params=payload)
df = pd.read_csv(io.StringIO(r.text), sep='\t', header=0)
logger.info('Obtained manifest with %d files.', df.shape[0])
return df | 0.014855 |
def check_auth(username, pwd):
"""This function is called to check if a username /
password combination is valid.
"""
cfg = get_current_config()
return username == cfg["dashboard_httpauth"].split(
":")[0] and pwd == cfg["dashboard_httpauth"].split(":")[1] | 0.003534 |
def process_pad_frame(self,
id=None,
msg=None):
"""process_pad_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: pad frame for packet
"""
# normalize into a dataframe
df = json_normalize(msg)
# convert to a flattened dictionary
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = "pad_{}".format(k)
flat_msg[new_key] = dt[k]["0"]
if new_key not in self.pad_keys:
self.pad_keys[new_key] = k
# end of capturing all unique keys
dt["pad_id"] = id
self.all_pad.append(dt)
log.debug("PAD data updated:")
log.debug(self.pad_keys)
log.debug(self.all_pad)
log.debug("")
return flat_msg | 0.004061 |
def filter_keys(cls, data):
"""Filter GELF record keys using exclude_patterns
:param dict data: Log record has dict
:return: the filtered log record
:rtype: dict
"""
keys = list(data.keys())
for pattern in cls.EXCLUDE_PATTERNS:
for key in keys:
if re.match(pattern, key):
keys.remove(key)
return dict(filter(lambda x: x[0] in keys, data.items())) | 0.004357 |
def _parse_metadatas(self, text_lines):
"""
From a given Org text, return the metadata
Keyword Arguments:
text_lines -- A list, each item is a line of the text
Return:
A dict containing the metadata
"""
if not text_lines:
return {}
expr_metadata = re.compile(r'^#\+([a-zA-Z]+):(.*)')
return {
expr_metadata.match(line).group(1).lower()
: expr_metadata.match(line).group(2).strip()
for line in text_lines
} | 0.007394 |
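Since the method never reads `self`, a quick illustration only needs some instance `parser` of the owning class (whatever that class is):
lines = ["#+TITLE: My notes", "#+AUTHOR: Someone"]
print(parser._parse_metadatas(lines))
# {'title': 'My notes', 'author': 'Someone'}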
def reverseCommit(self):
"""
Remove the inserted character(s).
"""
# Move the cursor to the right of the text to delete.
tc = self.qteWidget.textCursor()
# Delete as many characters as necessary. For an image that would
# be exactly 1 even though the HTML code to embed that image is usually
# longer. For text, it would be as many characters as the pasted text
# was long.
if self.isImage:
dataLen = 1
else:
dataLen = len(self.data)
tc.setPosition(self.selStart + dataLen, QtGui.QTextCursor.MoveAnchor)
for ii in range(dataLen):
tc.deletePreviousChar()
# Add the previously selected text (this may be none).
tc.insertHtml(self.selText)
self.qteWidget.setTextCursor(tc) | 0.002389 |
def create(cls, name, ip_range, comment=None):
"""
Create an AddressRange element
:param str name: Name of element
:param str ip_range: IP range of the element
:param str comment: comment (optional)
:raises CreateElementFailed: element creation failed with reason
:return: instance with meta
:rtype: AddressRange
"""
json = {'name': name,
'ip_range': ip_range,
'comment': comment}
return ElementCreator(cls, json) | 0.003781 |
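A hedged example built only from the docstring above; the element name and range are illustrative:
addr_range = AddressRange.create(
    name='internal-dhcp-pool',
    ip_range='10.0.0.50-10.0.0.254',
    comment='DHCP pool for the internal LAN')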
def draw(self, y_pred, residuals, train=False, **kwargs):
"""
Draw the residuals against the predicted value for the specified split.
It is best to draw the training split first, then the test split so
that the test split (usually smaller) is above the training split;
particularly if the histogram is turned on.
Parameters
----------
y_pred : ndarray or Series of length n
An array or series of predicted target values
residuals : ndarray or Series of length n
An array or series of the difference between the predicted and the
target values
train : boolean, default: False
If False, `draw` assumes that the residual points being plotted
are from the test data; if True, `draw` assumes the residuals
are the train data.
Returns
------
ax : the axis with the plotted figure
"""
if train:
color = self.colors['train_point']
label = "Train $R^2 = {:0.3f}$".format(self.train_score_)
alpha = self.alphas['train_point']
else:
color = self.colors['test_point']
label = "Test $R^2 = {:0.3f}$".format(self.test_score_)
alpha = self.alphas['test_point']
# Update the legend information
self._labels.append(label)
self._colors.append(color)
# Draw the residuals scatter plot
self.ax.scatter(
y_pred, residuals, c=color, alpha=alpha, label=label
)
# Add residuals histogram
if self.hist in {True, 'frequency'}:
self.hax.hist(residuals, bins=50, orientation="horizontal", color=color)
elif self.hist == 'density':
self.hax.hist(
residuals, bins=50, orientation="horizontal", density=True, color=color
)
# Ensure the current axes is always the main residuals axes
plt.sca(self.ax)
return self.ax | 0.002456 |
def _escape(value):
"""Escape a string (key or value) for InfluxDB's line protocol.
:param str|int|float|bool value: The value to be escaped
:rtype: str
"""
value = str(value)
for char, escaped in {' ': r'\ ', ',': r'\,', '"': r'\"'}.items():
value = value.replace(char, escaped)
return value | 0.011142 |
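For instance, a tag value containing spaces and commas comes back escaped for the line protocol:
print(_escape('cpu load,host=web 1'))
# cpu\ load\,host=web\ 1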
def superkey(self):
"""Returns a set of column names that together constitute the superkey."""
sorted_list = []
for header in self.header:
if header in self._keys:
sorted_list.append(header)
return sorted_list | 0.011152 |
def transform(self, data):
"""
Transforms the data.
"""
if not self._get("fitted"):
raise RuntimeError("`transform` called before `fit` or `fit_transform`.")
data = data.copy()
output_column_prefix = self._get("output_column_prefix")
if output_column_prefix is None:
prefix = ""
else:
prefix = output_column_prefix + '.'
transform_function = self._get("transform_function")
feature_columns = self._get("features")
feature_columns = _internal_utils.select_feature_subset(data, feature_columns)
for f in feature_columns:
data[prefix + f] = transform_function(data[f])
return data | 0.005435 |
def IIR_filter_design(CentralFreq, bandwidth, transitionWidth, SampleFreq, GainStop=40, GainPass=0.01):
"""
Function to calculate the coefficients of an IIR filter,
IMPORTANT NOTE: make_butterworth_bandpass_b_a and make_butterworth_b_a
can produce IIR filters with higher sample rates and are prefereable
due to this.
Parameters
----------
CentralFreq : float
Central frequency of the IIR filter to be designed
bandwidth : float
The width of the passband to be created about the central frequency
transitionWidth : float
The width of the transition band between the pass-band and stop-band
SampleFreq : float
The sample frequency (rate) of the data to be filtered
GainStop : float, optional
The dB of attenuation within the stopband (i.e. outside the passband)
GainPass : float, optional
The dB attenuation inside the passband (ideally close to 0 for a bandpass filter)
Returns
-------
b : ndarray
coefficients multiplying the current and past inputs (feedforward coefficients)
a : ndarray
coefficients multiplying the past outputs (feedback coefficients)
"""
NyquistFreq = SampleFreq / 2
if (CentralFreq + bandwidth / 2 + transitionWidth > NyquistFreq):
raise ValueError(
"Need a higher Sample Frequency for this Central Freq, Bandwidth and transition Width")
CentralFreqNormed = CentralFreq / NyquistFreq
bandwidthNormed = bandwidth / NyquistFreq
transitionWidthNormed = transitionWidth / NyquistFreq
bandpass = [CentralFreqNormed - bandwidthNormed /
2, CentralFreqNormed + bandwidthNormed / 2]
bandstop = [CentralFreqNormed - bandwidthNormed / 2 - transitionWidthNormed,
CentralFreqNormed + bandwidthNormed / 2 + transitionWidthNormed]
print(bandpass, bandstop)
b, a = scipy.signal.iirdesign(bandpass, bandstop, GainPass, GainStop)
return b, a | 0.004044 |
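A usage sketch with made-up numbers: a 10 kHz passband centred on 50 kHz with 5 kHz transition bands at a 1 MHz sample rate; the resulting coefficients can then be applied with scipy.signal.filtfilt:
import scipy.signal

b, a = IIR_filter_design(50e3, 10e3, 5e3, 1e6)
# filtered = scipy.signal.filtfilt(b, a, data)  # `data` is the signal to filter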
def DbGetDeviceList(self, argin):
""" Get a list of devices for specified server and class.
:param argin: argin[0] : server name
argin[1] : class name
:type: tango.DevVarStringArray
:return: The list of devices for specified server and class.
:rtype: tango.DevVarStringArray """
self._log.debug("In DbGetDeviceList()")
server_name = replace_wildcard(argin[0])
class_name = replace_wildcard(argin[1])
return self.db.get_device_list(server_name, class_name) | 0.003731 |
def owner(*paths, **kwargs):
'''
.. versionadded:: 2014.7.0
Return the name of the package that owns the file. Multiple file paths can
be passed. Like :mod:`pkg.version <salt.modules.yumpkg.version>`, if a
single path is passed, a string will be returned, and if multiple paths are
passed, a dictionary of file/package name pairs will be returned.
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.
CLI Examples:
.. code-block:: bash
salt '*' pkg.owner /usr/bin/apachectl
salt '*' pkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf
'''
if not paths:
return ''
ret = {}
cmd_prefix = ['rpm', '-qf', '--queryformat', '%{name}']
for path in paths:
ret[path] = __salt__['cmd.run_stdout'](
cmd_prefix + [path],
output_loglevel='trace',
python_shell=False
)
if 'not owned' in ret[path].lower():
ret[path] = ''
if len(ret) == 1:
return next(six.itervalues(ret))
return ret | 0.000893 |
def drawFile(dataset, matrix, patterns, cells, w, fnum):
'''The similarity of two patterns in the bit-encoding space is displayed alongside
their similarity in the sp-coinc space.'''
score=0
count = 0
assert len(patterns)==len(cells)
for p in xrange(len(patterns)-1):
matrix[p+1:,p] = [len(set(patterns[p]).intersection(set(q)))*100/w for q in patterns[p+1:]]
matrix[p,p+1:] = [len(set(cells[p]).intersection(set(r)))*5/2 for r in cells[p+1:]]
score += sum(abs(np.array(matrix[p+1:,p])-np.array(matrix[p,p+1:])))
count += len(matrix[p+1:,p])
print 'Score', score/count
fig = pyl.figure(figsize = (10,10), num = fnum)
pyl.matshow(matrix, fignum = fnum)
pyl.colorbar()
pyl.title('Coincidence Space', verticalalignment='top', fontsize=12)
pyl.xlabel('The Mirror Image Visualization for '+dataset, fontsize=17)
pyl.ylabel('Encoding space', fontsize=12) | 0.036626 |
def date(self):
"""DATE command.
Coordinated Universal time from the perspective of the usenet server.
It can be used to provide information that might be useful when using
the NEWNEWS command.
See <http://tools.ietf.org/html/rfc3977#section-7.1>
Returns:
The UTC time according to the server as a datetime object.
Raises:
NNTPDataError: If the timestamp can't be parsed.
"""
code, message = self.command("DATE")
if code != 111:
raise NNTPReplyError(code, message)
ts = date.datetimeobj(message, fmt="%Y%m%d%H%M%S")
return ts | 0.003017 |
def paint( self, painter, option, index ):
"""
Overloads the paint method from Qt to perform some additional painting
on items.
:param painter | <QPainter>
option | <QStyleOption>
index | <QModelIndex>
"""
# draw the background
edit = self.parent()
item = edit.item(index.row())
if ( not isinstance(item, XMultiTagCreateItem) ):
if ( item.isSelected() ):
painter.setBrush(edit.highlightColor())
else:
painter.setBrush(edit.tagColor())
painter.drawRect(option.rect)
painter.setBrush(Qt.NoBrush)
painter.setPen(item.foreground().color())
super(XMultiTagDelegate, self).paint(painter, option, index)
# draw the border
item = self.parent().item(index.row())
if ( not isinstance(item, XMultiTagCreateItem) ):
painter.setPen(edit.borderColor())
painter.setBrush(Qt.NoBrush)
painter.drawRect(option.rect)
painter.drawText(option.rect.right() - 14,
option.rect.top() + 1,
16,
16,
Qt.AlignCenter,
'x') | 0.010784 |
def memory_map(self):
"""! @brief MemoryMap object."""
# Lazily construct the memory map.
if self._memory_map is None:
self._build_memory_regions()
self._build_flash_regions()
# Warn if there was no boot memory.
if not self._saw_startup:
LOG.warning("CMSIS-Pack device %s has no identifiable boot memory", self.part_number)
self._memory_map = MemoryMap(self._regions)
return self._memory_map | 0.011321 |
def get_module_logger(moduleName, defaultToVerbose=False):
"""Create a module logger, that can be en/disabled by configuration.
@see: unit.init_logging
"""
# moduleName = moduleName.split(".")[-1]
if not moduleName.startswith(BASE_LOGGER_NAME + "."):
moduleName = BASE_LOGGER_NAME + "." + moduleName
logger = logging.getLogger(moduleName)
# if logger.level == logging.NOTSET and not defaultToVerbose:
# logger.setLevel(logging.INFO) # Disable debug messages by default
return logger | 0.00188 |
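Typical usage would simply be:
log = get_module_logger(__name__)
log.debug("module logger initialised")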
def tell_sender_to_start(self):
'''send a start packet (if we haven't sent one in the last second)'''
now = time.time()
if now - self.time_last_start_packet_sent < 1:
return
self.time_last_start_packet_sent = now
if self.log_settings.verbose:
print("DFLogger: Sending start packet")
target_sys = self.log_settings.df_target_system
target_comp = self.log_settings.df_target_component
self.master.mav.remote_log_block_status_send(
target_sys,
target_comp,
mavutil.mavlink.MAV_REMOTE_LOG_DATA_BLOCK_START,
1) | 0.00311 |
def pretty_dict_string(d, indent=0):
"""Pretty output of nested dictionaries.
"""
s = ''
for key, value in sorted(d.items()):
s += ' ' * indent + str(key)
if isinstance(value, dict):
s += '\n' + pretty_dict_string(value, indent+1)
else:
s += '=' + str(value) + '\n'
return s | 0.014409 |
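For example, a small nested dict renders as:
cfg = {"server": {"host": "localhost", "port": 8080}, "debug": True}
print(pretty_dict_string(cfg))
# debug=True
# server
#  host=localhost
#  port=8080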
def do_execute(self):
"""
The actual execution of the actor.
:return: None if successful, otherwise error message
:rtype: str
"""
generator = datagen.DataGenerator.make_copy(self.resolve_option("setup"))
generator.dataset_format = generator.define_data_format()
if bool(self.resolve_option("incremental")) and generator.single_mode_flag:
for i in range(generator.num_examples_act):
self._output.append(Token(generator.generate_example()))
else:
data = generator.generate_examples()
self._output.append(Token(data))
return None | 0.00607 |
def swo_speed_info(self):
"""Retrieves information about the supported SWO speeds.
Args:
self (JLink): the ``JLink`` instance
Returns:
A ``JLinkSWOSpeedInfo`` instance describing the target's supported
SWO speeds.
Raises:
JLinkException: on error
"""
info = structs.JLinkSWOSpeedInfo()
res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.GET_SPEED_INFO,
ctypes.byref(info))
if res < 0:
raise errors.JLinkException(res)
return info | 0.00491 |
def includeme(configurator):
"""
Add yaml configuration utilities.
:param pyramid.config.Configurator configurator: pyramid's app configurator
"""
settings = configurator.registry.settings
# lets default it to running path
yaml_locations = settings.get('yaml.location',
settings.get('yml.location', os.getcwd()))
configurator.add_directive('config_defaults', config_defaults)
configurator.config_defaults(yaml_locations)
# reading yml configuration
if configurator.registry['config']:
config = configurator.registry['config']
log.debug('Yaml config created')
# extend settings object
if 'configurator' in config and config.configurator:
_extend_settings(settings, config.configurator)
# run include's
if 'include' in config:
_run_includemes(configurator, config.include)
# let's calla a convenience request method
configurator.add_request_method(
lambda request: request.registry['config'],
name='config', property=True
) | 0.000903 |
def get_isa(self, oneq_type='Xhalves', twoq_type='CZ') -> ISA:
"""
Construct an ISA suitable for targeting by compilation.
This will raise an exception if the requested ISA is not supported by the device.
:param oneq_type: The family of one-qubit gates to target
:param twoq_type: The family of two-qubit gates to target
"""
qubits = [Qubit(id=q.id, type=oneq_type, dead=q.dead) for q in self._isa.qubits]
edges = [Edge(targets=e.targets, type=twoq_type, dead=e.dead) for e in self._isa.edges]
return ISA(qubits, edges) | 0.008432 |
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path)) | 0.008475 |
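A hedged example assuming a ReJSON-enabled client `rj` exposing this mixin, with Path coming from the same package:
rj.jsonset('doc', Path.rootPath(), {'items': [1, 2, 3]})
print(rj.jsonarrlen('doc', Path('.items')))  # 3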
def downsample_trajectories(trajectories, downsampler, *args, **kwargs):
'''Downsamples all points together, then re-splits into original trajectories.
trajectories : list of 2-d arrays, each representing a trajectory
downsampler(X, *args, **kwargs) : callable that returns indices into X
'''
X = np.vstack(trajectories)
traj_lengths = list(map(len, trajectories))
inds = np.sort(downsampler(X, *args, **kwargs))
new_traj = []
for stop in np.cumsum(traj_lengths):
n = np.searchsorted(inds, stop)
new_traj.append(X[inds[:n]])
inds = inds[n:]
return new_traj | 0.01528 |
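A self-contained sketch using a trivial downsampler that keeps every other point of the stacked array:
import numpy as np

def keep_every_other(X):
    return np.arange(0, len(X), 2)

trajectories = [np.random.rand(10, 2), np.random.rand(6, 2)]
smaller = downsample_trajectories(trajectories, keep_every_other)
print([t.shape for t in smaller])  # [(5, 2), (3, 2)]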
def resolve_movie(self, title, year=None):
"""Tries to find a movie with a given title and year"""
r = self.search_movie(title)
return self._match_results(r, title, year) | 0.010256 |
def minizinc_version():
"""Returns the version of the found minizinc executable."""
vs = _run_minizinc('--version')
m = re.findall('version ([\d\.]+)', vs)
if not m:
raise RuntimeError('MiniZinc executable not found.')
return m[0] | 0.011628 |
def _break_reads(self, contig, position, fout, min_read_length=250):
'''Get all reads from contig, but breaks them all at given position (0-based) in the reference. Writes to fout. The break position is currently approximate (ignores indels in the alignment)'''
sam_reader = pysam.Samfile(self.bam, "rb")
for read in sam_reader.fetch(contig):
seqs = []
if read.pos < position < read.reference_end - 1:
split_point = position - read.pos
if split_point - 1 >= min_read_length:
sequence = mapping.aligned_read_to_read(read, revcomp=False, ignore_quality=not self.fastq_out).subseq(0, split_point)
sequence.id += '.left'
seqs.append(sequence)
if read.query_length - split_point >= min_read_length:
sequence = mapping.aligned_read_to_read(read, revcomp=False, ignore_quality=not self.fastq_out).subseq(split_point, read.query_length)
sequence.id += '.right'
seqs.append(sequence)
else:
seqs.append(mapping.aligned_read_to_read(read, revcomp=False, ignore_quality=not self.fastq_out))
for seq in seqs:
if read.is_reverse:
seq.revcomp()
print(seq, file=fout) | 0.004435 |
def get_address(customer_id, data):
"""
Easier to fetch the addresses of customer and then check one by one.
You can get fancy by using some validation mechanism too
"""
Address = client.model('party.address')
addresses = Address.find(
[('party', '=', customer_id)],
fields=[
'name', 'street', 'street_bis', 'city', 'zip',
'subdivision.code', 'country.code'
]
)
for address in addresses:
if (
address['name'] == data['name'] and
address['street'] == data['street'] and
address['street_bis'] == data['street_bis'] and
address['city'] == data['city'] and
address['zip'] == data['zip'] and
address['subdivision.code'].endswith(data['state']) and
address['country.code'] == data['country']):
return address['id'] | 0.001085 |
def _getOutputElegant(self, **kws):
""" get results from elegant output according to the given keywords,
input parameter format: key = sdds field name tuple, e.g.:
available keywords are:
- 'file': sdds fielname, file = test.sig
- 'data': data array, data = ('s','Sx')
- 'dump': h5file name, if defined, dump data to hdf5 format
"""
datascript = "sddsprintdata.sh"
datapath = self.sim_path
trajparam_list = kws['data']
sddsfile = os.path.expanduser(os.path.join(self.sim_path, kws['file']))
dh = datautils.DataExtracter(sddsfile, *trajparam_list)
dh.setDataScript(datascript)
dh.setDataPath(datapath)
if 'dump' in kws:
dh.setH5file(kws['dump'])
dh.extractData().dump()
data = dh.extractData().getH5Data()
return data | 0.002217 |
def fetchall(self):
"""Fetch all available rows from select result set.
:returns: list of row tuples
"""
result = r = self.fetchmany(size=self.FETCHALL_BLOCKSIZE)
while len(r) == self.FETCHALL_BLOCKSIZE or not self._received_last_resultset_part:
r = self.fetchmany(size=self.FETCHALL_BLOCKSIZE)
result.extend(r)
return result | 0.007557 |
def _get_data_segments(channels, start, end, connection):
"""Get available data segments for the given channels
"""
allsegs = io_nds2.get_availability(channels, start, end,
connection=connection)
return allsegs.intersection(allsegs.keys()) | 0.003401 |
def scale(text="", value=0, min=0 ,max=100, step=1, draw_value=True, title="",
width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, timeout=None):
"""
Select a number with a range widget
:param text: text inside window
:type text: str
:param value: current value
:type value: int
:param min: minimum value
:type min: int
:param max: maximum value
:type max: int
:param step: incrementation value
:type step: int
:param draw_value: hide/show cursor value
:type draw_value: bool
:param title: title of the window
:type title: str
:param width: window width
:type width: int
:param height: window height
:type height: int
:param timeout: close the window after n seconds
:type timeout: int
:return: The value selected by the user
:rtype: float
"""
dialog = ZScale(text, value, min, max, step,
draw_value, title, width, height, timeout)
dialog.run()
return dialog.response | 0.002994 |
def parse(self):
""" parse geojson and ensure is collection """
try:
self.parsed_data = json.loads(self.data)
except UnicodeError as e:
self.parsed_data = json.loads(self.data.decode('latin1'))
except Exception as e:
raise Exception('Error while converting response from JSON to python. %s' % e)
if self.parsed_data.get('type', '') != 'FeatureCollection':
raise Exception('GeoJson synchronizer expects a FeatureCollection object at root level')
self.parsed_data = self.parsed_data['features'] | 0.00678 |
def create_grupo_l3(self):
"""Get an instance of grupo_l3 services facade."""
return GrupoL3(
self.networkapi_url,
self.user,
self.password,
self.user_ldap) | 0.009091 |
def merge_vertical_lines(lines, tol=TOLERANCE):
"""
This function merges lines segment when they are vertically aligned
:param lines: list of lines coordinates (top, left, bottom, right)
:return: list of merged lines coordinates
"""
if len(lines) == 0:
return []
merged_lines = [lines[0]]
for line in lines[1:]:
last_line = merged_lines[-1]
if line[1] == last_line[1]: # lines are vertically aligned
if line[0] <= last_line[2] + tol: # lines intersect
y0, x0, y1, x1 = merged_lines[-1]
merged_lines[-1] = (y0, x0, line[2], x1)
else:
merged_lines.append(
line
) # lines are vertically aligned but do not intersect
else:
merged_lines.append(line)
return merged_lines | 0.00117 |
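For example, two overlapping segments on the same x coordinate are merged while a separate column is kept (coordinates are (top, left, bottom, right)):
lines = [(0, 5, 10, 5), (9, 5, 20, 5), (0, 30, 10, 30)]
print(merge_vertical_lines(lines, tol=2))
# [(0, 5, 20, 5), (0, 30, 10, 30)]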
def get_all_submissions(course_id, item_id, item_type, read_replica=True):
"""For the given item, get the most recent submission for every student who has submitted.
This may return a very large result set! It is implemented as a generator for efficiency.
Args:
course_id, item_id, item_type (string): The values of the respective student_item fields
to filter the submissions by.
read_replica (bool): If true, attempt to use the read replica database.
If no read replica is available, use the default database.
Yields:
Dicts representing the submissions with the following fields:
student_item
student_id
attempt_number
submitted_at
created_at
answer
Raises:
Cannot fail unless there's a database error, but may return an empty iterable.
"""
submission_qs = Submission.objects
if read_replica:
submission_qs = _use_read_replica(submission_qs)
# We cannot use SELECT DISTINCT ON because it's PostgreSQL only, so unfortunately
# our results will contain every entry of each student, not just the most recent.
# We sort by student_id and primary key, so the results will be grouped by
# student, with the most recent submission being the first one in each group.
query = submission_qs.select_related('student_item').filter(
student_item__course_id=course_id,
student_item__item_id=item_id,
student_item__item_type=item_type,
).order_by('student_item__student_id', '-submitted_at', '-id').iterator()
for unused_student_id, row_iter in itertools.groupby(query, operator.attrgetter('student_item.student_id')):
submission = next(row_iter)
data = SubmissionSerializer(submission).data
data['student_id'] = submission.student_item.student_id
yield data | 0.00523 |
def create_integer(self, value: int) -> Integer:
"""
Creates a new :class:`ConstantInteger`, adding it to the pool and
returning it.
:param value: The value of the new integer.
"""
self.append((3, value))
return self.get(self.raw_count - 1) | 0.006734 |
def _ctab_property_block(stream):
"""Process properties block of ``Ctab``.
:param stream: Queue containing lines of text.
:type stream: :py:class:`collections.deque`
:return: Tuples of data.
:rtype: :class:`~ctfile.tokenizer.CtabPropertiesBlockLine`
"""
line = stream.popleft()
while line != 'M END':
name = line.split()[1]
yield CtabPropertiesBlockLine(name, line)
line = stream.popleft() | 0.002237 |
def _parse_entity(self):
"""Parse an HTML entity at the head of the wikicode string."""
reset = self._head
try:
self._push(contexts.HTML_ENTITY)
self._really_parse_entity()
except BadRoute:
self._head = reset
self._emit_text(self._read())
else:
self._emit_all(self._pop()) | 0.005376 |
def shift(txt, indent = ' ', prepend = ''):
"""Return a list corresponding to the lines of text in the `txt` list
indented by `indent`. Prepend instead the string given in `prepend` to the
beginning of the first line. Note that if len(prepend) > len(indent), then
`prepend` will be truncated (doing better is tricky!). This preserves a
special '' entry at the end of `txt` (see `do_para` for the meaning).
"""
if type(indent) is int:
indent = indent * ' '
special_end = txt[-1:] == ['']
lines = ''.join(txt).splitlines(True)
for i in range(1,len(lines)):
if lines[i].strip() or indent.strip():
lines[i] = indent + lines[i]
if not lines:
return prepend
prepend = prepend[:len(indent)]
indent = indent[len(prepend):]
lines[0] = prepend + indent + lines[0]
ret = [''.join(lines)]
if special_end:
ret.append('')
return ret | 0.007495 |
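A quick illustration of the indent/prepend behaviour, assuming the `shift` function above is available; the trailing empty string in `txt` is the special paragraph marker the docstring mentions:

txt = ["first line\nsecond line\n", ""]  # trailing '' marks a paragraph break

print(shift(txt, indent="    "))
# ['    first line\n    second line\n', '']

print(shift(txt, indent="    ", prepend="-   "))
# ['-   first line\n    second line\n', '']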
def effective_value(self):
"""
Read/write |float| representing normalized adjustment value for this
adjustment. Actual values are a large-ish integer expressed in shape
coordinates, nominally between 0 and 100,000. The effective value is
normalized to a corresponding value nominally between 0.0 and 1.0.
Intuitively this represents the proportion of the width or height of
the shape at which the adjustment value is located from its starting
point. For simple shapes such as a rounded rectangle, this intuitive
correspondence holds. For more complicated shapes and at more extreme
shape proportions (e.g. width is much greater than height), the value
can become negative or greater than 1.0.
"""
raw_value = self.actual
if raw_value is None:
raw_value = self.def_val
return self._normalize(raw_value) | 0.002144 |
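A tiny numeric sketch of the normalization the docstring describes, assuming `_normalize` simply divides the raw shape-coordinate value by the nominal 100,000 range (the actual helper is not shown in this row):

def normalize(raw_value):
    # Raw adjustment values live on a nominal 0..100000 scale.
    return raw_value / 100000.0

print(normalize(50000))   # 0.5 -> adjustment sits halfway along the reference dimension
print(normalize(16667))   # ~0.167
print(normalize(-12000))  # -0.12 -> values outside 0..1 are possible at extreme proportions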
def osm_polygon_download(query, limit=1, polygon_geojson=1):
"""
Geocode a place and download its boundary geometry from OSM's Nominatim API.
Parameters
----------
query : string or dict
query string or structured query dict to geocode/download
limit : int
max number of results to return
polygon_geojson : int
request the boundary geometry polygon from the API, 0=no, 1=yes
Returns
-------
dict
"""
# define the parameters
params = OrderedDict()
params['format'] = 'json'
params['limit'] = limit
    params['dedupe'] = 0  # this prevents OSM from de-duping results so we're guaranteed to get precisely 'limit' number of results
params['polygon_geojson'] = polygon_geojson
# add the structured query dict (if provided) to params, otherwise query
# with place name string
if isinstance(query, str):
params['q'] = query
elif isinstance(query, dict):
# add the query keys in alphabetical order so the URL is the same string
# each time, for caching purposes
for key in sorted(list(query.keys())):
params[key] = query[key]
else:
raise TypeError('query must be a dict or a string')
# request the URL, return the JSON
response_json = nominatim_request(params=params, timeout=30)
return response_json | 0.004389 |
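For reference, a minimal sketch of the raw request those parameters describe, assuming the public Nominatim search endpoint; the real function routes through nominatim_request(), which adds pausing, caching and error handling that this skips:

import requests

params = {
    "format": "json",
    "limit": 1,
    "dedupe": 0,
    "polygon_geojson": 1,
    "q": "Berkeley, California, USA",
}
resp = requests.get(
    "https://nominatim.openstreetmap.org/search",
    params=params,
    timeout=30,
    headers={"User-Agent": "osm-polygon-example"},  # Nominatim asks for an identifying UA
)
results = resp.json()
print(results[0]["display_name"])
print(results[0]["geojson"]["type"])  # e.g. 'Polygon' or 'MultiPolygon'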
def ensure_local_files():
"""
    Ensure that the filesystem is set up/filled out in a valid way
"""
if _file_permissions:
if not os.path.isdir(AUTH_DIR):
os.mkdir(AUTH_DIR)
for fn in [CONFIG_FILE]:
contents = load_json_dict(fn)
for key, val in list(_FILE_CONTENT[fn].items()):
if key not in contents:
contents[key] = val
contents_keys = list(contents.keys())
for key in contents_keys:
if key not in _FILE_CONTENT[fn]:
del contents[key]
save_json_dict(fn, contents)
else:
warnings.warn("Looks like you don't have 'read-write' permission to "
"your 'home' ('~') directory") | 0.035656 |
def queue_purge(self, queue, **kwargs):
"""Discard all messages in the queue."""
qsize = mqueue.qsize()
mqueue.queue.clear()
return qsize | 0.011834 |
def debugDumpOneNode(self, output, depth):
"""Dumps debug information for the element node, it is not
recursive """
libxml2mod.xmlDebugDumpOneNode(output, self._o, depth) | 0.010152 |
def get_stp_mst_detail_output_cist_port_edge_port(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
cist = ET.SubElement(output, "cist")
port = ET.SubElement(cist, "port")
edge_port = ET.SubElement(port, "edge-port")
edge_port.text = kwargs.pop('edge_port')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003407 |
def cancel(self, session):
        '''taobao.crm.shopvip.cancel Seller cancels the shop VIP discount
        This interface is used to cancel the VIP discount'''
request = TOPRequest('taobao.crm.shopvip.cancel')
self.create(self.execute(request, session))
return self.is_success | 0.012 |
def pushd(directory):
"""Change working directories in style and stay organized!
:param directory: Where do you want to go and remember?
:return: saved directory stack
"""
directory = os.path.expanduser(directory)
_saved_paths.insert(0, os.path.abspath(os.getcwd()))
os.chdir(directory)
return [directory] + _saved_paths | 0.002833 |
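A self-contained sketch of the pushd/popd pair; popd_demo is a hypothetical counterpart (not part of the original module) that restores the most recently saved directory from the same stack:

import os

_saved_paths = []

def pushd_demo(directory):
    directory = os.path.expanduser(directory)
    _saved_paths.insert(0, os.path.abspath(os.getcwd()))
    os.chdir(directory)
    return [directory] + _saved_paths

def popd_demo():
    if _saved_paths:
        os.chdir(_saved_paths.pop(0))
    return list(_saved_paths)

pushd_demo("/tmp")
print(os.getcwd())   # /tmp (or its resolved path, e.g. /private/tmp on macOS)
popd_demo()
print(os.getcwd())   # back to the original working directory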
def LT(self, a, b):
"""Less-than comparison"""
return Operators.ITEBV(256, Operators.ULT(a, b), 1, 0) | 0.017094 |
def parse_string_field(self, field_data):
"""
Parse a string field to dict with options
String value is used as field name. Options can be given after = symbol.
Where key value is separated by : and different options by ;, when no : is used then the value becomes True.
**Example 1:** `field_name`
.. code-block:: python
# Output
{
'field': 'field_name'
}
        **Example 2** `field_name=option1:some value;option2: other value`
.. code-block:: python
# Output
{
'field': 'field_name',
'option1': 'some value',
'option2': 'other value',
}
**Example 3** `field_name=option1;option2: other value`
.. code-block:: python
# Output
{
'field': 'field_name',
'option1': True,
'option2': 'other value',
}
:param str field_data:
:return dict:
"""
field_name, *data = field_data.split('=', 1)
field = {
'field': field_name,
}
for option_string in ''.join(data).split(';'):
option, *value = option_string.split(':')
if option.strip():
field[option.strip()] = value[0].strip() if value else True
return field | 0.002819 |
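A standalone sketch of the same parsing rules, so the docstring's examples can be checked without the surrounding class:

def parse_string_field_demo(field_data):
    # Split off the field name, then parse ';'-separated options, each of
    # which is either 'key: value' or a bare flag that becomes True.
    field_name, *data = field_data.split('=', 1)
    field = {'field': field_name}
    for option_string in ''.join(data).split(';'):
        option, *value = option_string.split(':')
        if option.strip():
            field[option.strip()] = value[0].strip() if value else True
    return field

print(parse_string_field_demo('field_name'))
# {'field': 'field_name'}
print(parse_string_field_demo('field_name=option1;option2: other value'))
# {'field': 'field_name', 'option1': True, 'option2': 'other value'}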
def asDictionary(self):
""" converts the object to a dictionary """
template = {"type" : self._type,
"mapLayerId" : self._mapLayerId}
        if self._gdbVersion is not None and \
           self._gdbVersion != "":
template['gdbVersion'] = self._gdbVersion
return template | 0.015291 |
def store_json(obj, destination):
"""store_json
Takes in a json-portable object and a filesystem-based destination and stores
the json-portable object as JSON into the filesystem-based destination.
This is blind, dumb, and stupid; thus, it can fail if the object is more
complex than simple dict, list, int, str, etc. type object structures.
"""
with open(destination, 'r+') as FH:
fcntl.lockf(FH, fcntl.LOCK_EX)
json_in = json.loads(FH.read())
json_in.update(obj) # obj overwrites items in json_in...
FH.seek(0)
FH.write(json.dumps(json_in, sort_keys=True, indent=4,
separators=(',', ': '))) | 0.001481 |
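A usage sketch, assuming the destination file already exists and holds a JSON object (both the 'r+' open mode and the initial json.loads require that):

import json
import os
import tempfile

path = os.path.join(tempfile.gettempdir(), "store_json_demo.json")
with open(path, "w") as fh:
    json.dump({"existing": 1}, fh)

store_json({"new_key": 2}, path)

with open(path) as fh:
    print(json.load(fh))   # {'existing': 1, 'new_key': 2}

Note that the function seeks back to the start and overwrites in place without truncating, so it quietly assumes the merged JSON is never shorter than what was already on disk.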
def query_recent_most(num=8, recent=30):
'''
Query the records from database that recently updated.
    :param num: the number of records that will be returned.
:param recent: the number of days recent.
'''
time_that = int(time.time()) - recent * 24 * 3600
return TabPost.select().where(
TabPost.time_update > time_that
).order_by(
TabPost.view_count.desc()
).limit(num) | 0.004464 |
def clone(self, instance):
'''
Create a shallow clone of an *instance*.
**Note:** the clone and the original instance **does not** have to be
part of the same metaclass.
'''
metaclass = get_metaclass(instance)
metaclass = self.find_metaclass(metaclass.kind)
return metaclass.clone(instance) | 0.010989 |
def get_mean_width(self):
"""
Calculate and return (weighted) mean width (km) of a mesh surface.
The length of each mesh column is computed (summing up the cell widths
in a same column), and the mean value (weighted by the mean cell
length in each column) is returned.
"""
assert 1 not in self.lons.shape, (
"mean width is only defined for mesh of more than "
"one row and more than one column of points")
_, cell_length, cell_width, cell_area = self.get_cell_dimensions()
# compute widths along each mesh column
widths = numpy.sum(cell_width, axis=0)
# compute (weighted) mean cell length along each mesh column
column_areas = numpy.sum(cell_area, axis=0)
mean_cell_lengths = numpy.sum(cell_length * cell_area, axis=0) / \
column_areas
# compute and return weighted mean
return numpy.sum(widths * mean_cell_lengths) / \
numpy.sum(mean_cell_lengths) | 0.001957 |
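A tiny numeric sketch of the weighted mean at the end of the method, with two hypothetical mesh columns of widths 10 km and 20 km and mean cell lengths 1 km and 3 km:

import numpy

widths = numpy.array([10.0, 20.0])           # column widths, km
mean_cell_lengths = numpy.array([1.0, 3.0])  # weights: mean cell length per column
mean_width = numpy.sum(widths * mean_cell_lengths) / numpy.sum(mean_cell_lengths)
print(mean_width)   # 17.5 -> the longer column pulls the mean towards its own width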
def check_element(self, elem, check_children=False, next_to_elem=None):
"""
Given an element, check its attributes for references to the three proton attributes ('eid', 'aid' and 'rid').
"""
self.__add_element('eid', elem.attribs, self.__element_ids, elem, next_to_elem)
self.__add_element('aid', elem.attribs, self.__attrib_ids, elem, next_to_elem)
self.__add_element('rid', elem.attribs, self.__repeat_ids, elem, next_to_elem)
if check_children and elem.children:
for child in elem.children:
self.check_element(child, True, next_to_elem) | 0.009631 |
def idna_encode (host):
"""Encode hostname as internationalized domain name (IDN) according
to RFC 3490.
@raise: UnicodeError if hostname is not properly IDN encoded.
"""
if host and isinstance(host, unicode):
try:
host.encode('ascii')
return host, False
except UnicodeError:
uhost = host.encode('idna').decode('ascii')
return uhost, uhost != host
return host, False | 0.004405 |
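The row above targets Python 2 (note the `unicode` check); a rough Python 3 equivalent of the same behaviour, using the stdlib 'idna' codec, would look like this:

def idna_encode_py3(host):
    if host and isinstance(host, str):
        try:
            host.encode('ascii')
            return host, False
        except UnicodeError:
            # Non-ASCII hostname: encode each label per IDNA/RFC 3490.
            uhost = host.encode('idna').decode('ascii')
            return uhost, uhost != host
    return host, False

print(idna_encode_py3('example.com'))  # ('example.com', False)
print(idna_encode_py3('bücher.de'))    # ('xn--bcher-kva.de', True)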