text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---
def generate_py_units(data):
"""Generate the list of units in units.py."""
units = collections.defaultdict(list)
for unit in sorted(data.units, key=lambda a: a.name):
if unit.unit_id in static_data.UNIT_TYPES:
units[unit.race].append(unit)
def print_race(name, race):
print("class %s(enum.IntEnum):" % name)
print(' """%s units."""' % name)
for unit in units[race]:
print(" %s = %s" % (unit.name, unit.unit_id))
print("\n")
print_race("Neutral", sc_common.NoRace)
print_race("Protoss", sc_common.Protoss)
print_race("Terran", sc_common.Terran)
print_race("Zerg", sc_common.Zerg) | 0.01746 |
def _insert_to_array(self, chunk, results):
"""
Enters results arrays into the HDF5 database.
"""
## two result arrs
chunksize = self._chunksize
qrts, invs = results
## enter into db
with h5py.File(self.database.output, 'r+') as io5:
io5['quartets'][chunk:chunk+chunksize] = qrts
## entered as 0-indexed !
if self.params.save_invariants:
if self.checkpoint.boots:
key = "invariants/boot{}".format(self.checkpoint.boots)
io5[key][chunk:chunk+chunksize] = invs
else:
io5["invariants/boot0"][chunk:chunk+chunksize] = invs | 0.007013 |
def csv_list(models, attr, link=False, separator=", "):
"""Return a comma-separated list of models, optionaly with a link."""
values = []
for model in models:
value = getattr(model, attr)
if link and hasattr(model, "get_admin_url") and callable(model.get_admin_url):
value = get_admin_html_link(model, label=value)
values.append(value)
return separator.join(values) | 0.004796 |
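A minimal usage sketch for csv_list above, using a hypothetical Model namedtuple and leaving link=False so no admin URL helper is needed:
from collections import namedtuple
Model = namedtuple("Model", "name")  # hypothetical stand-in for a model object
print(csv_list([Model("alpha"), Model("beta")], "name"))  # -> alpha, beta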
def xslt(request):
"""Shows xml output transformed with standard xslt"""
foos = foobar_models.Foo.objects.all()
return render_xslt_to_response('xslt/model-to-xml.xsl', foos, mimetype='text/xml') | 0.009709 |
def sendMultiPart(smtp, gpg_context, sender, recipients, subject, text, attachments):
""" a helper method that composes and sends an email with attachments
requires a pre-configured smtplib.SMTP instance"""
sent = 0
for to in recipients:
if not to.startswith('<'):
uid = '<%s>' % to
else:
uid = to
if not checkRecipient(gpg_context, uid):
continue
msg = MIMEMultipart()
msg['From'] = sender
msg['To'] = to
msg['Subject'] = subject
msg["Date"] = formatdate(localtime=True)
msg.preamble = u'This is an email in encrypted multipart format.'
attach = MIMEText(str(gpg_context.encrypt(text.encode('utf-8'), uid, always_trust=True)))
attach.set_charset('UTF-8')
msg.attach(attach)
for attachment in attachments:
with open(attachment, 'rb') as fp:
attach = MIMEBase('application', 'octet-stream')
attach.set_payload(str(gpg_context.encrypt_file(fp, uid, always_trust=True)))
attach.add_header('Content-Disposition', 'attachment', filename=basename('%s.pgp' % attachment))
msg.attach(attach)
# TODO: need to catch exception?
# yes :-) we need to adjust the status accordingly (>500 so it will be destroyed)
smtp.begin()
smtp.sendmail(sender, to, msg.as_string())
smtp.quit()
sent += 1
return sent | 0.004079 |
def _get_orientation_changes(self):
""" Returns a list of the pages that have
orientation changes."""
self.orientation_changes = []
for page in self.pages:
if page.orientation_change is True:
self.orientation_changes.append(page.index)
return self.orientation_changes | 0.005168 |
def html_diff(self, old, new):
"""
Return HTML formatted character-based diff between old and new (used for CS50 IDE).
"""
def html_transition(old_type, new_type):
tags = []
for tag in [("/", old_type), ("", new_type)]:
if tag[1] not in ["+", "-"]:
continue
tags.append("<{}{}>".format(tag[0], "ins" if tag[1] == "+" else "del"))
return "".join(tags)
return self._char_diff(old, new, html_transition, fmt=cgi.escape) | 0.007339 |
def progress(self, *restrictions, display=True):
"""
report progress of populating the table
:return: remaining, total -- tuples to be populated
"""
todo = self._jobs_to_do(restrictions)
total = len(todo)
remaining = len(todo - self.target)
if display:
print('%-20s' % self.__class__.__name__,
'Completed %d of %d (%2.1f%%) %s' % (
total - remaining, total, 100 - 100 * remaining / (total+1e-12),
datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')), flush=True)
return remaining, total | 0.006107 |
def export(self, contentType):
"""
Export message to specified contentType via munge
contentType <str> - e.g. "json", "yaml"
"""
cls = munge.get_codec(contentType)
codec = cls()
return codec.dumps(self.__dict__()) | 0.007435 |
def encode(arg, delimiter=None, encodeseq=None, encoded=tuple()):
'''Encode a single argument for the file-system'''
arg = coerce_unicode(arg, _c.FSQ_CHARSET)
new_arg = sep = u''
delimiter, encodeseq = delimiter_encodeseq(
_c.FSQ_DELIMITER if delimiter is None else delimiter,
_c.FSQ_ENCODE if encodeseq is None else encodeseq,
_c.FSQ_CHARSET)
# validate encoded tuple
for enc in encoded:
enc = coerce_unicode(enc, _c.FSQ_CHARSET)
try:
enc = enc.encode('ascii')
except UnicodeEncodeError:
raise FSQEncodeError(errno.EINVAL, u'invalid encoded value: {0}'\
u' non-ascii'.format(enc))
# char-wise encode walk
for seq in arg:
if seq == delimiter or seq == encodeseq or seq in _ENCODED + encoded:
h_val = hex(ord(seq))
# front-pad with zeroes
if 3 == len(h_val):
h_val = sep.join([h_val[:2], u'0', h_val[2:]])
if 4 != len(h_val):
raise FSQEncodeError(errno.EINVAL, u'invalid hex ({0}) for'\
' encode-target: {1}'.format(h_val, seq))
seq = sep.join([encodeseq, h_val[2:]])
new_arg = sep.join([new_arg, seq])
return new_arg | 0.002299 |
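The front-padding branch in encode above can be shown in isolation (a standalone sketch of just the hex-padding step, not the fsq API):
h_val = hex(ord('\t'))               # '0x9' -- a single hex digit, so len(h_val) == 3
h_val = h_val[:2] + '0' + h_val[2:]  # front-pad with a zero: '0x09'
print(h_val[2:])                     # '09', the two digits appended after the encode sequence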
def clean_pubmed_identifiers(pmids: Iterable[str]) -> List[str]:
"""Clean a list of PubMed identifiers with string strips, deduplicates, and sorting."""
return sorted({str(pmid).strip() for pmid in pmids}) | 0.00939 |
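A quick sketch of the behaviour documented above, assuming the function and its typing imports are available:
print(clean_pubmed_identifiers([" 123 ", "123", "45"]))  # -> ['123', '45']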
def at(self, time_str):
"""
Schedule the job every day at a specific time.
Calling this is only valid for jobs scheduled to run
every N day(s).
:param time_str: A string in `XX:YY` format.
:return: The invoked job instance
"""
assert self.unit in ('days', 'hours') or self.start_day
hour, minute = time_str.split(':')
minute = int(minute)
if self.unit == 'days' or self.start_day:
hour = int(hour)
assert 0 <= hour <= 23
elif self.unit == 'hours':
hour = 0
assert 0 <= minute <= 59
self.at_time = datetime.time(hour, minute)
return self | 0.002882 |
def autohelp_directive(dirname, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""produces rst from nose help"""
config = Config(parserClass=OptBucket,
plugins=BuiltinPluginManager())
parser = config.getParser(TestProgram.usage())
rst = ViewList()
for line in parser.format_help().split('\n'):
rst.append(line, '<autodoc>')
rst.append('Options', '<autodoc>')
rst.append('-------', '<autodoc>')
rst.append('', '<autodoc>')
for opt in parser:
rst.append(opt.options(), '<autodoc>')
rst.append(' \n', '<autodoc>')
rst.append(' ' + opt.help + '\n', '<autodoc>')
rst.append('\n', '<autodoc>')
node = nodes.section()
node.document = state.document
surrounding_title_styles = state.memo.title_styles
surrounding_section_level = state.memo.section_level
state.memo.title_styles = []
state.memo.section_level = 0
state.nested_parse(rst, 0, node, match_titles=1)
state.memo.title_styles = surrounding_title_styles
state.memo.section_level = surrounding_section_level
return node.children | 0.001685 |
def _get_rate(self, value):
"""Return the rate in Hz from the short int value"""
if value == 0:
return 0
else:
return MINIMAL_RATE_HZ * math.exp(value * self._get_factor()) | 0.009091 |
def _prepare_bam_file(bam_file, tmp_dir, config):
"""
Sort the BAM file by query name if it is currently coordinate-sorted.
"""
sort_mode = _get_sort_order(bam_file, config)
if sort_mode != "queryname":
bam_file = sort(bam_file, config, "queryname")
return bam_file | 0.00361 |
def merge_enums(xml):
'''merge enums between XML files'''
emap = {}
for x in xml:
newenums = []
for enum in x.enum:
if enum.name in emap:
emapitem = emap[enum.name]
# check for possible conflicting auto-assigned values after merge
if (emapitem.start_value <= enum.highest_value and emapitem.highest_value >= enum.start_value):
for entry in emapitem.entry:
# correct the value if necessary, but only if it was auto-assigned to begin with
if entry.value <= enum.highest_value and entry.autovalue == True:
entry.value = enum.highest_value + 1
enum.highest_value = entry.value
# merge the entries
emapitem.entry.extend(enum.entry)
if not emapitem.description:
emapitem.description = enum.description
print("Merged enum %s" % enum.name)
else:
newenums.append(enum)
emap[enum.name] = enum
x.enum = newenums
for e in emap:
# sort by value
emap[e].entry = sorted(emap[e].entry,
key=operator.attrgetter('value'),
reverse=False)
# add an ENUM_END entry
emap[e].entry.append(MAVEnumEntry("%s_ENUM_END" % emap[e].name,
emap[e].entry[-1].value+1, end_marker=True)) | 0.005215 |
def flatten(args):
"""
%prog flatten filename > ids
Convert a list of IDs (say, multiple IDs per line) and move them into one
per line.
For example, convert this, to this:
A,B,C | A
1 | B
a,4 | C
| 1
| a
| 4
If multi-column file with multiple elements per column, zip then flatten like so:
A,B,C 2,10,gg | A,2
1,3 4 | B,10
| C,gg
| 1,4
| 3,na
"""
from six.moves import zip_longest
p = OptionParser(flatten.__doc__)
p.set_sep(sep=",")
p.add_option("--zipflatten", default=None, dest="zipsep",
help="Specify if columns of the file should be zipped before" +
" flattening. If so, specify delimiter separating column elements" +
" [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
tabfile, = args
zipsep = opts.zipsep
fp = must_open(tabfile)
for row in fp:
if zipsep:
row = row.rstrip()
atoms = row.split(opts.sep)
frows = []
for atom in atoms:
frows.append(atom.split(zipsep))
print("\n".join([zipsep.join(x) for x in list(zip_longest(*frows, fillvalue="na"))]))
else:
print(row.strip().replace(opts.sep, "\n")) | 0.003155 |
async def get_cred_briefs_by_proof_req_q(self, proof_req_json: str, x_queries_json: str = None) -> str:
"""
A cred-brief aggregates a cred-info and a non-revocation interval. A cred-brief-dict maps
wallet cred-ids to their corresponding cred-briefs.
Return json (cred-brief-dict) object mapping wallet credential identifiers to cred-briefs by
proof request and WQL queries by proof request referent. Return empty dict on no WQL query and
empty requested predicates specification within proof request. Utility util.proof_req2wql_all()
builds WQL to retrieve all cred-briefs for (some or all) cred-def-ids in a proof request.
For each WQL query on an item referent, indy-sdk takes the WQL and the attribute name
and restrictions (e.g., cred def id, schema id, etc.) from its referent. Note that
util.proof_req_attr_referents() maps cred defs and attr names to proof req item referents,
bridging the gap between attribute names and their corresponding item referents.
Raise WalletState if the wallet is closed.
:param proof_req_json: proof request as per Verifier.build_proof_req_json(); e.g.,
::
{
"nonce": "1532429687",
"name": "proof_req",
"version": "0.0",
"requested_predicates": {},
"requested_attributes": {
"17_name_uuid": {
"restrictions": [
{
"cred_def_id": "LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag"
}
],
"name": "name"
},
"17_thing_uuid": {
"restrictions": [
{
"cred_def_id": "LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag"
}
],
"name": "thing"
}
}
}
:param x_queries_json: json list of extra queries to apply to proof request attribute and predicate
referents; e.g.,
::
{
"17_thing_uuid": { # require attr presence on name 'thing', cred def id from proof req above
"$or": [
{
"attr::name::value": "J.R. 'Bob' Dobbs"
},
{
"attr::thing::value": "slack"
},
]
},
}
:return: json (cred-brief-dict) object mapping wallet cred ids to cred briefs; e.g.,
::
{
"b42ce5bc-b690-43cd-9493-6fe86ad25e85": {
"interval": null,
"cred_info": {
"schema_id": "LjgpST2rjsoxYegQDRm7EL:2:non-revo:1.0",
"rev_reg_id": null,
"attrs": {
"name": "J.R. \"Bob\" Dobbs",
"thing": "slack"
},
"cred_rev_id": null,
"referent": "b42ce5bc-b690-43cd-9493-6fe86ad25e85",
"cred_def_id": "LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag"
}
},
"d773434a-0080-4e3e-a03b-f2033eae7d75": {
"interval": null,
"cred_info": {
"schema_id": "LjgpST2rjsoxYegQDRm7EL:2:non-revo:1.0",
"rev_reg_id": null,
"attrs": {
"name": "Chicken Hawk",
"thing": "chicken"
},
"cred_rev_id": null,
"referent": "d773434a-0080-4e3e-a03b-f2033eae7d75",
"cred_def_id": "LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag"
}
}
}
"""
LOGGER.debug(
('HolderProver.get_cred_briefs_by_proof_req_q >>> proof_req_json: %s, x_queries_json: %s'),
proof_req_json,
x_queries_json)
if not self.wallet.handle:
LOGGER.debug('HolderProver.get_cred_briefs_by_proof_req_q <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
def _pred_filter(brief):
nonlocal pred_refts
for attr, preds in pred_refts.get(brief['cred_info']['cred_def_id'], {}).items():
if any(Predicate.get(p[0]).value.no(brief['cred_info']['attrs'][attr], p[1]) for p in preds.values()):
return False
return True
rv = {}
item_refts = set()
x_queries = json.loads(x_queries_json or '{}')
for k in x_queries:
x_queries[k] = canon_cred_wql(x_queries[k]) # indy-sdk requires attr name canonicalization
item_refts.add(k)
proof_req = json.loads(proof_req_json)
item_refts.update(uuid for uuid in proof_req['requested_predicates'])
if not x_queries:
item_refts.update(uuid for uuid in proof_req['requested_attributes']) # get all req attrs if no extra wql
handle = await anoncreds.prover_search_credentials_for_proof_req(
self.wallet.handle,
proof_req_json,
json.dumps(x_queries) if x_queries else None)
pred_refts = proof_req_pred_referents(proof_req)
try:
for item_referent in item_refts:
count = Wallet.DEFAULT_CHUNK
while count == Wallet.DEFAULT_CHUNK:
fetched = json.loads(await anoncreds.prover_fetch_credentials_for_proof_req(
handle,
item_referent,
Wallet.DEFAULT_CHUNK))
count = len(fetched)
for brief in fetched: # apply predicates from proof req here
if brief['cred_info']['referent'] not in rv and _pred_filter(brief):
rv[brief['cred_info']['referent']] = brief
finally:
await anoncreds.prover_close_credentials_search_for_proof_req(handle)
rv_json = json.dumps(rv)
LOGGER.debug('HolderProver.get_cred_briefs_by_proof_req_q <<< %s', rv_json)
return rv_json | 0.004435 |
def trits_from_int(n, pad=1):
# type: (int, Optional[int]) -> List[int]
"""
Returns a trit representation of an integer value.
:param n:
Integer value to convert.
:param pad:
Ensure the result has at least this many trits.
References:
- https://dev.to/buntine/the-balanced-ternary-machines-of-soviet-russia
- https://en.wikipedia.org/wiki/Balanced_ternary
- https://rosettacode.org/wiki/Balanced_ternary#Python
"""
if n == 0:
trits = []
else:
quotient, remainder = divmod(n, 3)
if remainder == 2:
# Lend 1 to the next place so we can make this trit
# negative.
quotient += 1
remainder = -1
trits = [remainder] + trits_from_int(quotient, pad=0)
if pad:
trits += [0] * max(0, pad - len(trits))
return trits | 0.001147 |
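A worked example of the balanced-ternary conversion above (least-significant trit first):
print(trits_from_int(8))         # -> [-1, 0, 1], since -1*1 + 0*3 + 1*9 == 8
print(trits_from_int(0, pad=3))  # -> [0, 0, 0], zero padded to three trits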
def _find_flag_groups(h5f):
"""Return all groups in `h5f` that look like flags
"""
flag_groups = []
def _find(name, obj):
if _is_flag_group(obj):
flag_groups.append(name)
h5f.visititems(_find)
return flag_groups | 0.003891 |
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if not 0.0 < self.subsample <= 1.0:
raise ValueError("subsample must be in ]0; 1] but "
"was %r" % self.subsample)
if not 0.0 < self.learning_rate <= 1.0:
raise ValueError("learning_rate must be within ]0; 1] but "
"was %r" % self.learning_rate)
if not 0.0 <= self.dropout_rate < 1.0:
raise ValueError("dropout_rate must be within [0; 1[, but "
"was %r" % self.dropout_rate)
if self.loss not in LOSS_FUNCTIONS:
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss)) | 0.002179 |
def get_file_relative_path_by_id(self, id):
"""
Given an id, get the corresponding file info relative path joined with file name.
Parameters:
#. id (string): The file unique id string.
:Returns:
#. relativePath (string): The file relative path joined with file name.
If None, it means file was not found.
"""
for path, info in self.walk_files_info():
if info['id']==id:
return path
# none was found
return None | 0.009208 |
def collect_scripts_from_sources(script_paths, files_deployment, project_path='.', is_package=False, logger=None):
"""
Collects postgres scripts from source files
:param script_paths: list of strings or a string with a relative path to the directory containing files with scripts
:param files_deployment: list of files that need to be harvested. Scripts from there will only be taken
if the path to the file is in script_paths
:param project_path: path to the project source code
:param is_package: are files packaged with pip egg
:param logger: pass the logger object if needed
:return:
"""
logger = logger or logging.getLogger(__name__)
scripts_dict = {}
if script_paths:
if not isinstance(script_paths, list): # can be list of paths or a string, anyways converted to list
script_paths = [script_paths]
if is_package:
for script_path in script_paths:
for file_info in pkg_resources.resource_listdir('pgpm', script_path):
file_content = pkg_resources.resource_string('pgpm', '{0}/{1}'.format(script_path, file_info))\
.decode('utf-8')
if file_content:
scripts_dict[file_info] = file_content
logger.debug('File {0}/{1} collected.'.format(script_path, file_info))
else:
logger.debug('File {0}/{1} not collected as it\'s empty.'.format(script_path, file_info))
else:
if files_deployment: # if specific script to be deployed, only find them
for list_file_name in files_deployment:
list_file_full_path = os.path.join(project_path, list_file_name)
if os.path.isfile(list_file_full_path):
for i in range(len(script_paths)):
if script_paths[i] in list_file_full_path:
file_content = io.open(list_file_full_path, 'r', -1, 'utf-8-sig', 'ignore').read()
if file_content:
scripts_dict[list_file_name] = file_content
logger.debug('File {0} collected.'.format(list_file_full_path))
else:
logger.debug('File {0} not collected as it\'s empty.'.format(list_file_full_path))
else:
logger.debug('File {0} is not found in any of {1} folders, please specify a correct path'
.format(list_file_full_path, script_paths))
else:
for script_path in script_paths:
for subdir, dirs, files in os.walk(script_path):
files = sorted(files)
for file_info in files:
if file_info != settings.CONFIG_FILE_NAME and file_info[0] != '.':
file_content = io.open(os.path.join(subdir, file_info),
'r', -1, 'utf-8-sig', 'ignore').read()
if file_content:
scripts_dict[file_info] = file_content
logger.debug('File {0} collected'.format(os.path.join(subdir, file_info)))
else:
logger.debug('File {0} not collected as it\'s empty.'
.format(os.path.join(subdir, file_info)))
return scripts_dict | 0.006277 |
def trainOn(self, dstream):
"""Train the model on the incoming dstream."""
self._validate(dstream)
def update(rdd):
self._model.update(rdd, self._decayFactor, self._timeUnit)
dstream.foreachRDD(update) | 0.008097 |
def get_temp_and_dew(wxdata: str) -> ([str], Number, Number): # type: ignore
"""
Returns the report list with the temperature/dewpoint group removed, plus the parsed temperature and dewpoint values
"""
for i, item in reversed(list(enumerate(wxdata))):
if '/' in item:
# ///07
if item[0] == '/':
item = '/' + item.lstrip('/')
# 07///
elif item[-1] == '/':
item = item.rstrip('/') + '/'
tempdew = item.split('/')
if len(tempdew) != 2:
continue
valid = True
for j, temp in enumerate(tempdew):
if temp in ['MM', 'XX']:
tempdew[j] = ''
elif not is_possible_temp(temp):
valid = False
break
if valid:
wxdata.pop(i) # type: ignore
return (wxdata, *[make_number(t) for t in tempdew])
return wxdata, None, None | 0.00104 |
def _expand_shorthand(model_formula, variables):
"""Expand shorthand terms in the model formula.
"""
wm = 'white_matter'
gsr = 'global_signal'
rps = 'trans_x + trans_y + trans_z + rot_x + rot_y + rot_z'
fd = 'framewise_displacement'
acc = _get_matches_from_data('a_comp_cor_[0-9]+', variables)
tcc = _get_matches_from_data('t_comp_cor_[0-9]+', variables)
dv = _get_matches_from_data('^std_dvars$', variables)
dvall = _get_matches_from_data('.*dvars', variables)
nss = _get_matches_from_data('non_steady_state_outlier[0-9]+',
variables)
spikes = _get_matches_from_data('motion_outlier[0-9]+', variables)
model_formula = re.sub('wm', wm, model_formula)
model_formula = re.sub('gsr', gsr, model_formula)
model_formula = re.sub('rps', rps, model_formula)
model_formula = re.sub('fd', fd, model_formula)
model_formula = re.sub('acc', acc, model_formula)
model_formula = re.sub('tcc', tcc, model_formula)
model_formula = re.sub('dv', dv, model_formula)
model_formula = re.sub('dvall', dvall, model_formula)
model_formula = re.sub('nss', nss, model_formula)
model_formula = re.sub('spikes', spikes, model_formula)
formula_variables = _get_variables_from_formula(model_formula)
others = ' + '.join(set(variables) - set(formula_variables))
model_formula = re.sub('others', others, model_formula)
return model_formula | 0.000692 |
def put(self, key, data, ttl_secs=None):
"""Like :meth:`~simplekv.KeyValueStore.put`, but with an additional
parameter:
:param ttl_secs: Number of seconds until the key expires. See above
for valid values.
:raises exceptions.ValueError: If ``ttl_secs`` is invalid.
:raises exceptions.IOError: If storing failed or the file could not
be read
"""
self._check_valid_key(key)
if not isinstance(data, bytes):
raise IOError("Provided data is not of type bytes")
return self._put(key, data, self._valid_ttl(ttl_secs)) | 0.003012 |
def _generate_examples(self, imgs_path, csv_path):
"""Yields examples."""
with tf.io.gfile.GFile(csv_path) as csv_f:
reader = csv.DictReader(csv_f)
# Get keys for each label from csv
label_keys = reader.fieldnames[5:]
data = []
for row in reader:
# Get image based on indicated path in csv
name = row["Path"]
labels = [_LABELS[row[key]] for key in label_keys]
data.append((name, labels))
for name, labels in data:
yield {
"name": name,
"image": os.path.join(imgs_path, name),
"label": labels
} | 0.011475 |
def get_user_info(self, recipient_id, fields=None):
"""Getting information about the user
https://developers.facebook.com/docs/messenger-platform/user-profile
Input:
recipient_id: recipient id to send to
Output:
Response from API as <dict>
"""
params = {}
if fields is not None and isinstance(fields, (list, tuple)):
params['fields'] = ",".join(fields)
params.update(self.auth_args)
request_endpoint = '{0}/{1}'.format(self.graph_url, recipient_id)
response = requests.get(request_endpoint, params=params)
if response.status_code == 200:
return response.json()
return None | 0.002797 |
def mod(self):
""" Cached compiled binary of the Generic_Code class.
To clear cache invoke :meth:`clear_mod_cache`.
"""
if self._mod is None:
self._mod = self.compile_and_import_binary()
return self._mod | 0.007813 |
def setup_logging(verbose=False, logger=None):
"""Setup console logging. Info and below go to stdout, others go to stderr.
:param bool verbose: Print debug statements.
:param str logger: Which logger to set handlers to. Used for testing.
"""
if not verbose:
logging.getLogger('requests').setLevel(logging.WARNING)
format_ = '%(asctime)s %(levelname)-8s %(name)-40s %(message)s' if verbose else '%(message)s'
level = logging.DEBUG if verbose else logging.INFO
handler_stdout = logging.StreamHandler(sys.stdout)
handler_stdout.setFormatter(logging.Formatter(format_))
handler_stdout.setLevel(logging.DEBUG)
handler_stdout.addFilter(InfoFilter())
handler_stderr = logging.StreamHandler(sys.stderr)
handler_stderr.setFormatter(logging.Formatter(format_))
handler_stderr.setLevel(logging.WARNING)
root_logger = logging.getLogger(logger)
root_logger.setLevel(level)
root_logger.addHandler(handler_stdout)
root_logger.addHandler(handler_stderr) | 0.001957 |
def eval(self):
""" Evaluates the given input and returns a string containing the
actual filenames represented. If the input token represents multiple
independent files, then eval will return a list of all the input files
needed, otherwise it returns the filenames in a string.
"""
if self.and_or == 'or':
return [Input(self.alias, file, self.cwd, 'and')
for file in self.files]
return ' '.join(self.files) | 0.006122 |
def htmlsafe(unsafe):
"""
Escapes all x(ht)ml control characters.
"""
unsafe = unsafe.replace('&', '&')
unsafe = unsafe.replace('<', '<')
unsafe = unsafe.replace('>', '>')
return unsafe | 0.004484 |
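A small example of the escaping above; '&' is replaced first so the entities produced for '<' and '>' are not escaped twice:
print(htmlsafe('<a href="x">&</a>'))  # -> &lt;a href="x"&gt;&amp;&lt;/a&gt;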
def solve_gcp(V,E):
"""solve_gcp -- solve the graph coloring problem with bisection and fixed-k model
Parameters:
- V: set/list of nodes in the graph
- E: set/list of edges in the graph
Returns tuple with number of colors used, and dictionary mapping colors to vertices
"""
LB = 0
UB = len(V)
color = {}
while UB-LB > 1:
K = int((UB+LB) / 2)
gcp = gcp_fixed_k(V,E,K)
# gcp.Params.OutputFlag = 0 # silent mode
#gcp.Params.Cutoff = .1
gcp.setObjlimit(0.1)
gcp.optimize()
status = gcp.getStatus()
if status == "optimal":
x,z = gcp.data
for i in V:
for k in range(K):
if gcp.getVal(x[i,k]) > 0.5:
color[i] = k
break
# else:
# raise "undefined color for", i
UB = K
else:
LB = K
return UB,color | 0.010194 |
def _assert_is_dictlike(maybe_dictlike, valid_keys):
"""Raises a TypeError iff `maybe_dictlike` is not a dictlike object."""
# This covers a common mistake when people use incorrect dictionary nesting
# for initializers / partitioners etc. The previous error message was quite
# opaque, this should be much clearer.
if not hasattr(maybe_dictlike, "__getitem__"):
raise TypeError(
"Expected a dict-like object with possible keys %s, received %s" %
(str(valid_keys), str(maybe_dictlike))) | 0.011628 |
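A tiny sanity check of the guard above: any object with __getitem__ (e.g. a dict) passes, anything else raises TypeError; valid_keys only feeds the error message.
_assert_is_dictlike({"w": 1}, valid_keys=["w"])   # passes silently
# _assert_is_dictlike(3, valid_keys=["w"])        # would raise TypeError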
def make_param_dict_from_file(self,path_to_params):
"""
make param dict from a file on disk
"""
# then we were given a path to a parameter file
param_list = list(csv.reader(open(path_to_params,"rb")))
# delete empty elements (if any)
param_file = [x for x in param_list if x != []]
# make dict of [wavenames] = raw_params
name_list = []
param_list = []
# get header names for each param (names of param_list columns)
param_colnames = param_file[0][1:] # 0th element is "Name" or "Wavename"
# start from 1. (row 0 is the header)
for i in np.arange(1, len(param_file)):
name_list.append(param_file[i][0])
param_list.append(param_file[i][1:])
# remove ' ' blank spaces from param_list
param_list = [[x.strip() for x in y] for y in param_list]
param_dict = {}
# i loops through param_colnames, j loops thru param values per wave
for i in np.arange(0, len(param_colnames)):
param_dict[param_colnames[i]] = []
for j in np.arange(0,len(name_list)):
param_dict[param_colnames[i]].append(param_list[j][i])
# now we have param_dict, and name_list
self._param_dict = param_dict
self._row_names = name_list | 0.005243 |
def _diff(self, x, th, eps):
"""
Differentiation function.
Numerical approximation of a Rosenblatt transformation created from
copula formulation.
"""
foo = lambda y: self.igen(numpy.sum(self.gen(y, th), 0), th)
out1 = out2 = 0.
sign = 1 - 2*(x > .5).T
for I in numpy.ndindex(*((2,)*(len(x)-1)+(1,))):
eps_ = numpy.array(I)*eps
x_ = (x.T + sign*eps_).T
out1 += (-1)**sum(I)*foo(x_)
x_[-1] = 1
out2 += (-1)**sum(I)*foo(x_)
out = out1/out2
return out | 0.006656 |
def delete_pipeline_stage(self, pipeline_key, stage_key, sort_by = None):
'''Deletes a stage in the pipeline by stage key and pipeline key
Args:
pipeline_key key for pipeline
stage_key key for stage
sort_by in desc order by 'creationTimestamp' or 'lastUpdatedTimestamp'
returns (status code for the GET request, dict of op report)
'''
if not (pipeline_key and stage_key):
return requests.codes.bad_request, None
uri = '/'.join([
self.api_uri,
self.pipelines_suffix,
pipeline_key,
self.stages_suffix,
stage_key
])
code, data = self._req('delete', uri)
return code, data | 0.041796 |
def _init_map(self, record_types=None, **kwargs):
"""Initialize form map"""
osid_objects.OsidObjectForm._init_map(self, record_types=record_types)
self._my_map['levelId'] = self._level_default
self._my_map['startTime'] = self._start_time_default
self._my_map['gradeSystemId'] = self._grade_system_default
self._my_map['itemsShuffled'] = self._items_shuffled_default
self._my_map['scoreSystemId'] = self._score_system_default
self._my_map['deadline'] = self._deadline_default
self._my_map['assignedBankIds'] = [str(kwargs['bank_id'])]
self._my_map['duration'] = self._duration_default
self._my_map['assessmentId'] = str(kwargs['assessment_id'])
self._my_map['itemsSequential'] = self._items_sequential_default | 0.002488 |
def getlist(self, name):
"""
Retrieve given property from class/instance, ensuring it is a list.
Also determine whether the list contains simple text/numeric values or
nested dictionaries (a "complex" list)
"""
value = self.getvalue(name)
complex = {}
def str_value(val):
# TODO: nonlocal complex
if isinstance(val, dict):
complex['complex'] = True
return val
else:
return str(val)
if value is None:
pass
else:
value = [str_value(val) for val in as_list(value)]
return value, bool(complex) | 0.002907 |
def get_region_bed(region, items, out_file, want_gzip=True):
"""Retrieve BED file of regions to analyze, either single or multi-region.
"""
variant_regions = bedutils.population_variant_regions(items, merged=True)
target = shared.subset_variant_regions(variant_regions, region, out_file, items)
if not target:
raise ValueError("Need BED input for strelka2 regions: %s %s" % (region, target))
if not isinstance(target, six.string_types) or not os.path.isfile(target):
chrom, start, end = target
target = "%s-regions.bed" % utils.splitext_plus(out_file)[0]
with file_transaction(items[0], target) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
out_handle.write("%s\t%s\t%s\n" % (chrom, start, end))
out_file = target
if want_gzip:
out_file = vcfutils.bgzip_and_index(out_file, items[0]["config"])
return out_file | 0.00324 |
def strptime(cls, date_string, fmt):
"""
This is the opposite of :py:meth:`khayyam.JalaliDate.strftime`,
and used to parse date strings into date object.
`ValueError` is raised if the date_string and format can’t be
parsed by time.strptime() or if it returns a value which isn’t a time tuple. For a
complete list of formatting directives, see :doc:`/directives`.
:param date_string:
:param fmt:
:return: A :py:class:`khayyam.JalaliDate` corresponding to date_string, parsed according to format
:rtype: :py:class:`khayyam.JalaliDate`
"""
# noinspection PyUnresolvedReferences
result = cls.formatterfactory(fmt).parse(date_string)
result = {k: v for k, v in result.items() if k in ('year', 'month', 'day')}
return cls(**result) | 0.005903 |
def _decode_region(decoder, region, corrections, shrink):
"""Decodes and returns the value in a region.
Args:
region (DmtxRegion):
Yields:
Decoded or None: The decoded value.
"""
with _decoded_matrix_region(decoder, region, corrections) as msg:
if msg:
# Coordinates
p00 = DmtxVector2()
p11 = DmtxVector2(1.0, 1.0)
dmtxMatrix3VMultiplyBy(
p00,
region.contents.fit2raw
)
dmtxMatrix3VMultiplyBy(p11, region.contents.fit2raw)
x0 = int((shrink * p00.X) + 0.5)
y0 = int((shrink * p00.Y) + 0.5)
x1 = int((shrink * p11.X) + 0.5)
y1 = int((shrink * p11.Y) + 0.5)
return Decoded(
string_at(msg.contents.output),
Rect(x0, y0, x1 - x0, y1 - y0)
)
else:
return None | 0.001079 |
def render_list(self, cnt, unique=False, progress_callback=None, **kwargs):
'''Return a list of generated strings.
Args:
cnt (int): length of list
unique (bool): whether to make entries unique
Returns:
list.
We keep track of total attempts because a template may
specify something impossible to attain, like [1-9]{} with cnt==1000
'''
rendered_list = []
i = 0
total_attempts = 0
while True:
if i >= cnt:
break
if total_attempts > cnt * self.unique_attempts_factor:
raise StringGenerator.UniquenessError(u"couldn't satisfy uniqueness")
s = self.render(**kwargs)
if unique:
if s not in rendered_list:
rendered_list.append(s)
i += 1
else:
rendered_list.append(s)
i += 1
total_attempts += 1
# Optionally trigger the progress indicator to inform others about our progress
if progress_callback and callable(progress_callback):
progress_callback(i, cnt)
return rendered_list | 0.004847 |
def download(url: str, filename: str,
skip_cert_verify: bool = True) -> None:
"""
Downloads a URL to a file.
Args:
url: URL to download from
filename: file to save to
skip_cert_verify: skip SSL certificate check?
"""
log.info("Downloading from {} to {}", url, filename)
# urllib.request.urlretrieve(url, filename)
# ... sometimes fails (e.g. downloading
# https://www.openssl.org/source/openssl-1.1.0g.tar.gz under Windows) with:
# ssl.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:777) # noqa
# ... due to this certificate root problem (probably because OpenSSL
# [used by Python] doesn't play entirely by the same rules as others?):
# https://stackoverflow.com/questions/27804710
# So:
ctx = ssl.create_default_context() # type: ssl.SSLContext
if skip_cert_verify:
log.debug("Skipping SSL certificate check for " + url)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with urllib.request.urlopen(url, context=ctx) as u, open(filename,
'wb') as f: # noqa
f.write(u.read()) | 0.00082 |
def store(config, archiver, revision, stats):
"""
Store a revision record within an archiver folder.
:param config: The configuration
:type config: :class:`wily.config.WilyConfig`
:param archiver: The name of the archiver type (e.g. 'git')
:type archiver: ``str``
:param revision: The revision ID
:type revision: ``str``
:param stats: The collected data
:type stats: ``dict``
:return: The absolute path to the created file
:rtype: ``str``
:rtype: `pathlib.Path`
"""
root = pathlib.Path(config.cache_path) / archiver.name
if not root.exists():
logger.debug("Creating wily cache")
root.mkdir()
# fix absolute path references.
if config.path != ".":
for operator, operator_data in list(stats["operator_data"].items()):
if operator_data:
new_operator_data = operator_data.copy()
for k, v in list(operator_data.items()):
new_key = os.path.relpath(str(k), str(config.path))
del new_operator_data[k]
new_operator_data[new_key] = v
del stats["operator_data"][operator]
stats["operator_data"][operator] = new_operator_data
logger.debug(f"Creating {revision.key} output")
filename = root / (revision.key + ".json")
if filename.exists():
raise RuntimeError(f"File {filename} already exists, index may be corrupt.")
with open(filename, "w") as out:
out.write(json.dumps(stats, indent=2))
return filename | 0.003157 |
def parse(self, callback_data: str) -> typing.Dict[str, str]:
"""
Parse data from the callback data
:param callback_data:
:return:
"""
prefix, *parts = callback_data.split(self.sep)
if prefix != self.prefix:
raise ValueError("Passed callback data can't be parsed with that prefix.")
elif len(parts) != len(self._part_names):
raise ValueError('Invalid parts count!')
result = {'@': prefix}
result.update(zip(self._part_names, parts))
return result | 0.005357 |
def set(self, name, valu):
'''
Set a name in the SlabDict.
Args:
name (str): The key name.
valu (obj): A msgpack compatible value.
Returns:
None
'''
byts = s_msgpack.en(valu)
lkey = self.pref + name.encode('utf8')
self.slab.put(lkey, byts, db=self.db)
self.info[name] = valu | 0.005208 |
def destroy_volume_snapshot(volume_id, snapshot_id, profile, **libcloud_kwargs):
'''
Destroy a volume snapshot.
:param volume_id: Volume ID from which the snapshot belongs
:type volume_id: ``str``
:param snapshot_id: Volume Snapshot ID from which to destroy
:type snapshot_id: ``str``
:param profile: The profile key
:type profile: ``str``
:param libcloud_kwargs: Extra arguments for the driver's destroy_volume_snapshot method
:type libcloud_kwargs: ``dict``
CLI Example:
.. code-block:: bash
salt myminion libcloud_compute.destroy_volume_snapshot snap1 profile1
'''
conn = _get_driver(profile=profile)
libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
volume = _get_by_id(conn.list_volumes(), volume_id)
snapshot = _get_by_id(conn.list_volume_snapshots(volume), snapshot_id)
return conn.destroy_volume_snapshot(snapshot, **libcloud_kwargs) | 0.003158 |
def get_entity_by_netid(self, netid):
"""
Returns a restclients.Entity object for the given netid. If the
netid isn't found, or if there is an error communicating with the PWS,
a DataFailureException will be thrown.
"""
if not self.valid_uwnetid(netid):
raise InvalidNetID(netid)
url = "{}/{}.json".format(ENTITY_PREFIX, netid.lower())
response = DAO.getURL(url, {"Accept": "application/json"})
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
return self._entity_from_json(response.data) | 0.003135 |
def _is_tp(pkt):
"""Returns true if pkt is using SOMEIP-TP, else returns false."""
tp = [SOMEIP.TYPE_TP_REQUEST, SOMEIP.TYPE_TP_REQUEST_NO_RET,
SOMEIP.TYPE_TP_NOTIFICATION, SOMEIP.TYPE_TP_RESPONSE,
SOMEIP.TYPE_TP_ERROR]
if isinstance(pkt, Packet):
return pkt.msg_type in tp
else:
return pkt[15] in tp | 0.005195 |
def peek_path_info(environ, charset='utf-8', errors='replace'):
"""Returns the next segment on the `PATH_INFO` or `None` if there
is none. Works like :func:`pop_path_info` without modifying the
environment:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> peek_path_info(env)
'a'
>>> peek_path_info(env)
'a'
If the `charset` is set to `None` a bytestring is returned.
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is checked.
"""
segments = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
if segments:
return to_unicode(wsgi_get_bytes(segments[0]),
charset, errors, allow_none_charset=True) | 0.001189 |
def ensure_traj(traj):
r"""Makes sure that traj is a trajectory (array of float)
"""
if is_float_matrix(traj) or is_bool_matrix(traj):
return traj
elif is_float_vector(traj):
return traj[:,None]
else:
try:
arr = np.array(traj)
arr = ensure_dtype_float(arr)
if is_float_matrix(arr):
return arr
if is_float_vector(arr):
return arr[:,None]
else:
raise TypeError('Argument traj cannot be cast into a two-dimensional array. Check type.')
except:
raise TypeError('Argument traj is not a trajectory - only float-arrays or list of float-arrays are allowed. Types is %s' % type(traj)) | 0.008021 |
def sg_expand_dims(tensor, opt):
r"""Inserts a new axis.
See tf.expand_dims() in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : Dimension to expand. Default is -1.
name: If provided, it replaces current tensor's name.
Returns:
A `Tensor`.
"""
opt += tf.sg_opt(axis=-1)
return tf.expand_dims(tensor, opt.axis, name=opt.name) | 0.004673 |
def rm(self, line):
"""
Remove all occurrences of 'line' from contents
where 'line' is an entire line or a list of lines.
Return true if the file was changed by rm(), False otherwise.
Multi-line strings are converted to a list delimited by new lines.
:param line: String, or List of Strings; each string represents an entire line to be removed from file.
:return: Boolean, whether contents were changed.
"""
self.log('rm({0})'.format(line))
if line is False:
return False
if isinstance(line, str):
line = line.split('\n')
if not isinstance(line, list):
raise TypeError("Parameter 'line' not a 'string' or 'list', is {0}".format(type(line)))
local_changes = False
for this in line:
if this in self.contents:
while this in self.contents:
self.log('Removed "{0}" from position {1}'.format(this, self.contents.index(this)))
self.contents.remove(this)
self.changed = local_changes = True
else:
self.log('"{0}" not in {1}'.format(this, self.filename))
if self.sorted and local_changes:
self.sort()
return local_changes | 0.003814 |
def _load_config(self):
"""Read the configuration file and load it into memory."""
self._config = ConfigParser.SafeConfigParser()
self._config.read(self.config_path) | 0.010582 |
def iyang(imgIn, krnl, imgSeg, Cnt, itr=5):
'''partial volume correction using iterative Yang method
imgIn: input image which is blurred due to the PSF of the scanner
krnl: shift invariant kernel of the PSF
imgSeg: segmentation into regions starting with 0 (e.g., background) and then next integer numbers
itr: number of iteration (default 5)
'''
dim = imgIn.shape
m = np.int32(np.max(imgSeg))
m_a = np.zeros(( m+1, itr ), dtype=np.float32)
for jr in range(0,m+1):
m_a[jr, 0] = np.mean( imgIn[imgSeg==jr] )
# init output image
imgOut = np.copy(imgIn)
# iterative Yang algorithm:
for i in range(0, itr):
if Cnt['VERBOSE']: print 'i> PVC Yang iteration =', i
# piece-wise constant image
imgPWC = imgOut
imgPWC[imgPWC<0] = 0
for jr in range(0,m+1):
imgPWC[imgSeg==jr] = np.mean( imgPWC[imgSeg==jr] )
#> blur the piece-wise constant image using either:
#> (1) GPU convolution with a separable kernel (x,y,z), or
#> (2) CPU, Python-based convolution
if 'CCARCH' in Cnt and 'compute' in Cnt['CCARCH']:
#> convert to dimensions of GPU processing [y,x,z]
imin_d = np.transpose(imgPWC, (1, 2, 0))
imout_d = np.zeros(imin_d.shape, dtype=np.float32)
improc.convolve(imout_d, imin_d, krnl, Cnt)
imgSmo = np.transpose(imout_d, (2,0,1))
else:
hxy = np.outer(krnl[1,:], krnl[2,:])
hxyz = np.multiply.outer(krnl[0,:], hxy)
imgSmo = ndi.convolve(imgPWC, hxyz, mode='constant', cval=0.)
# correction factors
imgCrr = np.ones(dim, dtype=np.float32)
imgCrr[imgSmo>0] = imgPWC[imgSmo>0] / imgSmo[imgSmo>0]
imgOut = imgIn * imgCrr;
for jr in range(0,m+1):
m_a[jr, i] = np.mean( imgOut[imgSeg==jr] )
return imgOut, m_a | 0.018163 |
def makesubatoffset(self, bitoffset, *, _offsetideal=None):
"""Create a copy of this promise with an offset, and use it as this promise's child.
If this promise's primitive is being merged with another
primitive, a new subpromise may be required to keep track of
the new offset of data coming from the new primitive.
Args:
bitoffset: An integer offset of the data in the new primitive.
_offsetideal: integer offset of the data if terms of bits actually used for promises. Used to calculate the start index to read if the associated primitive has arbitrary TDO control.
Returns:
A TDOPromise registered with this promise, and with the
correct offset.
"""
if _offsetideal is None:
_offsetideal = bitoffset
if bitoffset == 0:
return self
newpromise = TDOPromise(
self._chain,
self._bitstart + bitoffset,
self._bitlength,
_parent=self,
bitstartselective=self._bitstartselective+_offsetideal
)
self._addsub(newpromise, 0)
return newpromise | 0.003416 |
def add_tags(self):
"""Add a Vorbis comment block to the file."""
if self.tags is None:
self.tags = VCFLACDict()
self.metadata_blocks.append(self.tags)
else:
raise FLACVorbisError("a Vorbis comment already exists") | 0.007299 |
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using a one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: `Tensor` of image (it will be converted to floats in [0, 1]).
bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
where each coordinate is [0, 1) and the coordinates are arranged
as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding
box supplied.
aspect_ratio_range: An optional list of `float`s. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `float`s. The cropped area of the image
must contain a fraction of the supplied image within in this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional `str` for name scope.
Returns:
(cropped image `Tensor`, distorted bbox `Tensor`).
"""
with tf.name_scope(scope, default_name="distorted_bounding_box_crop",
values=[image, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
cropped_image = tf.slice(image, bbox_begin, bbox_size)
return cropped_image, distort_bbox | 0.001722 |
def _read_opt_ilnp(self, code, *, desc):
"""Read HOPOPT ILNP Nonce option.
Structure of HOPOPT ILNP Nonce option [RFC 6744]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Next Header | Hdr Ext Len | Option Type | Option Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ Nonce Value /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 hopopt.ilnp.type Option Type
0 0 hopopt.ilnp.type.value Option Number
0 0 hopopt.ilnp.type.action Action (10)
0 2 hopopt.ilnp.type.change Change Flag (0)
1 8 hopopt.ilnp.length Length of Option Data
2 16 hopopt.ilnp.value Nonce Value
"""
_type = self._read_opt_type(code)
_size = self._read_unpack(1)
_nval = self._read_fileng(_size)
opt = dict(
desc=desc,
type=_type,
length=_size + 2,
value=_nval,
)
return opt | 0.002008 |
def xyz2lonlat(x__, y__, z__):
"""Get longitudes from cartesian coordinates.
"""
R = 6370997.0
lons = da.rad2deg(da.arccos(x__ / da.sqrt(x__ ** 2 + y__ ** 2))) * da.sign(y__)
lats = da.sign(z__) * (90 - da.rad2deg(da.arcsin(da.sqrt(x__ ** 2 + y__ ** 2) / R)))
return lons, lats | 0.009934 |
def _rc_sinter(self, src, *args):
"""
Returns the members of the set resulting from the intersection of
the first set with all the successive sets.
"""
args = list_or_args(src, args)
src_set = self.smembers(args.pop(0))
if src_set is not set([]):
for key in args:
src_set.intersection_update(self.smembers(key))
return src_set | 0.004762 |
def add_sections(app, doctree, fromdocname):
"""Add section titles to the needs as additional attributes that can
be used in tables and filters"""
needs = getattr(app.builder.env, 'needs_all_needs', {})
for key, need_info in needs.items():
sections = get_sections(need_info)
need_info['sections'] = sections
need_info['section_name'] = sections[0] if sections else "" | 0.002457 |
def get_mrca_idx_from_tip_labels(self, names=None, wildcard=None, regex=None):
"""
Returns the node idx label of the most recent common ancestor node
for the clade that includes the selected tips. Arguments can use fuzzy
name matching: a list of tip names, wildcard selector, or regex string.
"""
if not any([names, wildcard, regex]):
raise ToytreeError("at least one argument required")
node = fuzzy_match_tipnames(
self, names, wildcard, regex, True, False)
return node.idx | 0.005319 |
def match(self, filepath):
"""
Check whether the file path matches one of the allowed extensions.
Returns True on a match, False otherwise.
"""
# no extension?
if filepath.find(".") == -1:
return False
# match extension
return filepath.lower().split(".")[-1] in self.__extensions | 0.006231 |
def get_overlapping_values(plates):
"""
Need to find where in the tree the two plates intersect, e.g.
We are given as input plates D, E, whose positions in the tree are:
root -> A -> B -> C -> D
root -> A -> B -> E
The results should then be the cartesian product between C, D, E looped over A and B
If there's a shared plate in the hierarchy, we need to join on this shared plate, e.g.:
[self.plates[p].values for p in plate_ids][0] =
[(('house', '1'), ('location', 'hallway'), ('wearable', 'A')),
(('house', '1'), ('location', 'kitchen'), ('wearable', 'A'))]
[self.plates[p].values for p in plate_ids][1] =
[(('house', '1'), ('scripted', '15')),
(('house', '1'), ('scripted', '13'))]
Result should be one stream for each of:
[(('house', '1'), ('location', 'hallway'), ('wearable', 'A'), ('scripted', '15)),
(('house', '1'), ('location', 'hallway'), ('wearable', 'A'), ('scripted', '13)),
(('house', '1'), ('location', 'kitchen'), ('wearable', 'A'), ('scripted', '15)),
(('house', '1'), ('location', 'kitchen'), ('wearable', 'A'), ('scripted', '13))]
:param plates: The input plates
:return: The plate values
:type plates: list[Plate] | list[Plate]
"""
if not plates:
return None
if len(plates) == 1:
return plates[0].values
if len(plates) > 2:
raise NotImplementedError
# First check for the simple case where one of the plates has no parent
# and does not share meta data with the other
plates_sorted = sorted(plates, key=lambda item: len(item.ancestor_plates))
if plates_sorted[0].is_root:
if plates_sorted[0].meta_data_id not in plates_sorted[1].ancestor_meta_data_ids:
return map(lambda x: tuple(itertools.chain(*x)), itertools.product(plates[0].values, plates[1].values))
# Get all of the ancestors zipped together, padded with None
ancestors = deque(itertools.izip_longest(*(p.ancestor_plates for p in plates)))
last_values = []
while len(ancestors) > 0:
current = ancestors.popleft()
if current[0] == current[1]:
# Plates are identical, take all values valid for matching parents
if last_values:
raise NotImplementedError
else:
last_values.extend(current[0].values)
elif current[0] is not None and current[1] is not None \
and current[0].meta_data_id == current[1].meta_data_id:
# Not identical, but same meta data id. Take all overlapping values valid for matching parents
if last_values:
raise NotImplementedError
else:
raise NotImplementedError
else:
# Different plates, take cartesian product of values with matching parents.
# Note that one of them may be none
if last_values:
tmp = []
for v in last_values:
# Get the valid ones based on v
# valid = [filter(lambda x: all(xx in v for xx in x[:-1]), c.values)
# for c in current if c is not None]
valid = [filter(lambda x: all(vv in x for vv in v), c.values)
for c in current if c is not None]
# Strip out v from the valid ones
stripped = [map(lambda y: tuple(itertools.chain(*(yy for yy in y if yy not in v))), val)
for val in valid]
# Get the cartesian product. Note that this still works if one of the current is None
prod = list(itertools.product(*stripped))
# Now update the last values be the product with v put back in
new_values = [v + p for p in prod]
if new_values:
tmp.append(new_values)
last_values = list(itertools.chain(*tmp))
if not last_values:
raise ValueError("Plate value computation failed - possibly there were no shared plate values")
else:
raise NotImplementedError
if not last_values:
raise ValueError("Plate value computation failed - possibly there were no shared plate values")
return last_values | 0.004697 |
def remove_xml_element_string(name, content):
""" Remove XML elements from a string """
ET.register_namespace("", "http://soap.sforce.com/2006/04/metadata")
tree = ET.fromstring(content)
tree = remove_xml_element(name, tree)
clean_content = ET.tostring(tree, encoding=UTF8)
return clean_content | 0.003145 |
def get(self, **params):
'''
Returns details for a specific airport.
.. code-block:: python
amadeus.reference_data.location('ALHR').get()
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed
'''
return self.client.get('/v1/reference-data/locations/{0}'
.format(self.location_id), **params) | 0.00463 |
def _addMethod(self, effect, verb, resource, conditions):
"""Adds a method to the internal lists of allowed or denied methods. Each object in
the internal list contains a resource ARN and a condition statement. The condition
statement can be null."""
if verb != "*" and not hasattr(HttpVerb, verb):
raise NameError("Invalid HTTP verb " + verb + ". Allowed verbs in HttpVerb class")
resourcePattern = re.compile(self.pathRegex)
if not resourcePattern.match(resource):
raise NameError("Invalid resource path: " + resource + ". Path should match " + self.pathRegex)
if resource[:1] == "/":
resource = resource[1:]
resourceArn = ("arn:aws:execute-api:" +
self.region + ":" +
self.awsAccountId + ":" +
self.restApiId + "/" +
self.stage + "/" +
verb + "/" +
resource)
if effect.lower() == "allow":
self.allowMethods.append({
'resourceArn' : resourceArn,
'conditions' : conditions
})
elif effect.lower() == "deny":
self.denyMethods.append({
'resourceArn' : resourceArn,
'conditions' : conditions
}) | 0.012365 |
def ref(function, callback=None):
"""
Returns a weak reference to the given method or function.
If the callback argument is not None, it is called as soon
as the referenced function is garbage deleted.
:type function: callable
:param function: The function to reference.
:type callback: callable
:param callback: Called when the function dies.
"""
try:
function.__func__
except AttributeError:
return _WeakMethodFree(function, callback)
return _WeakMethodBound(function, callback) | 0.001828 |
def find_videos_by_playlist(self, playlist_id, page=1, count=20):
"""doc: http://open.youku.com/docs/doc?id=71
"""
url = 'https://openapi.youku.com/v2/playlists/videos.json'
params = {
'client_id': self.client_id,
'playlist_id': playlist_id,
'page': page,
'count': count
}
r = requests.get(url, params=params)
check_error(r)
return r.json() | 0.004425 |
def ready(self):
"""Initialisation for django-ddp (setup lookups and signal handlers)."""
if not settings.DATABASES:
raise ImproperlyConfigured('No databases configured.')
for (alias, conf) in settings.DATABASES.items():
engine = conf['ENGINE']
if engine not in [
'django.db.backends.postgresql',
'django.db.backends.postgresql_psycopg2',
]:
warnings.warn(
'Database %r uses unsupported %r engine.' % (
alias, engine,
),
UserWarning,
)
self.api = autodiscover()
self.api.ready() | 0.004202 |
def execute(self, uri, namespace, action, timeout=2, **kwargs):
"""Executes a given action with optional arguments.
Executing an action on a UPnP/TR64 device needs more than just the action name: it also needs the control
URI on which the action is placed, and the namespace aka service type. The namespace defines the scope or
service type of the given action; the same action name can appear in different namespaces.
The needed information can be obtained either from the documentation provided by the device vendor, or
through a discovery request which returns the URL of the root device description XML.
:param str uri: the control URI, for example ``/upnp/control/hosts``
:param str namespace: the namespace for the given action, for example ``urn:dslforum-org:service:Hosts:1``
:param str action: the name of the action to call, for example ``GetGenericHostEntry``
:param float timeout: the timeout to wait for the action to be executed
:param kwargs: optional arguments for the given action, depends if the action needs parameter. The arguments
are given as dict where the key is the parameter name and the value the value of the parameter.
:type kwargs: dict[str, str]
:return: returns the results of the action, if any. The results are structured as dict where the key is the
name of the result argument and the value is the value of the result.
:rtype: dict[str,str]
:raises ValueError: if parameters are not set correctly
:raises requests.exceptions.ConnectionError: when the action can not be placed on the device
:raises requests.exceptions.ConnectTimeout: when download time out
Example:
::
device = DeviceTR64(...)
device.execute("/upnp/control/hosts", "urn:dslforum-org:service:Hosts:1",
"GetGenericHostEntry", {"NewIndex": 1})
{'NewActive': '0', 'NewIPAddress': '192.168.0.23', 'NewMACAddress': '9C:20:7B:E7:FF:5F',
'NewInterfaceType': 'Ethernet', 'NewHostName': 'Apple-TV', 'NewAddressSource': 'DHCP',
'NewLeaseTimeRemaining': '0'}
.. seealso::
`Additional short explanation of the UPnP protocol <http://www.upnp-hacks.org/upnp.html>`_
:class:`~simpletr64.Discover`, :meth:`~simpletr64.DeviceTR64.loadDeviceDefinitions`,
:meth:`~simpletr64.DeviceTR64.loadSCPD`
"""
if not uri:
raise ValueError("No action URI has been defined.")
if not namespace:
raise ValueError("No namespace has been defined.")
if not action:
raise ValueError("No action has been defined.")
# soap headers
header = {'Content-Type': 'text/xml; charset="UTF-8"',
'Soapaction': '"' + namespace + "#" + action + '"'}
# build SOAP body
body = '''<?xml version="1.0" encoding="UTF-8"?>
<s:Envelope
s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<s:Header/>
<s:Body>\n'''
body += " <u:" + action + ' xmlns="' + namespace + '">\n'
arguments = {}
for key in kwargs.keys():
body += " <" + key + ">" + str(kwargs[key]) + "</" + key + ">\n"
arguments[key] = str(kwargs[key])
body += " </u:" + action + ">\n"
body += '''</s:Body>
</s:Envelope>'''
# setup proxies
proxies = {}
if self.__httpsProxy:
proxies = {"https": self.__httpsProxy}
if self.__httpProxy:
proxies = {"http": self.__httpProxy}
# setup authentication
auth = None
if self.__password:
auth = HTTPDigestAuth(self.__username, self.__password)
# build the URL
location = self.__protocol + "://" + self.__hostname + ":" + str(self.port) + uri
# Post http request
request = requests.post(location, data=body, headers=header, auth=auth, proxies=proxies, timeout=float(timeout),
verify=self.__verify)
if request.status_code != 200:
errorStr = DeviceTR64._extractErrorString(request)
raise ValueError('Could not execute "' + action + str(arguments) + '": ' + str(request.status_code) +
' - ' + request.reason + " -- " + errorStr)
# parse XML return
try:
root = ET.fromstring(request.text.encode('utf-8'))
except Exception as e:
raise ValueError("Can not parse results for the action: " + str(e))
# iterate in the XML structure to get the action result
actionNode = root[0][0]
# we need to remove XML namespace for the action node
namespaceLength = len(namespace) + 2 # add braces
tag = actionNode.tag[namespaceLength:]
if tag != (action + "Response"):
raise ValueError('Soap result structure is wrong, expected action "' + action +
'Response" got "' + tag + '".')
# pack all the received results
results = {}
for resultNode in actionNode:
results[resultNode.tag] = resultNode.text
return results | 0.00454 |
def to_iris(dataarray):
""" Convert a DataArray into a Iris Cube
"""
# Iris not a hard dependency
import iris
from iris.fileformats.netcdf import parse_cell_methods
dim_coords = []
aux_coords = []
for coord_name in dataarray.coords:
coord = encode(dataarray.coords[coord_name])
coord_args = _get_iris_args(coord.attrs)
coord_args['var_name'] = coord_name
axis = None
if coord.dims:
axis = dataarray.get_axis_num(coord.dims)
if coord_name in dataarray.dims:
try:
iris_coord = iris.coords.DimCoord(coord.values, **coord_args)
dim_coords.append((iris_coord, axis))
except ValueError:
iris_coord = iris.coords.AuxCoord(coord.values, **coord_args)
aux_coords.append((iris_coord, axis))
else:
iris_coord = iris.coords.AuxCoord(coord.values, **coord_args)
aux_coords.append((iris_coord, axis))
args = _get_iris_args(dataarray.attrs)
args['var_name'] = dataarray.name
args['dim_coords_and_dims'] = dim_coords
args['aux_coords_and_dims'] = aux_coords
if 'cell_methods' in dataarray.attrs:
args['cell_methods'] = \
parse_cell_methods(dataarray.attrs['cell_methods'])
masked_data = duck_array_ops.masked_invalid(dataarray.data)
cube = iris.cube.Cube(masked_data, **args)
return cube | 0.000694 |
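A short sketch of calling to_iris on a small DataArray; it assumes xarray and the optional iris dependency are installed, and the variable names and attributes are illustrative only.

import numpy as np
import xarray as xr

da = xr.DataArray(
    np.random.rand(4, 3),
    dims=('x', 'y'),
    coords={'x': np.arange(4)},
    name='temperature',
    attrs={'units': 'K'},
)
cube = to_iris(da)   # iris.cube.Cube with 'x' attached as a dimension coordinate
print(cube)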
def set_storage_container_acl(kwargs=None, storage_conn=None, call=None):
'''
.. versionadded:: 2015.8.0
Set a storage container's acl
CLI Example:
.. code-block:: bash
        salt-cloud -f set_storage_container_acl my-azure name=mycontainer
name:
Name of existing container.
signed_identifiers:
        SignedIdentifiers instance
blob_public_access:
Optional. Possible values include: container, blob
lease_id:
If specified, set_storage_container_acl only succeeds if the
container's lease is active and matches this ID.
'''
if call != 'function':
raise SaltCloudSystemExit(
            'The set_storage_container_acl function must be called with -f or --function.'
)
if not storage_conn:
storage_conn = get_storage_conn(conn_kwargs=kwargs)
try:
data = storage_conn.set_container_acl(
container_name=kwargs['name'],
signed_identifiers=kwargs.get('signed_identifiers', None),
x_ms_blob_public_access=kwargs.get('blob_public_access', None),
x_ms_lease_id=kwargs.get('lease_id', None),
)
return {'Success': 'The storage container was successfully updated'}
except AzureConflictHttpError:
raise SaltCloudSystemExit('There was a conflict.') | 0.001504 |
def show(self, start_date, end_date):
"""setting suggested name to something readable, replace backslashes
with dots so the name is valid in linux"""
# title in the report file name
vars = {"title": _("Time track"),
"start": start_date.strftime("%x").replace("/", "."),
"end": end_date.strftime("%x").replace("/", ".")}
if start_date != end_date:
filename = "%(title)s, %(start)s - %(end)s.html" % vars
else:
filename = "%(title)s, %(start)s.html" % vars
self.dialog.set_current_name(filename)
response = self.dialog.run()
if response != gtk.ResponseType.OK:
self.emit("report-chooser-closed")
self.dialog.destroy()
self.dialog = None
else:
self.on_save_button_clicked() | 0.002323 |
def build_transform(self):
"""
Creates a basic transformation that was used to train the models
"""
cfg = self.cfg
# we are loading images with OpenCV, so we don't need to convert them
# to BGR, they are already! So all we need to do is to normalize
# by 255 if we want to convert to BGR255 format, or flip the channels
# if we want it to be in RGB in [0-1] range.
if cfg.INPUT.TO_BGR255:
to_bgr_transform = T.Lambda(lambda x: x * 255)
else:
to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])
normalize_transform = T.Normalize(
mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD
)
transform = T.Compose(
[
T.ToPILImage(),
T.Resize(self.min_image_size),
T.ToTensor(),
to_bgr_transform,
normalize_transform,
]
)
return transform | 0.00202 |
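For reference, a standalone sketch of an equivalent preprocessing pipeline when no cfg object is at hand; the resize value and pixel statistics below are placeholders, not the values any particular checkpoint was trained with.

from torchvision import transforms as T

min_image_size = 224                       # placeholder for self.min_image_size
pixel_mean = [102.98, 115.95, 122.77]      # hypothetical BGR255-style means
pixel_std = [1.0, 1.0, 1.0]

transform = T.Compose([
    T.ToPILImage(),
    T.Resize(min_image_size),
    T.ToTensor(),
    T.Lambda(lambda x: x * 255),           # emulate the cfg.INPUT.TO_BGR255 branch
    T.Normalize(mean=pixel_mean, std=pixel_std),
])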
def iter_bases(bases):
"""
Performs MRO linearization of a set of base classes. Yields
each base class in turn.
"""
sequences = ([list(inspect.getmro(base)) for base in bases] +
[list(bases)])
# Loop over sequences
while True:
sequences = [seq for seq in sequences if seq]
if not sequences:
return
# Select a good head
for seq in sequences:
head = seq[0]
tails = [seq for seq in sequences if head in seq[1:]]
if not tails:
break
else:
raise TypeError('Cannot create a consistent method '
'resolution order (MRO) for bases %s' %
', '.join([base.__name__ for base in bases]))
# Yield this base class
yield head
# Remove base class from all the other sequences
for seq in sequences:
if seq[0] == head:
del seq[0] | 0.001817 |
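A small check of the linearization order on a diamond hierarchy; the classes below exist only for illustration.

class A(object): pass
class B(A): pass
class C(A): pass

print([base.__name__ for base in iter_bases((B, C))])
# Expected: ['B', 'C', 'A', 'object'] -- the same order as the C3 MRO of a
# class that would inherit from (B, C).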
def validate_pai_trial_conifg(experiment_config):
'''validate the trial config in pai platform'''
if experiment_config.get('trainingServicePlatform') == 'pai':
if experiment_config.get('trial').get('shmMB') and \
experiment_config['trial']['shmMB'] > experiment_config['trial']['memoryMB']:
print_error('shmMB should be no more than memoryMB!')
exit(1) | 0.0075 |
def hold_absent(name, snapshot, recursive=False):
'''
ensure hold is absent on the system
name : string
name of hold
snapshot : string
name of snapshot
recursive : boolean
        recursively releases a hold with the given tag on the snapshots of all descendant file systems.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
## log configuration
log.debug('zfs.hold_absent::%s::config::snapshot = %s',
name, snapshot)
log.debug('zfs.hold_absent::%s::config::recursive = %s',
name, recursive)
## check we have a snapshot/tag name
if not __utils__['zfs.is_snapshot'](snapshot):
ret['result'] = False
ret['comment'] = 'invalid snapshot name: {0}'.format(snapshot)
return ret
if __utils__['zfs.is_snapshot'](name) or \
__utils__['zfs.is_bookmark'](name) or \
name == 'error':
ret['result'] = False
ret['comment'] = 'invalid tag name: {0}'.format(name)
return ret
## release hold if required
holds = __salt__['zfs.holds'](snapshot)
if name in holds:
## NOTE: hold found for snapshot, release it
if not __opts__['test']:
mod_res = __salt__['zfs.release'](name, snapshot, **{'recursive': recursive})
else:
mod_res = OrderedDict([('released', True)])
ret['result'] = mod_res['released']
if ret['result']:
ret['changes'] = {snapshot: {name: 'released'}}
ret['comment'] = 'hold {0} released'.format(
name,
)
else:
ret['comment'] = 'failed to release hold {0}'.format(
name,
)
if 'error' in mod_res:
ret['comment'] = mod_res['error']
elif 'error' in holds:
## NOTE: we have an error
ret['result'] = False
ret['comment'] = holds['error']
else:
## NOTE: no hold found with name for snapshot
ret['comment'] = 'hold {0} is absent'.format(
name,
)
return ret | 0.004208 |
def b58encode_int(i, default_one=True):
'''Encode an integer using Base58'''
if not i and default_one:
return alphabet[0]
string = ""
while i:
i, idx = divmod(i, 58)
string = alphabet[idx] + string
return string | 0.003922 |
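A few concrete values, assuming the module-level alphabet is the Bitcoin Base58 alphabet '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'.

b58encode_int(0)    # -> '1'   (default_one maps zero to the first symbol)
b58encode_int(57)   # -> 'z'
b58encode_int(58)   # -> '21'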
def _parse_blob(self):
"""Parse a blob command."""
lineno = self.lineno
mark = self._get_mark_if_any()
data = self._get_data(b'blob')
return commands.BlobCommand(mark, data, lineno) | 0.00905 |
def urldefrag(url):
"""Removes any existing fragment from URL.
Returns a tuple of the defragmented URL and the fragment. If
the URL contained no fragments, the second element is the
empty string.
"""
if '#' in url:
s, n, p, a, q, frag = urlparse(url)
defrag = urlunparse((s, n, p, a, q, ''))
return defrag, frag
else:
return url, '' | 0.002538 |
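Example behaviour, assuming urlparse and urlunparse come from urllib.parse as in the standard-library version of this helper.

urldefrag('https://example.com/docs/page.html#install')
# -> ('https://example.com/docs/page.html', 'install')
urldefrag('https://example.com/docs/page.html')
# -> ('https://example.com/docs/page.html', '')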
def build_payload(self, tag, message):
""" Encode, sign payload(optional) and attach subscription tag """
message = self.encode(message)
message = self.sign(message)
payload = bytes(tag.encode('utf-8')) + message
return payload | 0.007491 |
def _safe_getattr(o):
"""Gets the attribute from the specified object, taking the acorn decoration
into account.
"""
def getattribute(attr): # pragma: no cover
if hasattr(o, "__acornext__") and o.__acornext__ is not None:
return o.__acornext__.__getattribute__(attr)
elif hasattr(o, "__acorn__") and o.__acorn__ is not None:
#Some of the functions have the original function (when it was not
#extended) in the __acorn__ attribute.
return o.__acorn__.__getattribute__(attr)
else:
return getattr(o, attr)
return getattribute | 0.007987 |
def value(self):
"""Returns a formatted version of the data for final output.
This takes into consideration the
:attr:`~horizon.tables.Column.link`` and
:attr:`~horizon.tables.Column.empty_value`
attributes.
"""
try:
data = self.column.get_data(self.datum)
if data is None:
if callable(self.column.empty_value):
data = self.column.empty_value(self.datum)
else:
data = self.column.empty_value
except Exception:
data = None
exc_info = sys.exc_info()
raise six.reraise(template.TemplateSyntaxError, exc_info[1],
exc_info[2])
if self.url and not self.column.auto == "form_field":
link_attrs = ' '.join(['%s="%s"' % (k, v) for (k, v) in
self.column.link_attrs.items()])
# Escape the data inside while allowing our HTML to render
data = mark_safe('<a href="%s" %s>%s</a>' % (
(escape(self.url),
link_attrs,
escape(six.text_type(data)))))
return data | 0.001605 |
def create(style_dataset, content_dataset, style_feature=None,
content_feature=None, max_iterations=None, model='resnet-16',
verbose=True, batch_size = 6, **kwargs):
"""
Create a :class:`StyleTransfer` model.
Parameters
----------
style_dataset: SFrame
Input style images. The columns named by the ``style_feature`` parameters will
be extracted for training the model.
content_dataset : SFrame
Input content images. The columns named by the ``content_feature`` parameters will
be extracted for training the model.
style_feature: string
Name of the column containing the input images in style SFrame.
'None' (the default) indicates the only image column in the style SFrame
should be used as the feature.
content_feature: string
Name of the column containing the input images in content SFrame.
'None' (the default) indicates the only image column in the content
SFrame should be used as the feature.
max_iterations : int
The number of training iterations. If 'None' (the default), then it will
be automatically determined based on the amount of data you provide.
    model : string, optional
Style transfer model to use:
- "resnet-16" : Fast and small-sized residual network that uses
VGG-16 as reference network during training.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve training
throughput.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : StyleTransfer
A trained :class:`StyleTransfer` model.
See Also
--------
StyleTransfer
Examples
--------
.. sourcecode:: python
# Create datasets
>>> content_dataset = turicreate.image_analysis.load_images('content_images/')
>>> style_dataset = turicreate.image_analysis.load_images('style_images/')
# Train a style transfer model
>>> model = turicreate.style_transfer.create(content_dataset, style_dataset)
# Stylize an image on all styles
>>> stylized_images = model.stylize(data)
# Visualize the stylized images
>>> stylized_images.explore()
"""
if len(style_dataset) == 0:
raise _ToolkitError("style_dataset SFrame cannot be empty")
if len(content_dataset) == 0:
raise _ToolkitError("content_dataset SFrame cannot be empty")
if(batch_size < 1):
raise _ToolkitError("'batch_size' must be greater than or equal to 1")
from ._sframe_loader import SFrameSTIter as _SFrameSTIter
import mxnet as _mx
from .._mxnet import _mxnet_utils
if style_feature is None:
style_feature = _tkutl._find_only_image_column(style_dataset)
if content_feature is None:
content_feature = _tkutl._find_only_image_column(content_dataset)
if verbose:
print("Using '{}' in style_dataset as feature column and using "
"'{}' in content_dataset as feature column".format(style_feature, content_feature))
_raise_error_if_not_training_sframe(style_dataset, style_feature)
_raise_error_if_not_training_sframe(content_dataset, content_feature)
params = {
'batch_size': batch_size,
'vgg16_content_loss_layer': 2, # conv3_3 layer
'lr': 0.001,
'content_loss_mult': 1.0,
'style_loss_mult': [1e-4, 1e-4, 1e-4, 1e-4], # conv 1-4 layers
'finetune_all_params': True,
'pretrained_weights': False,
'print_loss_breakdown': False,
'input_shape': (256, 256),
'training_content_loader_type': 'stretch',
'use_augmentation': False,
'sequential_image_processing': False,
# Only used if use_augmentaion is True
'aug_resize': 0,
'aug_min_object_covered': 0,
'aug_rand_crop': 0.9,
'aug_rand_pad': 0.9,
'aug_rand_gray': 0.0,
'aug_aspect_ratio': 1.25,
'aug_hue': 0.05,
'aug_brightness': 0.05,
'aug_saturation': 0.05,
'aug_contrast': 0.05,
'aug_horizontal_flip': True,
'aug_area_range': (.05, 1.5),
'aug_pca_noise': 0.0,
'aug_max_attempts': 20,
'aug_inter_method': 2,
}
if '_advanced_parameters' in kwargs:
# Make sure no additional parameters are provided
new_keys = set(kwargs['_advanced_parameters'].keys())
set_keys = set(params.keys())
unsupported = new_keys - set_keys
if unsupported:
raise _ToolkitError('Unknown advanced parameters: {}'.format(unsupported))
params.update(kwargs['_advanced_parameters'])
_content_loss_mult = params['content_loss_mult']
_style_loss_mult = params['style_loss_mult']
num_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=params['batch_size'])
batch_size_each = params['batch_size'] // max(num_gpus, 1)
batch_size = max(num_gpus, 1) * batch_size_each
input_shape = params['input_shape']
iterations = 0
if max_iterations is None:
max_iterations = len(style_dataset) * 10000
if verbose:
print('Setting max_iterations to be {}'.format(max_iterations))
# data loader
if params['use_augmentation']:
content_loader_type = '%s-with-augmentation' % params['training_content_loader_type']
else:
content_loader_type = params['training_content_loader_type']
content_images_loader = _SFrameSTIter(content_dataset, batch_size, shuffle=True,
feature_column=content_feature, input_shape=input_shape,
loader_type=content_loader_type, aug_params=params,
sequential=params['sequential_image_processing'])
ctx = _mxnet_utils.get_mxnet_context(max_devices=params['batch_size'])
num_styles = len(style_dataset)
# TRANSFORMER MODEL
from ._model import Transformer as _Transformer
transformer_model_path = _pre_trained_models.STYLE_TRANSFER_BASE_MODELS[model]().get_model_path()
transformer = _Transformer(num_styles, batch_size_each)
transformer.collect_params().initialize(ctx=ctx)
if params['pretrained_weights']:
transformer.load_params(transformer_model_path, ctx, allow_missing=True)
# For some reason, the transformer fails to hybridize for training, so we
# avoid this until resolved
# transformer.hybridize()
# VGG MODEL
from ._model import Vgg16 as _Vgg16
vgg_model_path = _pre_trained_models.STYLE_TRANSFER_BASE_MODELS['Vgg16']().get_model_path()
vgg_model = _Vgg16()
vgg_model.collect_params().initialize(ctx=ctx)
vgg_model.load_params(vgg_model_path, ctx=ctx, ignore_extra=True)
vgg_model.hybridize()
# TRAINER
from mxnet import gluon as _gluon
from ._model import gram_matrix as _gram_matrix
if params['finetune_all_params']:
trainable_params = transformer.collect_params()
else:
trainable_params = transformer.collect_params('.*gamma|.*beta')
trainer = _gluon.Trainer(trainable_params, 'adam', {'learning_rate': params['lr']})
mse_loss = _gluon.loss.L2Loss()
start_time = _time.time()
smoothed_loss = None
last_time = 0
cuda_gpus = _mxnet_utils.get_gpus_in_use(max_devices=params['batch_size'])
num_mxnet_gpus = len(cuda_gpus)
if verbose:
# Estimate memory usage (based on experiments)
cuda_mem_req = 260 + batch_size_each * 880 + num_styles * 1.4
_tkutl._print_neural_compute_device(cuda_gpus=cuda_gpus, use_mps=False,
cuda_mem_req=cuda_mem_req, has_mps_impl=False)
#
# Pre-compute gram matrices for style images
#
if verbose:
print('Analyzing visual features of the style images')
style_images_loader = _SFrameSTIter(style_dataset, batch_size, shuffle=False, num_epochs=1,
feature_column=style_feature, input_shape=input_shape,
loader_type='stretch',
sequential=params['sequential_image_processing'])
num_layers = len(params['style_loss_mult'])
gram_chunks = [[] for _ in range(num_layers)]
for s_batch in style_images_loader:
s_data = _gluon.utils.split_and_load(s_batch.data[0], ctx_list=ctx, batch_axis=0)
for s in s_data:
vgg16_s = _vgg16_data_prep(s)
ret = vgg_model(vgg16_s)
grams = [_gram_matrix(x) for x in ret]
for i, gram in enumerate(grams):
if gram.context != _mx.cpu(0):
gram = gram.as_in_context(_mx.cpu(0))
gram_chunks[i].append(gram)
del style_images_loader
grams = [
# The concatenated styles may be padded, so we slice overflow
_mx.nd.concat(*chunks, dim=0)[:num_styles]
for chunks in gram_chunks
]
# A context->grams look-up table, where all the gram matrices have been
# distributed
ctx_grams = {}
if ctx[0] == _mx.cpu(0):
ctx_grams[_mx.cpu(0)] = grams
else:
for ctx0 in ctx:
ctx_grams[ctx0] = [gram.as_in_context(ctx0) for gram in grams]
#
# Training loop
#
vgg_content_loss_layer = params['vgg16_content_loss_layer']
rs = _np.random.RandomState(1234)
while iterations < max_iterations:
content_images_loader.reset()
for c_batch in content_images_loader:
c_data = _gluon.utils.split_and_load(c_batch.data[0], ctx_list=ctx, batch_axis=0)
Ls = []
curr_content_loss = []
curr_style_loss = []
with _mx.autograd.record():
for c in c_data:
# Randomize styles to train
indices = _mx.nd.array(rs.randint(num_styles, size=batch_size_each),
dtype=_np.int64, ctx=c.context)
# Generate pastiche
p = transformer(c, indices)
# mean subtraction
vgg16_p = _vgg16_data_prep(p)
vgg16_c = _vgg16_data_prep(c)
# vgg forward
p_vgg_outputs = vgg_model(vgg16_p)
c_vgg_outputs = vgg_model(vgg16_c)
c_content_layer = c_vgg_outputs[vgg_content_loss_layer]
p_content_layer = p_vgg_outputs[vgg_content_loss_layer]
# Calculate Loss
# Style Loss between style image and stylized image
# Ls = sum of L2 norm of gram matrix of vgg16's conv layers
style_losses = []
for gram, p_vgg_output, style_loss_mult in zip(ctx_grams[c.context], p_vgg_outputs, _style_loss_mult):
gram_s_vgg = gram[indices]
gram_p_vgg = _gram_matrix(p_vgg_output)
style_losses.append(style_loss_mult * mse_loss(gram_s_vgg, gram_p_vgg))
style_loss = _mx.nd.add_n(*style_losses)
# Content Loss between content image and stylized image
# Lc = L2 norm at a single layer in vgg16
content_loss = _content_loss_mult * mse_loss(c_content_layer,
p_content_layer)
curr_content_loss.append(content_loss)
curr_style_loss.append(style_loss)
# Divide loss by large number to get into a more legible
# range
total_loss = (content_loss + style_loss) / 10000.0
Ls.append(total_loss)
for L in Ls:
L.backward()
cur_loss = _np.mean([L.asnumpy()[0] for L in Ls])
if smoothed_loss is None:
smoothed_loss = cur_loss
else:
smoothed_loss = 0.9 * smoothed_loss + 0.1 * cur_loss
iterations += 1
trainer.step(batch_size)
if verbose and iterations == 1:
# Print progress table header
column_names = ['Iteration', 'Loss', 'Elapsed Time']
num_columns = len(column_names)
column_width = max(map(lambda x: len(x), column_names)) + 2
hr = '+' + '+'.join(['-' * column_width] * num_columns) + '+'
print(hr)
print(('| {:<{width}}' * num_columns + '|').format(*column_names, width=column_width-1))
print(hr)
cur_time = _time.time()
if verbose and (cur_time > last_time + 10 or iterations == max_iterations):
# Print progress table row
elapsed_time = cur_time - start_time
print("| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|".format(
cur_iter = iterations, loss = smoothed_loss,
time = elapsed_time , width = column_width-1))
if params['print_loss_breakdown']:
print_content_loss = _np.mean([L.asnumpy()[0] for L in curr_content_loss])
print_style_loss = _np.mean([L.asnumpy()[0] for L in curr_style_loss])
print('Total Loss: {:6.3f} | Content Loss: {:6.3f} | Style Loss: {:6.3f}'.format(cur_loss, print_content_loss, print_style_loss))
last_time = cur_time
if iterations == max_iterations:
print(hr)
break
training_time = _time.time() - start_time
style_sa = style_dataset[style_feature]
idx_column = _tc.SArray(range(0, style_sa.shape[0]))
style_sframe = _tc.SFrame({"style": idx_column, style_feature: style_sa})
# Save the model state
state = {
'_model': transformer,
'_training_time_as_string': _seconds_as_string(training_time),
'batch_size': batch_size,
'num_styles': num_styles,
'model': model,
'input_image_shape': input_shape,
'styles': style_sframe,
'num_content_images': len(content_dataset),
'training_time': training_time,
'max_iterations': max_iterations,
'training_iterations': iterations,
'training_epochs': content_images_loader.cur_epoch,
'style_feature': style_feature,
'content_feature': content_feature,
"_index_column": "style",
'training_loss': smoothed_loss,
}
return StyleTransfer(state) | 0.003663 |
def _create_info_struct(file, mode, samplerate, channels,
format, subtype, endian):
"""Check arguments and create SF_INFO struct."""
original_format = format
if format is None:
format = _get_format_from_filename(file, mode)
assert isinstance(format, (_unicode, str))
else:
_check_format(format)
info = _ffi.new("SF_INFO*")
if 'r' not in mode or format.upper() == 'RAW':
if samplerate is None:
raise TypeError("samplerate must be specified")
info.samplerate = samplerate
if channels is None:
raise TypeError("channels must be specified")
info.channels = channels
info.format = _format_int(format, subtype, endian)
else:
if any(arg is not None for arg in (
samplerate, channels, original_format, subtype, endian)):
raise TypeError("Not allowed for existing files (except 'RAW'): "
"samplerate, channels, format, subtype, endian")
return info | 0.000954 |
def tokenize(self, sentence,
normalize=True,
is_feature=False,
is_surface=False,
return_list=False,
func_normalizer=text_preprocess.normalize_text):
# type: (text_type, bool, bool, bool, bool, Callable[[text_type], text_type]) -> Union[TokenizedSenetence, List[text_type]]
"""* What you can do
-
"""
if normalize:
normalized_sentence = func_normalizer(sentence)
else:
normalized_sentence = sentence
ml_token_object = self.call_juman_interface(normalized_sentence)
token_objects = [
juman_utils.extract_morphological_information(
mrph_object=morph_object,
is_surface=is_surface,
is_feature=is_feature
)
for morph_object in ml_token_object]
if return_list:
tokenized_objects = TokenizedSenetence(
sentence=sentence,
tokenized_objects=token_objects)
return tokenized_objects.convert_list_object()
else:
tokenized_objects = TokenizedSenetence(
sentence=sentence,
tokenized_objects=token_objects)
return tokenized_objects | 0.006902 |
def vsan_supported(service_instance):
'''
    Returns whether VSAN is supported on the vCenter:
        the API version needs to be 6 or higher
service_instance
Service instance to the host or vCenter
'''
try:
api_version = service_instance.content.about.apiVersion
except vim.fault.NoPermission as exc:
log.exception(exc)
raise VMwareApiError('Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise VMwareRuntimeError(exc.msg)
if int(api_version.split('.')[0]) < 6:
return False
return True | 0.001267 |
def register_microscope_files(self, plate_name, acquisition_name,
path):
'''
Register microscope files contained in `path` (Server side).
If `path` is a directory, upload all files contained in it.
Parameters
----------
plate_name: str
name of the parent plate
acquisition_name: str
name of the parent acquisition
path: str
path to a directory on disk where the files that should be uploaded
are located
        Returns
-------
List[str]
names of registered files
'''
logger.info(
'register microscope files for experiment "%s", plate "%s" '
'and acquisition "%s"',
self.experiment_name, plate_name, acquisition_name
)
acquisition_id = self._get_acquisition_id(plate_name, acquisition_name)
        logger.debug('register files for upload')
        url = self._build_api_url(
            '/experiments/{experiment_id}/acquisitions/{acquisition_id}/register'
            .format(experiment_id=self._experiment_id, acquisition_id=acquisition_id)
        )
payload = {'path': path}
res = self._session.post(url, json=payload)
res.raise_for_status()
return res.json()['message'] | 0.005879 |
def timed(f: Optional[Callable[[int], None]] = None):
"""Time the execution of code in the with-block, calling the function
f (if it is given) with the resulting time in nanoseconds."""
start = time.perf_counter()
yield
end = time.perf_counter()
if f:
ns = int((end - start) * 1_000_000_000)
f(ns) | 0.002967 |
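A usage sketch; because the generator above is meant to be driven as a context manager, the sketch wraps it with contextlib.contextmanager in case the defining module does not already apply that decorator.

import contextlib

timed_cm = contextlib.contextmanager(timed)

with timed_cm(lambda ns: print('block took %d ns' % ns)):
    sum(range(1_000_000))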
def fit(self, X, y, **fit_params):
"""Find the best parameters for a particular model.
Parameters
----------
X, y : array-like
**fit_params
Additional partial fit keyword arguments for the estimator.
"""
return default_client().sync(self._fit, X, y, **fit_params) | 0.006024 |
def line_to_variables(source, line, inherit_permission, parent):
"""
Returns a list of variables declared in the provided line of code. The
line of code should be provided as a string.
"""
vartype, kind, strlen, proto, rest = parse_type(line,parent.strings,parent.settings)
attribs = []
intent = ""
optional = False
permission = inherit_permission
parameter = False
attribmatch = ATTRIBSPLIT_RE.match(rest)
if attribmatch:
attribstr = attribmatch.group(1).strip()
declarestr = attribmatch.group(2).strip()
tmp_attribs = ford.utils.paren_split(",",attribstr)
for i in range(len(tmp_attribs)):
tmp_attribs[i] = tmp_attribs[i].strip()
if tmp_attribs[i].lower() == "public": permission = "public"
elif tmp_attribs[i].lower() == "private": permission = "private"
elif tmp_attribs[i].lower() == "protected": permission = "protected"
elif tmp_attribs[i].lower() == "optional": optional = True
elif tmp_attribs[i].lower() == "parameter": parameter = True
elif tmp_attribs[i].lower().replace(' ','') == "intent(in)":
intent = 'in'
elif tmp_attribs[i].lower().replace(' ','') == "intent(out)":
intent = 'out'
elif tmp_attribs[i].lower().replace(' ','') == "intent(inout)":
intent = 'inout'
else: attribs.append(tmp_attribs[i])
else:
declarestr = ATTRIBSPLIT2_RE.match(rest).group(2)
declarations = ford.utils.paren_split(",",declarestr)
varlist = []
for dec in declarations:
dec = re.sub(" ","",dec)
split = ford.utils.paren_split('=',dec)
if len(split) > 1:
name = split[0]
if split[1][0] == '>':
initial = split[1][1:]
points = True
else:
initial = split[1]
points = False
else:
name = dec.strip()
initial = None
points = False
if initial:
initial = COMMA_RE.sub(', ',initial)
search_from = 0
while QUOTES_RE.search(initial[search_from:]):
num = int(QUOTES_RE.search(initial[search_from:]).group()[1:-1])
old_string = NBSP_RE.sub(' ',parent.strings[num])
string = ''
for i in range(len(old_string)):
if old_string[i] == "\\" and (old_string[i+1] in '0123456789' or
old_string[i+1] == 'g'):
string += r'\\'
elif old_string[i] == '(' and old_string[i+1] =='?':
string += r'\('
else:
string += old_string[i]
initial = initial[0:search_from] + QUOTES_RE.sub(string,initial[search_from:],count=1)
search_from += QUOTES_RE.search(initial[search_from:]).end(0)
if proto:
varlist.append(FortranVariable(name,vartype,parent,copy.copy(attribs),intent,
optional,permission,parameter,kind,strlen,list(proto),
[],points,initial))
else:
varlist.append(FortranVariable(name,vartype,parent,copy.copy(attribs),intent,
optional,permission,parameter,kind,strlen,proto,
[],points,initial))
doc = []
docline = source.__next__()
while docline[0:2] == "!" + parent.settings['docmark']:
doc.append(docline[2:])
docline = source.__next__()
source.pass_back(docline)
for var in varlist: var.doc = doc
return varlist | 0.014431 |
def dafec(handle, bufsiz, lenout=_default_len_out):
"""
Extract comments from the comment area of a binary DAF.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafec_c.html
:param handle: Handle of binary DAF opened with read access.
:type handle: int
:param bufsiz: Maximum size, in lines, of buffer.
:type bufsiz: int
:param lenout: Length of strings in output buffer.
:type lenout: int
:return:
Number of extracted comment lines,
buffer where extracted comment lines are placed,
Indicates whether all comments have been extracted.
:rtype: tuple
"""
handle = ctypes.c_int(handle)
buffer = stypes.emptyCharArray(yLen=bufsiz, xLen=lenout)
bufsiz = ctypes.c_int(bufsiz)
lenout = ctypes.c_int(lenout)
n = ctypes.c_int()
done = ctypes.c_int()
libspice.dafec_c(handle, bufsiz, lenout, ctypes.byref(n),
ctypes.byref(buffer), ctypes.byref(done))
return n.value, stypes.cVectorToPython(buffer), bool(done.value) | 0.000951 |
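A usage sketch with spiceypy; the kernel file name below is a hypothetical placeholder for any binary DAF that is already on disk.

import spiceypy as spice

handle = spice.dafopr('de430.bsp')               # hypothetical kernel path
n, comments, done = spice.dafec(handle, bufsiz=25)
print(n, done)
print('\n'.join(comments[:5]))
spice.dafcls(handle)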
def readInfo(stream):
""" Read previously-written information about diffs. """
try:
for line in stream:
(toUUID, fromUUID, size) = line.split()
try:
size = int(size)
except Exception:
logger.warning("Bad size: %s", size)
continue
logger.debug("diff info: %s %s %d", toUUID, fromUUID, size)
Diff.theKnownSizes[toUUID][fromUUID] = size
except Exception as error:
logger.warn("Can't read .bs info file (%s)", error) | 0.003333 |
def replace_table_rate_rule_by_id(cls, table_rate_rule_id, table_rate_rule, **kwargs):
"""Replace TableRateRule
Replace all attributes of TableRateRule
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_table_rate_rule_by_id(table_rate_rule_id, table_rate_rule, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_rule_id: ID of tableRateRule to replace (required)
:param TableRateRule table_rate_rule: Attributes of tableRateRule to replace (required)
:return: TableRateRule
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_table_rate_rule_by_id_with_http_info(table_rate_rule_id, table_rate_rule, **kwargs)
else:
(data) = cls._replace_table_rate_rule_by_id_with_http_info(table_rate_rule_id, table_rate_rule, **kwargs)
return data | 0.007011 |
def lock(self):
"""
Lock the config database.
        Use if locking/unlocking is not performed automatically (i.e. when lock=False).
"""
if not self.locked:
rpc_command = '<Lock/>'
try:
self._execute_rpc(rpc_command)
except XMLCLIError:
raise LockError('Unable to enter in configure exclusive mode!', self)
self.locked = True | 0.006977 |
def _collected_label(collect, label):
"""Label of a collected column."""
if not collect.__name__.startswith('<'):
return label + ' ' + collect.__name__
else:
return label | 0.005051 |
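A tiny illustration of both branches, assuming collect is an aggregation callable such as sum or a lambda.

_collected_label(sum, 'Amount')                 # -> 'Amount sum'
_collected_label(lambda xs: max(xs), 'Amount')  # '<lambda>' name -> just 'Amount'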