def add_arg_param(self, param_name, layer_index, blob_index):
"""Add an arg param to .params file. Example: weights of a fully connected layer."""
self.add_param('arg:%s' % param_name, layer_index, blob_index)
def plot_prof_1(self, species, keystring, xlim1, xlim2, ylim1,
ylim2, symbol=None, show=False):
'''
Plot one species for a cycle between xlim1 and xlim2. Only works
with instances of se and mesa_profile.
Parameters
----------
species : list
Which species to plot.
keystring : string or integer
Label that appears in the plot or in the case of se, a
cycle.
xlim1, xlim2 : integer or float
Mass coordinate range.
ylim1, ylim2 : integer or float
Mass fraction coordinate range.
symbol : string, optional
Which symbol you want to use. If None symbol is set to '-'.
The default is None.
show : boolean, optional
Show the plotted graph. The default is False.
'''
plotType=self._classTest()
if plotType=='se':
#tot_mass=self.se.get(keystring,'total_mass')
tot_mass=self.se.get('mini')
age=self.se.get(keystring,'age')
mass=self.se.get(keystring,'mass')
Xspecies=self.se.get(keystring,'iso_massf',species)
mod=keystring
elif plotType=='mesa_profile':
tot_mass=self.header_attr['star_mass']
age=self.header_attr['star_age']
mass=self.get('mass')
mod=self.header_attr['model_number']
Xspecies=self.get(species)
else:
print('This method is not supported for '+str(self.__class__))
return
if symbol is None:
symbol = '-'
x,y=self._logarithm(Xspecies,mass,True,False,10)
#print x
pl.plot(y,x,symbol,label=str(species))
pl.xlim(xlim1,xlim2)
pl.ylim(ylim1,ylim2)
pl.legend()
pl.xlabel('$Mass$ $coordinate$', fontsize=20)
pl.ylabel('$X_{i}$', fontsize=20)
#pl.title('Mass='+str(tot_mass)+', Time='+str(age)+' years, cycle='+str(mod))
pl.title('Mass='+str(tot_mass)+', cycle='+str(mod))
if show:
pl.show()
def start(info):
"""Run the dev server.
Uses `django_extensions <http://pypi.python.org/pypi/django-extensions/0.5>`, if
available, to provide `runserver_plus`.
Set the command to use with `options.paved.django.runserver`
Set the port to use with `options.paved.django.runserver_port`
"""
cmd = options.paved.django.runserver
if cmd == 'runserver_plus':
try:
import django_extensions
except ImportError:
info("Could not import django_extensions. Using default runserver.")
cmd = 'runserver'
port = options.paved.django.runserver_port
if port:
cmd = '%s %s' % (cmd, port)
call_manage(cmd)
def writefile(filename, content):
"""
Write the content into the file.
:param filename: the filename
:param content: the content
:return:
"""
with open(path_expand(filename), 'w') as outfile:
outfile.write(content)
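# A minimal standalone sketch of how writefile could be exercised. It assumes
# path_expand behaves like os.path.expanduser/expandvars (an assumption; the
# real helper lives elsewhere in this codebase), and all names are illustrative.
import os
import tempfile

def path_expand_sketch(filename):
    # Expand "~" and environment variables, mirroring the assumed behaviour.
    return os.path.expandvars(os.path.expanduser(filename))

def writefile_sketch(filename, content):
    with open(path_expand_sketch(filename), 'w') as outfile:
        outfile.write(content)

if __name__ == '__main__':
    target = os.path.join(tempfile.gettempdir(), 'writefile_demo.txt')
    writefile_sketch(target, 'hello world\n')
    with open(target) as f:
        print(f.read())  # -> hello world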
def crypto_validator(func):
"""
This is a decorator to be used for any method relying on the cryptography library. # noqa: E501
Its behaviour depends on the 'crypto_valid' attribute of the global 'conf'.
"""
def func_in(*args, **kwargs):
if not conf.crypto_valid:
raise ImportError("Cannot execute crypto-related method! "
"Please install python-cryptography v1.7 or later.") # noqa: E501
return func(*args, **kwargs)
return func_in
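# A minimal sketch of the same guard-decorator pattern, using a stand-in config
# object instead of the real global `conf` (assumption: conf is a namespace with
# a boolean `crypto_valid` attribute). Names here are illustrative only.
class _ConfSketch:
    crypto_valid = False

conf_sketch = _ConfSketch()

def crypto_validator_sketch(func):
    def func_in(*args, **kwargs):
        if not conf_sketch.crypto_valid:
            raise ImportError("Cannot execute crypto-related method! "
                              "Please install python-cryptography v1.7 or later.")
        return func(*args, **kwargs)
    return func_in

@crypto_validator_sketch
def needs_crypto():
    return "ok"

try:
    needs_crypto()             # raises because crypto_valid is False
except ImportError as exc:
    print(exc)
conf_sketch.crypto_valid = True
print(needs_crypto())          # -> ok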
def get_matrix(self):
"""Return a copy of the current transformation matrix (CTM)."""
matrix = Matrix()
cairo.cairo_get_matrix(self._pointer, matrix._pointer)
self._check_status()
return matrix
def set_config(self, **config):
"""Shadow all the current config."""
reinit = False
if 'stdopt' in config:
stdopt = config.pop('stdopt')
reinit = (stdopt != self.stdopt)
self.stdopt = stdopt
if 'attachopt' in config:
attachopt = config.pop('attachopt')
reinit = reinit or (attachopt != self.attachopt)
self.attachopt = attachopt
if 'attachvalue' in config:
attachvalue = config.pop('attachvalue')
reinit = reinit or (attachvalue != self.attachvalue)
self.attachvalue = attachvalue
if 'auto2dashes' in config:
self.auto2dashes = config.pop('auto2dashes')
if 'name' in config:
name = config.pop('name')
reinit = reinit or (name != self.name)
self.name = name
if 'help' in config:
self.help = config.pop('help')
self._set_or_remove_extra_handler(
self.help, ('--help', '-h'), self.help_handler)
if 'version' in config:
self.version = config.pop('version')
self._set_or_remove_extra_handler(
self.version is not None,
('--version', '-v'),
self.version_handler)
if 'case_sensitive' in config:
case_sensitive = config.pop('case_sensitive')
reinit = reinit or (case_sensitive != self.case_sensitive)
self.case_sensitive = case_sensitive
if 'optionsfirst' in config:
self.options_first = config.pop('optionsfirst')
if 'appearedonly' in config:
self.appeared_only = config.pop('appearedonly')
if 'namedoptions' in config:
namedoptions = config.pop('namedoptions')
reinit = reinit or (namedoptions != self.namedoptions)
self.namedoptions = namedoptions
if 'extra' in config:
self.extra.update(self._formal_extra(config.pop('extra')))
if config: # should be empty
raise ValueError(
'`%s` %s not accepted keyword argument%s' % (
'`, `'.join(config),
'is' if len(config) == 1 else 'are',
'' if len(config) == 1 else 's'
))
if self.doc is not None and reinit:
logger.warning(
'You changed config options that require a re-initialized'
' `Docpie` object. Create a new one instead.'
)
self._init()
def active(self):
""" Returns if task is active.
"""
if not os.path.isfile(self._paths['active_file']):
return False
return self._loaded
def schema(self):
"""The nested Schema object.
.. versionchanged:: 1.0.0
Renamed from `serializer` to `schema`
"""
if not self.__schema:
# Inherit context from parent.
context = getattr(self.parent, 'context', {})
if isinstance(self.nested, SchemaABC):
self.__schema = self.nested
self.__schema.context.update(context)
else:
if isinstance(self.nested, type) and issubclass(self.nested, SchemaABC):
schema_class = self.nested
elif not isinstance(self.nested, basestring):
raise ValueError(
'Nested fields must be passed a '
'Schema, not {}.'.format(self.nested.__class__),
)
elif self.nested == 'self':
schema_class = self.parent.__class__
else:
schema_class = class_registry.get_class(self.nested)
self.__schema = schema_class(
many=self.many,
only=self.only,
exclude=self.exclude,
context=context,
load_only=self._nested_normalized_option('load_only'),
dump_only=self._nested_normalized_option('dump_only'),
)
return self.__schema
def QA_fetch_get_option_50etf_contract_time_to_market():
'''
#🛠todo: fetch the listing dates of option contracts? Not available yet.
:return: list Series
'''
result = QA_fetch_get_option_list('tdx')
# pprint.pprint(result)
# category market code name desc code
'''
fix here :
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
result['meaningful_name'] = None
C:\work_new\QUANTAXIS\QUANTAXIS\QAFetch\QATdx.py:1468: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
'''
# df = pd.DataFrame()
rows = []
result['meaningful_name'] = None
for idx in result.index:
# pprint.pprint((idx))
strCategory = result.loc[idx, "category"]
strMarket = result.loc[idx, "market"]
strCode = result.loc[idx, "code"] # 10001215
strName = result.loc[idx, 'name'] # 510050C9M03200
strDesc = result.loc[idx, 'desc'] # 10001215
if strName.startswith("510050"):
# print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, )
if strName.startswith("510050C"):
putcall = '50ETF,认购期权'
elif strName.startswith("510050P"):
putcall = '50ETF,认沽期权'
else:
putcall = "Unkown code name : " + strName
expireMonth = strName[7:8]
if expireMonth == 'A':
expireMonth = "10月"
elif expireMonth == 'B':
expireMonth = "11月"
elif expireMonth == 'C':
expireMonth = "12月"
else:
expireMonth = expireMonth + '月'
# The 12th character is initially set to "M" and advances from "A" to "Z" as the contract is adjusted: "A" means the option contract's first adjustment, "B" the second, and so on.
# fix here : M ??
if strName[8:9] == "M":
adjust = "未调整"
elif strName[8:9] == 'A':
adjust = " 第1次调整"
elif strName[8:9] == 'B':
adjust = " 第2调整"
elif strName[8:9] == 'C':
adjust = " 第3次调整"
elif strName[8:9] == 'D':
adjust = " 第4次调整"
elif strName[8:9] == 'E':
adjust = " 第5次调整"
elif strName[8:9] == 'F':
adjust = " 第6次调整"
elif strName[8:9] == 'G':
adjust = " 第7次调整"
elif strName[8:9] == 'H':
adjust = " 第8次调整"
elif strName[8:9] == 'I':
adjust = " 第9次调整"
elif strName[8:9] == 'J':
adjust = " 第10次调整"
else:
adjust = " 第10次以上的调整,调整代码 %s" + strName[8:9]
executePrice = strName[9:]
result.loc[idx, 'meaningful_name'] = '%s,到期月份:%s,%s,行权价:%s' % (
putcall, expireMonth, adjust, executePrice)
row = result.loc[idx]
rows.append(row)
return rows
def _resolve_file(file_name):
"""
Checks if the file exists.
If the file exists, the method returns its absolute path.
Else, it returns None
:param file_name: The name of the file to check
:return: An absolute path, or None
"""
if not file_name:
return None
path = os.path.realpath(file_name)
if os.path.isfile(path):
return path
return None
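# A small usage sketch for _resolve_file. A local copy of the function is used
# so the snippet runs on its own; the file names are illustrative.
import os
import tempfile

def _resolve_file_sketch(file_name):
    if not file_name:
        return None
    path = os.path.realpath(file_name)
    if os.path.isfile(path):
        return path
    return None

with tempfile.NamedTemporaryFile(delete=False) as handle:
    existing = handle.name
print(_resolve_file_sketch(existing))            # absolute path of the temp file
print(_resolve_file_sketch('no/such/file.cfg'))  # None
print(_resolve_file_sketch(None))                # None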
def run_mutation_aggregator(job, mutation_results, univ_options):
"""
Aggregate all the called mutations.
:param dict mutation_results: Dict of dicts of the various mutation callers in a per chromosome
format
:param dict univ_options: Dict of universal options used by almost all tools
:returns: fsID for the merged mutations file
:rtype: toil.fileStore.FileID
"""
# Setup an input data structure for the merge function
out = {}
for chrom in mutation_results['mutect'].keys():
out[chrom] = job.addChildJobFn(merge_perchrom_mutations, chrom, mutation_results,
univ_options).rv()
merged_snvs = job.addFollowOnJobFn(merge_perchrom_vcfs, out, 'merged', univ_options)
job.fileStore.logToMaster('Aggregated mutations for %s successfully' % univ_options['patient'])
return merged_snvs.rv()
def _log(self, name, element): # pylint: disable=no-self-use
"""
Log Response and Tag elements. Do nothing if the element is neither.
"""
from bs4 import BeautifulSoup, Tag
if isinstance(element, Response):
LOGGER.debug('%s response: URL=%s Code=%s', name, element.url, element.status_code)
elif isinstance(element, (BeautifulSoup, Tag)):
LOGGER.debug('%s HTML:\n%s', name, element)
def compute_difficulty(
bomb_delay: int,
parent_header: BlockHeader,
timestamp: int) -> int:
"""
https://github.com/ethereum/EIPs/issues/100
"""
parent_timestamp = parent_header.timestamp
validate_gt(timestamp, parent_timestamp, title="Header.timestamp")
parent_difficulty = parent_header.difficulty
offset = parent_difficulty // DIFFICULTY_ADJUSTMENT_DENOMINATOR
has_uncles = parent_header.uncles_hash != EMPTY_UNCLE_HASH
adj_factor = max(
(
(2 if has_uncles else 1) -
((timestamp - parent_timestamp) // BYZANTIUM_DIFFICULTY_ADJUSTMENT_CUTOFF)
),
-99,
)
difficulty = max(
parent_difficulty + offset * adj_factor,
min(parent_header.difficulty, DIFFICULTY_MINIMUM)
)
num_bomb_periods = (
max(
0,
parent_header.block_number + 1 - bomb_delay,
) // BOMB_EXPONENTIAL_PERIOD
) - BOMB_EXPONENTIAL_FREE_PERIODS
if num_bomb_periods >= 0:
return max(difficulty + 2**num_bomb_periods, DIFFICULTY_MINIMUM)
else:
return difficulty
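# A standalone arithmetic sketch of the EIP-100 style adjustment factor used
# above. The constants below are assumptions for illustration (the real values
# come from the chain configuration): adjustment denominator 2048, timestamp
# cutoff 9 seconds, minimum difficulty 131072.
ADJUSTMENT_DENOMINATOR = 2048
ADJUSTMENT_CUTOFF = 9
MINIMUM = 131072

def adjusted_difficulty_sketch(parent_difficulty, seconds_since_parent, has_uncles):
    offset = parent_difficulty // ADJUSTMENT_DENOMINATOR
    adj_factor = max((2 if has_uncles else 1) - seconds_since_parent // ADJUSTMENT_CUTOFF, -99)
    return max(parent_difficulty + offset * adj_factor, MINIMUM)

# A fast block (5 s) pushes difficulty up; a slow block (40 s) pulls it down.
print(adjusted_difficulty_sketch(2_000_000, 5, False))   # 2000976
print(adjusted_difficulty_sketch(2_000_000, 40, False))  # 1997072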
def _tcpdump_callback(self, line, kill_switch):
"""Callback function to handle tcpdump"""
line = line.lower()
if ("listening" in line) or ("reading" in line):
self.started = True
if ("no suitable device" in line):
self.error = True
self.kill_switch()
if "by kernel" in line:
self.stopped = True
def write_stats_as_csv(gtfs, path_to_csv, re_write=False):
"""
Writes data from get_stats to csv file
Parameters
----------
gtfs: GTFS
path_to_csv: str
filepath to the csv file to be generated
re_write:
instead of appending, create a new one.
"""
stats_dict = get_stats(gtfs)
# check if file exist
if re_write:
os.remove(path_to_csv)
#if not os.path.isfile(path_to_csv):
# is_new = True
#else:
# is_new = False
is_new = True
mode = 'r' if os.path.exists(path_to_csv) else 'w+'
with open(path_to_csv, mode) as csvfile:
for line in csvfile:
if line:
is_new = False
else:
is_new = True
with open(path_to_csv, 'a') as csvfile:
if (sys.version_info > (3, 0)):
delimiter = u","
else:
delimiter = b","
statswriter = csv.writer(csvfile, delimiter=delimiter)
# write column names if the file is new
if is_new:
statswriter.writerow([key for key in sorted(stats_dict.keys())])
row_to_write = []
# write stats row sorted by column name
for key in sorted(stats_dict.keys()):
row_to_write.append(stats_dict[key])
statswriter.writerow(row_to_write)
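# A minimal standalone sketch of the append-with-header-once pattern the
# function above implements, using a plain dict in place of get_stats(gtfs).
# The file name and stats keys are illustrative only.
import csv
import os
import tempfile

def append_stats_row_sketch(path_to_csv, stats_dict):
    is_new = not os.path.isfile(path_to_csv) or os.path.getsize(path_to_csv) == 0
    with open(path_to_csv, 'a', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        keys = sorted(stats_dict.keys())
        if is_new:
            writer.writerow(keys)  # write the header only for a fresh file
        writer.writerow([stats_dict[k] for k in keys])

demo_path = os.path.join(tempfile.gettempdir(), 'gtfs_stats_demo.csv')
append_stats_row_sketch(demo_path, {'n_stops': 120, 'n_routes': 14})
append_stats_row_sketch(demo_path, {'n_stops': 121, 'n_routes': 14})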
def unattach_rconfiguration(context, id, rconfiguration_id):
"""unattach_rconfiguration(context, id, rconfiguration_id):
Unattach a rconfiguration from a remoteci.
>>> dcictl remoteci-unattach-rconfiguration id [OPTIONS]
:param string id: ID of the remoteci to unattach the rconfiguration from
[required]
:param string rconfiguration_id: ID of the rconfiguration to unattach
[required]
"""
result = remoteci.delete_rconfiguration(
context, id=id, rconfiguration_id=rconfiguration_id)
if result.status_code == 204:
utils.print_json(
{'id': id, 'message': 'Rconfiguration unattached from RemoteCI'})
else:
utils.format_output(result, context.format)
def get_site_type_dummy_variables(self, sites):
"""
Binary rock/soil classification dummy variable based on sites.vs30.
"``S`` is 1 for a rock site and 0 otherwise" (p. 1201).
"""
is_rock = np.array(sites.vs30 > self.NEHRP_BC_BOUNDARY)
return is_rock
def cast_to_seq_record(obj, alphabet=IUPAC.extended_protein, id="<unknown id>", name="<unknown name>",
description="<unknown description>", dbxrefs=None,
features=None, annotations=None,
letter_annotations=None):
"""Return a SeqRecord representation of a string or Seq object.
Args:
obj (str, Seq, SeqRecord): Sequence string or Biopython Seq object
alphabet: See Biopython SeqRecord docs
id: See Biopython SeqRecord docs
name: See Biopython SeqRecord docs
description: See Biopython SeqRecord docs
dbxrefs: See Biopython SeqRecord docs
features: See Biopython SeqRecord docs
annotations: See Biopython SeqRecord docs
letter_annotations: See Biopython SeqRecord docs
Returns:
SeqRecord: SeqRecord representation of the sequence
"""
if isinstance(obj, SeqRecord):
return obj
if isinstance(obj, Seq):
return SeqRecord(obj, id, name, description, dbxrefs, features, annotations, letter_annotations)
if isinstance(obj, str):
obj = obj.upper()
return SeqRecord(Seq(obj, alphabet), id, name, description, dbxrefs, features, annotations, letter_annotations)
else:
raise ValueError('Must provide a string, Seq, or SeqRecord object.')
def _write_box_information(xml_file, structure, ref_distance):
"""Write box information.
Parameters
----------
xml_file : file object
The file object of the hoomdxml file being written
structure : parmed.Structure
Parmed structure object
ref_distance : float
Reference distance for conversion to reduced units
"""
if np.allclose(structure.box[3:6], np.array([90, 90, 90])):
box_str = '<box units="sigma" Lx="{}" Ly="{}" Lz="{}"/>\n'
xml_file.write(box_str.format(*structure.box[:3] / ref_distance))
else:
a, b, c = structure.box[0:3] / ref_distance
alpha, beta, gamma = np.radians(structure.box[3:6])
lx = a
xy = b * np.cos(gamma)
xz = c * np.cos(beta)
ly = np.sqrt(b**2 - xy**2)
yz = (b*c*np.cos(alpha) - xy*xz) / ly
lz = np.sqrt(c**2 - xz**2 - yz**2)
box_str = '<box units="sigma" Lx="{}" Ly="{}" Lz="{}" xy="{}" xz="{}" yz="{}"/>\n'
xml_file.write(box_str.format(lx, ly, lz, xy, xz, yz))
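# A standalone sketch of the triclinic box conversion performed above: the
# (a, b, c, alpha, beta, gamma) cell is turned into HOOMD-style box lengths and
# tilt factors. The numeric inputs are illustrative only.
import numpy as np

def box_to_tilt_factors_sketch(a, b, c, alpha_deg, beta_deg, gamma_deg):
    alpha, beta, gamma = np.radians([alpha_deg, beta_deg, gamma_deg])
    lx = a
    xy = b * np.cos(gamma)
    xz = c * np.cos(beta)
    ly = np.sqrt(b**2 - xy**2)
    yz = (b * c * np.cos(alpha) - xy * xz) / ly
    lz = np.sqrt(c**2 - xz**2 - yz**2)
    return lx, ly, lz, xy, xz, yz

print(box_to_tilt_factors_sketch(4.0, 4.0, 4.0, 90, 90, 120))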
def sizeof(self, context=None) -> int:
"""
Return the size of the construct in bytes.
:param context: Optional context dictionary.
"""
if context is None:
context = Context()
if not isinstance(context, Context):
context = Context(context)
try:
return self._sizeof(context)
except Error:
raise
except Exception as exc:
raise SizeofError(str(exc))
def get_feature_flag_by_name(self, name, check_feature_exists=None):
"""GetFeatureFlagByName.
[Preview API] Retrieve information on a single feature flag and its current states
:param str name: The name of the feature to retrieve
:param bool check_feature_exists: Check if feature exists
:rtype: :class:`<FeatureFlag> <azure.devops.v5_0.feature_availability.models.FeatureFlag>`
"""
route_values = {}
if name is not None:
route_values['name'] = self._serialize.url('name', name, 'str')
query_parameters = {}
if check_feature_exists is not None:
query_parameters['checkFeatureExists'] = self._serialize.query('check_feature_exists', check_feature_exists, 'bool')
response = self._send(http_method='GET',
location_id='3e2b80f8-9e6f-441e-8393-005610692d9c',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('FeatureFlag', response)
def _get_nws_feed(self):
"""get nws alert feed, and cache it"""
url = '''http://alerts.weather.gov/cap/%s.php?x=0''' % (str(self._state).lower())
# pylint: disable=E1103
xml = requests.get(url).content
return xml
def push(self, metric_name=None, metric_value=None, volume=None):
""" Ship that shit off to graphite broski
"""
graphite_path = self.path_prefix
graphite_path += '.' + self.device + '.' + 'volume'
graphite_path += '.' + volume + '.' + metric_name
metric = Metric(graphite_path, metric_value, precision=4,
host=self.device)
self.publish_metric(metric)
def _set_sip_ipv4_address(self, v, load=False):
"""
Setter method for sip_ipv4_address, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/neighbor_ips/neighbor_addr/update_source/sip_ipv4_address (sip-ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_sip_ipv4_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sip_ipv4_address() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sip_ipv4_address must be of a type compatible with sip-ipv4-address""",
'defined-type': "brocade-bgp:sip-ipv4-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True)""",
})
self.__sip_ipv4_address = t
if hasattr(self, '_set'):
self._set()
def _from_dict(cls, _dict):
"""Initialize a ToneCategory object from a json dictionary."""
args = {}
if 'tones' in _dict:
args['tones'] = [
ToneScore._from_dict(x) for x in (_dict.get('tones'))
]
else:
raise ValueError(
'Required property \'tones\' not present in ToneCategory JSON')
if 'category_id' in _dict:
args['category_id'] = _dict.get('category_id')
else:
raise ValueError(
'Required property \'category_id\' not present in ToneCategory JSON'
)
if 'category_name' in _dict:
args['category_name'] = _dict.get('category_name')
else:
raise ValueError(
'Required property \'category_name\' not present in ToneCategory JSON'
)
return cls(**args)
def create_bookmark_action(parent, url, title, icon=None, shortcut=None):
"""Create bookmark action"""
@Slot()
def open_url():
return programs.start_file(url)
return create_action(parent, title, shortcut=shortcut, icon=icon,
triggered=open_url)
def min_volatility(self):
"""Get the minimum variance solution"""
if not self.w:
self.solve()
var = []
for w in self.w:
a = np.dot(np.dot(w.T, self.cov_matrix), w)
var.append(a)
# return min(var)**.5, self.w[var.index(min(var))]
self.weights = self.w[var.index(min(var))].reshape((self.n_assets,))
return dict(zip(self.tickers, self.weights))
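# A numpy-only sketch of the variance computation used above: for each candidate
# weight vector w, the portfolio variance is w.T @ cov_matrix @ w, and the
# weights with the smallest variance are kept. The data here is synthetic.
import numpy as np

cov_matrix = np.array([[0.04, 0.01, 0.00],
                       [0.01, 0.09, 0.02],
                       [0.00, 0.02, 0.16]])
candidates = [np.array([0.6, 0.3, 0.1]),
              np.array([0.3, 0.4, 0.3]),
              np.array([0.8, 0.1, 0.1])]
variances = [float(w @ cov_matrix @ w) for w in candidates]
best = candidates[int(np.argmin(variances))]
print(dict(zip(['A', 'B', 'C'], best)), min(variances) ** 0.5)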
def readXML(self):
"""
Read XML.
"""
data = self.readLongString()
root = xml.fromstring(data)
self.context.addObject(root)
return root
def load_modules(self, filepaths):
"""
Loads the modules from their `filepaths`. A filepath may be
a directory filepath if there is an `__init__.py` file in the
directory.
If a filepath errors, the exception will be caught and logged
in the logger.
Returns a list of modules.
"""
# removes filepaths from processed if they are not in sys.modules
self._update_loaded_modules()
filepaths = util.return_set(filepaths)
modules = []
for filepath in filepaths:
filepath = self._clean_filepath(filepath)
# check to see if already processed and move onto next if so
if self._processed_filepath(filepath):
continue
module_name = util.get_module_name(filepath)
plugin_module_name = util.create_unique_module_name(module_name)
try:
module = load_source(plugin_module_name, filepath)
# Catch all exceptions b/c loader will return errors
# within the code itself, such as Syntax, NameErrors, etc.
except Exception:
exc_info = sys.exc_info()
self._log.error(msg=self._error_string.format(filepath),
exc_info=exc_info)
continue
self.loaded_modules.add(module.__name__)
modules.append(module)
self.processed_filepaths[module.__name__] = filepath
return modules
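# A small sketch of loading a module object from a file path. The code above
# relies on a `load_source` helper; this stand-in uses importlib.util, the
# modern equivalent (an assumption about what load_source does internally).
import importlib.util

def load_source_sketch(module_name, filepath):
    spec = importlib.util.spec_from_file_location(module_name, filepath)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)  # may raise SyntaxError, NameError, ...
    return module

# Example (assuming ./plugins/example.py exists):
# module = load_source_sketch('plugins_example', './plugins/example.py')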
def pretty_objname(self, obj=None, maxlen=50, color="boldcyan"):
""" Pretty prints object name
@obj: the object whose name you want to pretty print
@maxlen: #int maximum length of an object name to print
@color: your choice of :mod:colors or |None|
-> #str pretty object name
..
from vital.debug import Look
print(Look.pretty_objname(dict))
# -> 'dict\x1b[1;36m<builtins>\x1b[1;m'
..
"""
parent_name = lambda_sub("", get_parent_name(obj) or "")
objname = get_obj_name(obj)
if color:
objname += colorize("<{}>".format(parent_name), color, close=False)
else:
objname += "<{}>".format(parent_name)
objname = objname if len(objname) < maxlen else \
objname[:(maxlen-1)]+"…>"
if color:
objname += colors.RESET
return objname
def zonalstats(features, raster, all_touched, band, categorical,
indent, info, nodata, prefix, stats, sequence, use_rs):
'''zonalstats generates summary statistics of geospatial raster datasets
based on vector features.
The input arguments to zonalstats should be valid GeoJSON Features. (see cligj)
The output GeoJSON will be mostly unchanged but have additional properties per feature
describing the summary statistics (min, max, mean, etc.) of the underlying raster dataset.
The raster is specified by the required -r/--raster argument.
Example, calculate rainfall stats for each state and output to file:
\b
rio zonalstats states.geojson -r rainfall.tif > mean_rainfall_by_state.geojson
'''
if info:
logging.basicConfig(level=logging.INFO)
if stats is not None:
stats = stats.split(" ")
if 'all' in [x.lower() for x in stats]:
stats = "ALL"
zonal_results = gen_zonal_stats(
features,
raster,
all_touched=all_touched,
band=band,
categorical=categorical,
nodata=nodata,
stats=stats,
prefix=prefix,
geojson_out=True)
if sequence:
for feature in zonal_results:
if use_rs:
click.echo(b'\x1e', nl=False)
click.echo(json.dumps(feature))
else:
click.echo(json.dumps(
{'type': 'FeatureCollection',
'features': list(zonal_results)}))
def ipop_range(self, start=0, stop=-1, callback=None, withscores=True):
'''pop a range from the :class:`OrderedMixin`'''
backend = self.backend
res = backend.structure(self).ipop_range(start, stop,
withscores=withscores)
if not callback:
callback = self.load_data if withscores else self.load_values
return backend.execute(res, callback)
def fromFile(cls, filepath):
"""
Creates a proxy instance from the inputted registry file.
:param filepath | <str>
:return <PluginProxy> || None
"""
xdata = ElementTree.parse(nstr(filepath))
xroot = xdata.getroot()
# collect variable information
name = xroot.get('name')
ver = float(xroot.get('version', '1.0'))
if not name:
name = os.path.basename(filepath).split('.')[0]  # base name without extension
if name == '__init__':
name = os.path.normpath(filepath).split(os.path.sep)[-2]
name = projex.text.pretty(name)
icon = xroot.get('icon', './icon.png')
ximport = xroot.find('import')
if ximport is not None:
importpath = ximport.get('path', './__init__.py')
else:
importpath = './__init__.py'
params = {'description': '', 'author': '', 'email': '', 'url': ''}
for param, default in params.items():
xdata = xroot.find(param)
if xdata is not None:
params[param] = xdata.text
# generate the proxy information
proxy = PluginProxy(cls, name, ver)
proxy.setImportPath(importpath)
proxy.setDescription(params['description'])
proxy.setAuthor(params['author'])
proxy.setEmail(params['email'])
proxy.setUrl(params['url'])
proxy.setFilepath(filepath)
return proxy
def get_features(self, mapobject_type_name):
'''Gets features for a given object type.
Parameters
----------
mapobject_type_name: str
type of the segmented objects
Returns
-------
List[Dict[str, str]]
information about each feature
See also
--------
:func:`tmserver.api.feature.get_features`
:class:`tmlib.models.feature.Feature`
'''
logger.info(
'get features of experiment "%s", object type "%s"',
self.experiment_name, mapobject_type_name
)
mapobject_type_id = self._get_mapobject_type_id(mapobject_type_name)
url = self._build_api_url(
'/experiments/{experiment_id}/mapobject_types/{mapobject_type_id}/features'.format(
experiment_id=self._experiment_id,
mapobject_type_id=mapobject_type_id
)
)
res = self._session.get(url)
res.raise_for_status()
return res.json()['data']
def banlist(self, channel):
"""
Get the channel banlist.
Required arguments:
* channel - The channel to get the banlist for.
"""
with self.lock:
self.is_in_channel(channel)
self.send('MODE %s b' % channel)
bans = []
while self.readable():
msg = self._recv(expected_replies=('367', '368'))
if msg[0] == '367':
banmask, who, timestamp = msg[2].split()[1:]
bans.append((self._from_(banmask), who, \
self._m_time.localtime(int(timestamp))))
elif msg[0] == '368':
break
return bans
def describe_event_source_mapping(UUID=None, EventSourceArn=None,
FunctionName=None,
region=None, key=None, keyid=None, profile=None):
'''
Given an event source mapping ID or an event source ARN and FunctionName,
obtain the current settings of that mapping.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_lambda.describe_event_source_mapping uuid
'''
ids = _get_ids(UUID, EventSourceArn=EventSourceArn,
FunctionName=FunctionName)
if not ids:
return {'event_source_mapping': None}
UUID = ids[0]
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
desc = conn.get_event_source_mapping(UUID=UUID)
if desc:
keys = ('UUID', 'BatchSize', 'EventSourceArn',
'FunctionArn', 'LastModified', 'LastProcessingResult',
'State', 'StateTransitionReason')
return {'event_source_mapping': dict([(k, desc.get(k)) for k in keys])}
else:
return {'event_source_mapping': None}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
def get_grade_entry_form_for_update(self, grade_entry_id):
"""Gets the grade entry form for updating an existing entry.
A new grade entry form should be requested for each update
transaction.
arg: grade_entry_id (osid.id.Id): the ``Id`` of the
``GradeEntry``
return: (osid.grading.GradeEntryForm) - the grade entry form
raise: NotFound - ``grade_entry_id`` is not found
raise: NullArgument - ``grade_entry_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
collection = JSONClientValidated('grading',
collection='GradeEntry',
runtime=self._runtime)
if not isinstance(grade_entry_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
if (grade_entry_id.get_identifier_namespace() != 'grading.GradeEntry' or
grade_entry_id.get_authority() != self._authority):
raise errors.InvalidArgument()
result = collection.find_one({'_id': ObjectId(grade_entry_id.get_identifier())})
obj_form = objects.GradeEntryForm(
osid_object_map=result,
effective_agent_id=str(self.get_effective_agent_id()),
runtime=self._runtime,
proxy=self._proxy)
self._forms[obj_form.get_id().get_identifier()] = not UPDATED
return obj_form
def strainer(self):
"""
Determine whether it is required to run the MLST analyses
"""
# Initialise a variable to store whether the analyses need to be performed
analyse = list()
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
try:
# Try to open the final report from the analyses. If it exists, then the analyses don't need to be
# performed again.
if os.path.isfile('{}{}_{}.csv'.format(sample[self.analysistype].reportdir, sample.name,
self.analysistype)):
if self.analysistype == 'rmlst':
# Run the allele updater method
updatecall, allelefolder = getrmlsthelper(self.referencefilepath, self.updatedatabases,
self.start)
else:
# referencefilepath, start, organism, update
allelefolder = getmlsthelper(self.referencefilepath, self.start,
sample.general.referencegenus, self.updatedatabases)
# Alleles have a .tfa extension
self.alleles = glob('{}/*.tfa'.format(allelefolder))
sample[self.analysistype].alleles = self.alleles
sample[self.analysistype].allelenames = [os.path.split(x)[1].split('.')[0] for x in
self.alleles]
# The analyses have already been successfully completed
analyse.append(False)
# Otherwise run the analyses
else:
self.populator(sample)
analyse.append(True)
# If the attribute doesn't exist, then the analyses haven't been performed yet.
except (KeyError, AttributeError):
self.populator(sample)
analyse.append(True)
else:
self.populator(sample)
analyse.append(False)
# Only run the analyses if they have not completed successfully before
# if any(analyse):
# Run the MLST analyses
MLST(self)
def wait_for_task_property(service, task, prop, timeout_sec=120):
"""Waits for a task to have the specified property"""
return time_wait(lambda: task_property_present_predicate(service, task, prop), timeout_seconds=timeout_sec)
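# A standalone sketch of the time_wait polling helper assumed above: call the
# predicate repeatedly until it returns True or the timeout expires. The
# poll_interval parameter is an illustrative addition.
import time

def time_wait_sketch(predicate, timeout_seconds=120, poll_interval=1.0):
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(poll_interval)
    return False

# e.g. wait up to 5 seconds for a condition that becomes true after ~2 seconds:
start = time.time()
print(time_wait_sketch(lambda: time.time() - start > 2, timeout_seconds=5))  # True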
def as_dict(self):
"""
Returns dict which contains Pourbaix Entry data.
Note that the pH, voltage, H2O factors are always calculated when
constructing a PourbaixEntry object.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
if isinstance(self.entry, IonEntry):
d["entry_type"] = "Ion"
else:
d["entry_type"] = "Solid"
d["entry"] = self.entry.as_dict()
d["concentration"] = self.concentration
d["entry_id"] = self.entry_id
return d
def imagetransformer_ae_cifar():
"""Hyperparameters for CIFAR-10 experiments."""
hparams = transformer_ae_small()
hparams.filter_size = 512
hparams.num_compress_steps = 3
hparams.startup_steps = 10000
hparams.is_2d = 0
hparams.learning_rate_warmup_steps = 8000
hparams.learning_rate = 0.2
hparams.hidden_size = 512
hparams.batch_size = 1
hparams.max_length = 256
hparams.dropout = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.initializer_gain = 0.2
hparams.num_hidden_layers = 6
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.label_smoothing = 0.0
hparams.norm_type = "layer"
hparams.layer_prepostprocess_dropout = 0.0
hparams.num_heads = 8
hparams.task = "image"
hparams.ffn_layer = "conv_hidden_relu"
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.
hparams.pos = "timing" # timing, none
hparams.nbr_decoder_problems = 1
hparams.num_output_layers = 3
# TODO(trandustin): semhash doesn't work if filter_size != hidden_size. For
# now, set default to dvq.
hparams.bottleneck_kind = "dvq"
hparams.add_hparam("block_size", 1)
# dilated attention based flags
hparams.add_hparam("gap_sizes", [2, 4, 8, 16, 32, 64, 2, 4, 8, 16, 32, 64])
hparams.add_hparam("dilated_attention", False)
# image size related flags
# assuming that the image has same height and width
hparams.add_hparam("img_len", 32)
hparams.add_hparam("num_channels", 3)
# Local attention params
hparams.add_hparam("local_and_global_att", False)
hparams.add_hparam("block_length", 256)
hparams.add_hparam("block_width", 128)
hparams.num_encoder_layers = 4
hparams.num_decoder_layers = 12
hparams.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_1D)
hparams.add_hparam("block_raster_scan", False)
hparams.add_hparam("shared_rel", False)
# multipos attention params
hparams.add_hparam("q_filter_width", 1)
hparams.add_hparam("kv_filter_width", 1)
hparams.add_hparam("unconditional", False) # unconditional generation
hparams.bottom["targets"] = modalities.image_channel_embeddings_bottom
hparams.top["targets"] = modalities.image_channel_embeddings_top
hparams.drop_inputs = True
hparams.do_attend_compress = False
hparams.do_attend_decompress = False
return hparams
def clear_trash(cookie, tokens):
'''Empty the recycle bin, deleting all the files in it.'''
url = ''.join([
const.PAN_API_URL,
'recycle/clear?channel=chunlei&clienttype=0&web=1',
'&t=', util.timestamp(),
'&bdstoken=', tokens['bdstoken'],
])
# Send the command via POST, but with an empty data payload.
req = net.urlopen(url, headers={
'Cookie': cookie.header_output(),
}, data=''.encode())
if req:
content = req.data
return json.loads(content.decode())
else:
return None
def _get_all_styles(self):
"""
return a dictionary of {(row, col) -> CellStyle}
for all cells that use a non-default style.
"""
_styles = {}
def _get_style(bold=False, bg_col=None, border=None):
if (bold, bg_col, border) not in _styles:
_styles[(bold, bg_col, border)] = CellStyle(bold=bold,
bg_color=bg_col,
border=border)
return _styles[(bold, bg_col, border)]
ws_styles = {}
for table, (row, col) in self.__tables.values():
for r in range(row, row + table.header_height):
for c in range(col, col + table.width):
if isinstance(table.header_style, dict):
col_name = table.dataframe.columns[c - col]
style = table.header_style.get(col_name, _get_style(bold=True))
else:
style = table.header_style or _get_style(bold=True)
ws_styles[(r, c)] = style
for c in range(col, col + table.row_labels_width):
for r in range(row + table.header_height, row + table.height):
if isinstance(table.index_style, dict):
row_name = table.dataframe.index[r - row]
style = table.index_style.get(row_name, _get_style(bold=True))
else:
style = table.index_style or _get_style(bold=True)
ws_styles[(r, c)] = style
if table.style.stripe_colors or table.style.border:
num_bg_cols = len(table.style.stripe_colors) if \
table.style.stripe_colors else 1
bg_cols = table.style.stripe_colors if \
table.style.stripe_colors else None
for i, row_offset in enumerate(range(table.header_height,
table.height)):
for c in range(col, col + table.width):
bg_col = bg_cols[i % num_bg_cols] if bg_cols else None
style = _get_style(bold=None, bg_col=bg_col, border=table.style.border)
if (row + row_offset, c) in ws_styles:
style = style + ws_styles[(row + row_offset, c)]
ws_styles[(row + row_offset, c)] = style
for col_name, col_style in table.column_styles.items():
try:
col_offset = table.get_column_offset(col_name)
except KeyError:
continue
for i, r in enumerate(range(row + table.header_height, row + table.height)):
style = col_style
if (r, col + col_offset) in ws_styles:
style = ws_styles[(r, col + col_offset)] + style
ws_styles[(r, col + col_offset)] = style
for row_name, row_style in table.row_styles.items():
try:
row_offset = table.get_row_offset(row_name)
except KeyError:
continue
for i, c in enumerate(range(col + table.row_labels_width, col + table.width)):
style = row_style
if (row + row_offset, c) in ws_styles:
style = ws_styles[(row + row_offset, c)] + style
ws_styles[(row + row_offset, c)] = style
for (row_name, col_name), cell_style in table.cell_styles.items():
try:
col_offset = table.get_column_offset(col_name)
row_offset = table.get_row_offset(row_name)
except KeyError:
continue
style = cell_style
if (row + row_offset, col + col_offset) in ws_styles:
style = ws_styles[(row + row_offset, col + col_offset)] + style
ws_styles[(row + row_offset, col + col_offset)] = style
for (row, col), value in self.__values.items():
if isinstance(value, Value):
style = value.style
if style:
if (row, col) in ws_styles:
style = style + ws_styles[(row, col)]
ws_styles[(row, col)] = style
return ws_styles
def string_length(ctx, s=None):
'''
Yields one number
'''
if s is None:
s = ctx.node
elif callable(s):
s = next(s.compute(ctx), '')
yield len(s)
def get_message_definitions(self, msgid_or_symbol: str) -> list:
"""Returns the Message object for this message.
:param str msgid_or_symbol: msgid_or_symbol may be either a numeric or symbolic id.
:raises UnknownMessageError: if the message id is not defined.
:rtype: List of MessageDefinition
:return: A message definition corresponding to msgid_or_symbol
"""
if msgid_or_symbol[1:].isdigit():
msgid_or_symbol = msgid_or_symbol.upper()
for source in (self._alternative_names, self._messages_definitions):
try:
return [source[msgid_or_symbol]]
except KeyError:
pass
error_msg = "No such message id or symbol '{msgid_or_symbol}'.".format(
msgid_or_symbol=msgid_or_symbol
)
raise UnknownMessageError(error_msg)
def ret(eqdata, **kwargs):
"""
Generate a DataFrame where the sole column, 'Return',
is the return for the equity over the given number of sessions.
For example, if 'XYZ' has 'Adj Close' of `100.0` on 2014-12-15 and
`90.0` 4 *sessions* later on 2014-12-19, then the 'Return' value
for 2014-12-19 will be `-0.1`.
Parameters
----------
eqdata : DataFrame
Data such as that returned by `get()`
selection : str, optional
Column from which to determine growth values. Defaults to
'Adj Close'.
n_sessions : int
Number of sessions to count back for calculating today's
return. For example, if `n_sessions` is set to 4, return is
calculated relative to the price 4 sessions ago. Defaults
to 1 (price of previous session).
skipstartrows : int
Rows to skip at beginning of `eqdata` in addition to the 1 row that must
be skipped because the calculation relies on a prior data point.
Defaults to 0.
skipendrows : int
Rows to skip at end of `eqdata`. Defaults to 0.
outputcol : str, optional
Name for column of output dataframe. Defaults to 'Return'.
Returns
----------
out : DataFrame
See Also
--------
:func:`growth`
Notes
----------
The interval is the number of *sessions* between the 2 values
whose ratio is being measured, *not* the number of days (which
includes days on which the market is closed).
The percentage gain or loss is measured relative to the earlier
date, but the index date is the later date. The later date is chosen as
the index because that is the date on which the value is known. The
percentage measure is used because that is how percent profit and loss
are calculated.
"""
if 'outputcol' not in kwargs:
kwargs['outputcol'] = 'Return'
result = growth(eqdata, **kwargs)
result.values[:, :] -= 1.
return result
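# A pandas sketch of the n-session return computed above, without calling the
# library's growth() helper: today's price divided by the price n_sessions ago,
# minus one. The 'Adj Close' data below is synthetic, chosen to reproduce the
# -0.1 example from the docstring.
import pandas as pd

prices = pd.DataFrame(
    {'Adj Close': [100.0, 102.0, 99.0, 95.0, 90.0]},
    index=pd.date_range('2014-12-15', periods=5, freq='B'))
n_sessions = 4
returns = (prices['Adj Close'] / prices['Adj Close'].shift(n_sessions) - 1.0).dropna()
print(returns)  # 2014-12-19    -0.1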
def scaled_pressure_send(self, time_boot_ms, press_abs, press_diff, temperature, force_mavlink1=False):
'''
The pressure readings for the typical setup of one absolute and
differential pressure sensor. The units are as
specified in each field.
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
press_abs : Absolute pressure (hectopascal) (float)
press_diff : Differential pressure 1 (hectopascal) (float)
temperature : Temperature measurement (0.01 degrees celsius) (int16_t)
'''
return self.send(self.scaled_pressure_encode(time_boot_ms, press_abs, press_diff, temperature), force_mavlink1=force_mavlink1)
def file_or_stderr(filename, *, mode="a", encoding="utf-8"):
"""Returns a context object wrapping either the given file or
stderr (if filename is None). This makes dealing with log files
more convenient.
"""
if filename is not None:
return open(filename, mode, encoding=encoding)
@contextmanager
def stderr_wrapper():
yield sys.stderr
return stderr_wrapper()
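# A short usage sketch: both branches return something usable in a `with`
# block, so calling code does not need to special-case stderr. A local copy of
# the function is used so the snippet runs on its own.
import sys
from contextlib import contextmanager

def file_or_stderr_sketch(filename, *, mode="a", encoding="utf-8"):
    if filename is not None:
        return open(filename, mode, encoding=encoding)

    @contextmanager
    def stderr_wrapper():
        yield sys.stderr

    return stderr_wrapper()

with file_or_stderr_sketch(None) as stream:
    print("goes to stderr", file=stream)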
def get_requirements():
"""
Returns the content of 'requirements.txt' in a list.
:return: The content of 'requirements.txt'.
:rtype: list(str)
"""
requirements = []
with open(
os.path.join(BASE_DIRECTORY, 'requirements.txt'),
'r',
encoding='utf-8'
) as requirements_file:
lines = requirements_file.readlines()
for line in lines:
requirements.append(line.strip())
return requirements
def validate(self, sources):
"""Validate the format of sources
"""
if not isinstance(sources, Root):
raise Exception("Source object expected")
parameters = self.get_uri_with_missing_parameters(sources)
for parameter in parameters:
logging.getLogger().warn('Missing parameter "%s" in uri of method "%s" in versions "%s"' % (parameter["name"], parameter["method"], parameter["version"]))
def _update_param(self):
r"""Update parameters
This method updates the values of the algorthm parameters with the
methods provided
"""
# Update the gamma parameter.
if not isinstance(self._gamma_update, type(None)):
self._gamma = self._gamma_update(self._gamma)
# Update lambda parameter.
if not isinstance(self._lambda_update, type(None)):
self._lambda_param = self._lambda_update(self._lambda_param)
def _evaluate(self,x,y):
'''
Returns the level of the interpolated function at each value in x,y.
Only called internally by HARKinterpolator2D.__call__ (etc).
'''
if _isscalar(x):
x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)
y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)
else:
x_pos = self.xSearchFunc(self.x_list,x)
x_pos[x_pos < 1] = 1
x_pos[x_pos > self.x_n-1] = self.x_n-1
y_pos = self.ySearchFunc(self.y_list,y)
y_pos[y_pos < 1] = 1
y_pos[y_pos > self.y_n-1] = self.y_n-1
alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])
beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
f = (
(1-alpha)*(1-beta)*self.f_values[x_pos-1,y_pos-1]
+ (1-alpha)*beta*self.f_values[x_pos-1,y_pos]
+ alpha*(1-beta)*self.f_values[x_pos,y_pos-1]
+ alpha*beta*self.f_values[x_pos,y_pos])
return f
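# A numpy sketch of the same bilinear interpolation arithmetic on a small grid:
# locate the bracketing knots with searchsorted, then blend the four corner
# values with the alpha/beta weights shown above. The grid data is synthetic.
import numpy as np

x_list = np.array([0.0, 1.0, 2.0])
y_list = np.array([0.0, 1.0])
f_values = np.array([[0.0, 1.0],
                     [2.0, 3.0],
                     [4.0, 5.0]])  # f_values[i, j] = f(x_list[i], y_list[j])

def bilinear_sketch(x, y):
    x_pos = max(min(np.searchsorted(x_list, x), len(x_list) - 1), 1)
    y_pos = max(min(np.searchsorted(y_list, y), len(y_list) - 1), 1)
    alpha = (x - x_list[x_pos - 1]) / (x_list[x_pos] - x_list[x_pos - 1])
    beta = (y - y_list[y_pos - 1]) / (y_list[y_pos] - y_list[y_pos - 1])
    return ((1 - alpha) * (1 - beta) * f_values[x_pos - 1, y_pos - 1]
            + (1 - alpha) * beta * f_values[x_pos - 1, y_pos]
            + alpha * (1 - beta) * f_values[x_pos, y_pos - 1]
            + alpha * beta * f_values[x_pos, y_pos])

print(bilinear_sketch(1.5, 0.25))  # 3.25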
def map_forecast_estimate(self):
""" get the prior and posterior forecast (prediction) expectations.
Returns
-------
pandas.DataFrame : pandas.DataFrame
dataframe with prior and posterior forecast expected values
"""
assert self.forecasts is not None
islog = self.pst.parameter_data.partrans == "log"
par_map = self.map_parameter_estimate
par_map.loc[islog,:] = np.log10(par_map.loc[islog,:])
par_map = Matrix.from_dataframe(par_map.loc[:,["post_expt"]])
posts,priors = [],[]
post_expt = (self.predictions.T * par_map).to_dataframe()
for fname in self.forecast_names:
#fname = forecast.col_names[0]
pr = self.pst.res.loc[fname,"modelled"]
priors.append(pr)
posts.append(pr + post_expt.loc[fname,"post_expt"])
return pd.DataFrame(data=np.array([priors,posts]).transpose(),
columns=["prior_expt","post_expt"],
index=self.forecast_names)
def resources(ctx, gpu):
"""Get job resources.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon job -j 2 resources
```
For GPU resources
\b
```bash
$ polyaxon job -j 2 resources --gpu
```
"""
user, project_name, _job = get_job_or_local(ctx.obj.get('project'), ctx.obj.get('job'))
try:
message_handler = Printer.gpu_resources if gpu else Printer.resources
PolyaxonClient().job.resources(user,
project_name,
_job,
message_handler=message_handler)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get resources for job `{}`.'.format(_job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
def cov(self, other, min_periods=None):
"""
Compute covariance with Series, excluding missing values.
Parameters
----------
other : Series
Series with which to compute the covariance.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
Returns
-------
float
Covariance between Series and other normalized by N-1
(unbiased estimator).
Examples
--------
>>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])
>>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])
>>> s1.cov(s2)
-0.01685762652715874
"""
this, other = self.align(other, join='inner', copy=False)
if len(this) == 0:
return np.nan
return nanops.nancov(this.values, other.values,
min_periods=min_periods)
def string(prompt=None, empty=False):
"""Prompt a string.
Parameters
----------
prompt : str, optional
Use an alternative prompt.
empty : bool, optional
Allow an empty response.
Returns
-------
str or None
A str if the user entered a non-empty string.
None if the user pressed only Enter and ``empty`` was True.
"""
s = _prompt_input(prompt)
if empty and not s:
return None
else:
if s:
return s
else:
return string(prompt=prompt, empty=empty)
def getDependents(self, retracted=False):
"""
Returns a list of siblings who depend on us to calculate their result.
:param retracted: If false, retracted/rejected dependents are dismissed
:type retracted: bool
:return: Analyses that depend on the current analysis
:rtype: list of IAnalysis
"""
def is_dependent(analysis):
calculation = analysis.getCalculation()
if not calculation:
return False
services = calculation.getRawDependentServices()
if not services:
return False
query = dict(UID=services, getKeyword=self.getKeyword())
services = api.search(query, "bika_setup_catalog")
return len(services) > 0
siblings = self.getSiblings(retracted=retracted)
return filter(lambda sib: is_dependent(sib), siblings) | Returns a list of siblings who depend on us to calculate their result.
:param retracted: If false, retracted/rejected dependents are dismissed
:type retracted: bool
:return: Analyses that depend on the current analysis
:rtype: list of IAnalysis |
def add_on_channel_close_callback(self):
"""
Tell pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
self._logger.info('Adding channel close callback')
self._channel.add_on_close_callback(self.on_channel_closed) | Tell pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel. |
def ra_indices_for_traj(self, traj):
"""
Gives the indices for a trajectory file index (without changing the order within the trajectory itself).
:param traj: a trajectory file index
:return: a Nx1 - np.array of the indices corresponding to the trajectory index
"""
assert not self.uniform_stride, "requested random access indices, but is in uniform stride mode"
if traj in self.traj_keys:
return self.ra_indices_for_traj_dict[traj]
else:
return np.array([]) | Gives the indices for a trajectory file index (without changing the order within the trajectory itself).
:param traj: a trajectory file index
:return: a Nx1 - np.array of the indices corresponding to the trajectory index |
def get_a(name=None, ipv4addr=None, allow_array=True, **api_opts):
'''
Get A record
CLI Examples:
.. code-block:: bash
salt-call infoblox.get_a name=abc.example.com
salt-call infoblox.get_a ipv4addr=192.168.3.5
'''
data = {}
if name:
data['name'] = name
if ipv4addr:
data['ipv4addr'] = ipv4addr
r = get_object('record:a', data=data, **api_opts)
if r and len(r) > 1 and not allow_array:
raise Exception('More than one result, use allow_array to return the data')
return r | Get A record
CLI Examples:
.. code-block:: bash
salt-call infoblox.get_a name=abc.example.com
salt-call infoblox.get_a ipv4addr=192.168.3.5 |
def with_optimizer_tensor(self, tensor: Union[tf.Tensor, tf.Operation]) -> 'Optimization':
"""
Replace optimizer tensor.
        :param tensor: Tensorflow tensor.
:return: Optimization instance self reference.
"""
self._optimizer_tensor = tensor
return self | Replace optimizer tensor.
:param tensor: Tensorflow tensor.
:return: Optimization instance self reference. |
def local_outgoing_hook(handler=None, coro=None):
"""add a callback to run every time a greenlet is switched away from
:param handler:
the callback function, must be a function taking 2 arguments:
- an integer indicating whether it is being called as an incoming (1)
hook or as an outgoing (2) hook (in this case it will always be 2).
- the coroutine being switched from (in this case it is the one
indicated by the ``coro`` argument to ``local_outgoing_hook``.
Be aware that only a weak reference to this function will be held.
:type handler: function
:param coro:
the coroutine for which to apply the trace hook (defaults to current)
:type coro: greenlet
"""
if handler is None:
return lambda h: local_outgoing_hook(h, coro)
if not hasattr(handler, "__call__"):
raise TypeError("trace hooks must be callable")
if coro is None:
coro = compat.getcurrent()
log.info("setting a coroutine local outgoing hook callback")
state.local_from_hooks.setdefault(coro, []).append(
weakref.ref(handler))
return handler | add a callback to run every time a greenlet is switched away from
:param handler:
the callback function, must be a function taking 2 arguments:
- an integer indicating whether it is being called as an incoming (1)
hook or as an outgoing (2) hook (in this case it will always be 2).
- the coroutine being switched from (in this case it is the one
indicated by the ``coro`` argument to ``local_outgoing_hook``.
Be aware that only a weak reference to this function will be held.
:type handler: function
:param coro:
the coroutine for which to apply the trace hook (defaults to current)
:type coro: greenlet |
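A sketch of a conforming handler; the logging side effect is illustrative. Because only a weak reference is stored, the caller must keep its own reference to the handler alive.
```python
import logging

def log_switch_away(direction, coro):
    # direction is always 2 here, since this is registered as an outgoing hook.
    logging.getLogger(__name__).debug("switching away from %r", coro)

# Keep a strong reference; registration itself only holds a weakref.
handler = local_outgoing_hook(log_switch_away)
```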
def _mark_html_fields_as_safe(self, page):
"""
Mark the html content as safe so we don't have to use the safe
template tag in all cms templates:
"""
page.title = mark_safe(page.title)
page.content = mark_safe(page.content)
return page | Mark the html content as safe so we don't have to use the safe
template tag in all cms templates: |
def marshal(self, v):
"""
Turn this value into API format.
Do a reverse dictionary lookup on choices to find the original value. If
        there are no keys or too many keys, we raise a NotImplementedError for now
as marshal is not used anywhere currently. In the future we will want to
fail gracefully.
"""
if v:
orig = [i for i in self.choices if self.choices[i] == v]
if len(orig) == 1:
return orig[0]
elif len(orig) == 0:
# No such choice
raise NotImplementedError("No such reverse choice {0} for field {1}.".format(v, self))
else:
# Too many choices. We could return one possible choice (e.g. orig[0]).
raise NotImplementedError("Too many reverse choices {0} for value {1} for field {2}".format(orig, v, self)) | Turn this value into API format.
Do a reverse dictionary lookup on choices to find the original value. If
there are no keys or too many keys, we raise a NotImplementedError for now
as marshal is not used anywhere currently. In the future we will want to
fail gracefully. |
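The reverse lookup is plain dictionary scanning; a standalone sketch with an illustrative choices mapping (not the field class itself):
```python
# Illustrative mapping of API keys to display values.
choices = {1: "open", 2: "closed"}

def reverse_choice(choices, value):
    matches = [key for key, mapped in choices.items() if mapped == value]
    if len(matches) != 1:
        raise NotImplementedError(
            "expected exactly one reverse choice for %r, got %d" % (value, len(matches)))
    return matches[0]

assert reverse_choice(choices, "closed") == 2
```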
def format_table(table, column_names=None, column_specs=None, max_col_width=32, auto_col_width=False):
"""
Table pretty printer. Expects tables to be given as arrays of arrays::
print(format_table([[1, "2"], [3, "456"]], column_names=['A', 'B']))
"""
orig_col_args = dict(column_names=column_names, column_specs=column_specs)
if len(table) > 0:
col_widths = [0] * len(table[0])
elif column_specs is not None:
col_widths = [0] * (len(column_specs) + 1)
elif column_names is not None:
col_widths = [0] * len(column_names)
my_col_names, id_column = [], None
if column_specs is not None:
column_names = ["Row"]
column_names.extend([col["name"] for col in column_specs])
column_specs = [{"name": "Row", "type": "float"}] + column_specs
if column_names is not None:
for i in range(len(column_names)):
if column_names[i].lower() == "id":
id_column = i
my_col = ansi_truncate(str(column_names[i]), max_col_width if i not in {0, id_column} else 99)
my_col_names.append(my_col)
col_widths[i] = max(col_widths[i], len(strip_ansi_codes(my_col)))
trunc_table = []
for row in table:
my_row = []
for i in range(len(row)):
my_item = ansi_truncate(str(row[i]), max_col_width if i not in {0, id_column} else 99)
my_row.append(my_item)
col_widths[i] = max(col_widths[i], len(strip_ansi_codes(my_item)))
trunc_table.append(my_row)
type_colormap = {"boolean": BLUE(),
"integer": YELLOW(),
"float": WHITE(),
"string": GREEN()}
for i in "uint8", "int16", "uint16", "int32", "uint32", "int64":
type_colormap[i] = type_colormap["integer"]
type_colormap["double"] = type_colormap["float"]
def col_head(i):
if column_specs is not None:
return BOLD() + type_colormap[column_specs[i]["type"]] + column_names[i] + ENDC()
else:
return BOLD() + WHITE() + column_names[i] + ENDC()
formatted_table = [border("┌") + border("┬").join(border("─") * i for i in col_widths) + border("┐")]
if len(my_col_names) > 0:
padded_column_names = [col_head(i) + " " * (col_widths[i] - len(my_col_names[i]))
for i in range(len(my_col_names))]
formatted_table.append(border("│") + border("│").join(padded_column_names) + border("│"))
formatted_table.append(border("├") + border("┼").join(border("─") * i for i in col_widths) + border("┤"))
for row in trunc_table:
padded_row = [row[i] + " " * (col_widths[i] - len(strip_ansi_codes(row[i]))) for i in range(len(row))]
formatted_table.append(border("│") + border("│").join(padded_row) + border("│"))
formatted_table.append(border("└") + border("┴").join(border("─") * i for i in col_widths) + border("┘"))
if auto_col_width:
if not sys.stdout.isatty():
raise AegeaException("Cannot auto-format table, output is not a terminal")
table_width = len(strip_ansi_codes(formatted_table[0]))
tty_cols, tty_rows = get_terminal_size()
if table_width > max(tty_cols, 80):
return format_table(table, max_col_width=max_col_width - 1, auto_col_width=True, **orig_col_args)
return "\n".join(formatted_table) | Table pretty printer. Expects tables to be given as arrays of arrays::
print(format_table([[1, "2"], [3, "456"]], column_names=['A', 'B'])) |
def to_filelink(self):
"""
        Checks whether the conversion has completed and, if so, converts the result to a Filelink
*returns* [Filestack.Filelink]
```python
filelink = av_convert.to_filelink()
```
"""
if self.status != 'completed':
return 'Audio/video conversion not complete!'
response = utils.make_call(self.url, 'get')
if response.ok:
response = response.json()
handle = re.match(
r'(?:https:\/\/cdn\.filestackcontent\.com\/)(\w+)',
response['data']['url']
).group(1)
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
        raise Exception(response.text) | Checks whether the conversion has completed and, if so, converts the result to a Filelink
*returns* [Filestack.Filelink]
```python
filelink = av_convert.to_filelink()
``` |
def add_command_hooks(commands, srcdir='.'):
"""
Look through setup_package.py modules for functions with names like
``pre_<command_name>_hook`` and ``post_<command_name>_hook`` where
``<command_name>`` is the name of a ``setup.py`` command (e.g. build_ext).
If either hook is present this adds a wrapped version of that command to
the passed in ``commands`` `dict`. ``commands`` may be pre-populated with
other custom distutils command classes that should be wrapped if there are
hooks for them (e.g. `AstropyBuildPy`).
"""
hook_re = re.compile(r'^(pre|post)_(.+)_hook$')
# Distutils commands have a method of the same name, but it is not a
# *classmethod* (which probably didn't exist when distutils was first
# written)
def get_command_name(cmdcls):
if hasattr(cmdcls, 'command_name'):
return cmdcls.command_name
else:
return cmdcls.__name__
packages = find_packages(srcdir)
dist = get_dummy_distribution()
hooks = collections.defaultdict(dict)
for setuppkg in iter_setup_packages(srcdir, packages):
for name, obj in vars(setuppkg).items():
match = hook_re.match(name)
if not match:
continue
hook_type = match.group(1)
cmd_name = match.group(2)
if hook_type not in hooks[cmd_name]:
hooks[cmd_name][hook_type] = []
hooks[cmd_name][hook_type].append((setuppkg.__name__, obj))
for cmd_name, cmd_hooks in hooks.items():
commands[cmd_name] = generate_hooked_command(
cmd_name, dist.get_command_class(cmd_name), cmd_hooks) | Look through setup_package.py modules for functions with names like
``pre_<command_name>_hook`` and ``post_<command_name>_hook`` where
``<command_name>`` is the name of a ``setup.py`` command (e.g. build_ext).
If either hook is present this adds a wrapped version of that command to
the passed in ``commands`` `dict`. ``commands`` may be pre-populated with
other custom distutils command classes that should be wrapped if there are
hooks for them (e.g. `AstropyBuildPy`). |
def random_sample(self, elements=('a', 'b', 'c'), length=None):
"""
Returns a list of random unique elements for the specified length.
        Multiple occurrences of the same value increase its probability of being in the output.
"""
return self.random_elements(elements, length, unique=True) | Returns a list of random unique elements for the specified length.
Multiple occurrences of the same value increase its probability of being in the output. |
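A hedged usage sketch, assuming the method is exposed on a Faker instance (provider methods are proxied onto the generator); the elements and length are illustrative.
```python
from faker import Faker  # assuming this provider method is reachable via a Faker instance

fake = Faker()
picked = fake.random_sample(elements=('a', 'b', 'c', 'd'), length=2)
print(picked)  # e.g. ['d', 'a'] - two distinct values from the pool
```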
def fit(self, X, y=None, **kwargs):
"""
The fit method is the primary drawing input for the
visualization since it has both the X and y data required for the
viz and the transform method does not.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer
"""
if is_dataframe(X):
self.X = X.values
if self.features_ is None:
self.features_ = X.columns
else:
self.X = X
self.y = y
super(MissingDataVisualizer, self).fit(X, y, **kwargs) | The fit method is the primary drawing input for the
visualization since it has both the X and y data required for the
viz and the transform method does not.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer |
def tobinarray(self, start=None, end=None, pad=_DEPRECATED, size=None):
''' Convert this object to binary form as array. If start and end
unspecified, they will be inferred from the data.
@param start start address of output bytes.
@param end end address of output bytes (inclusive).
@param pad [DEPRECATED PARAMETER, please use self.padding instead]
fill empty spaces with this value
(if pad is None then this method uses self.padding).
@param size size of the block, used with start or end parameter.
@return array of unsigned char data.
'''
if not isinstance(pad, _DeprecatedParam):
print ("IntelHex.tobinarray: 'pad' parameter is deprecated.")
if pad is not None:
print ("Please, use IntelHex.padding attribute instead.")
else:
print ("Please, don't pass it explicitly.")
print ("Use syntax like this: ih.tobinarray(start=xxx, end=yyy, size=zzz)")
else:
pad = None
return self._tobinarray_really(start, end, pad, size) | Convert this object to binary form as array. If start and end
unspecified, they will be inferred from the data.
@param start start address of output bytes.
@param end end address of output bytes (inclusive).
@param pad [DEPRECATED PARAMETER, please use self.padding instead]
fill empty spaces with this value
(if pad is None then this method uses self.padding).
@param size size of the block, used with start or end parameter.
@return array of unsigned char data. |
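A usage sketch with the intelhex package; the file name is illustrative, and padding is set via the attribute rather than the deprecated pad argument, as the message above recommends.
```python
from intelhex import IntelHex

ih = IntelHex("firmware.hex")  # illustrative HEX image
ih.padding = 0xFF              # fill gaps with 0xFF instead of passing pad=

whole = ih.tobinarray()                         # range inferred from the data
window = ih.tobinarray(start=0x0000, size=256)  # explicit 256-byte block
print(len(window))
```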
def clear_to_enc_filename(fname):
"""
    Converts the filename of a cleartext file to the corresponding encrypted filename
:param fname:
:return: filename of encrypted secret file if found, else None
"""
if not fname.lower().endswith('.json'):
raise CredkeepException('Invalid filetype')
if fname.lower().endswith('.enc.json'):
raise CredkeepException('File already encrypted')
enc_fname = fname[:-4] + 'enc.json'
    return enc_fname if exists(enc_fname) else None | Converts the filename of a cleartext file to the corresponding encrypted filename
:param fname:
:return: filename of encrypted secret file if found, else None |
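A usage sketch; the path is illustrative, and the encrypted counterpart is only returned when it already exists on disk.
```python
enc_path = clear_to_enc_filename("secrets.json")  # looks for secrets.enc.json
if enc_path is None:
    print("no encrypted counterpart found")
else:
    print("encrypted file:", enc_path)
```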
def confirm_email(self):
""" Confirm email """
if self._email and self.email_new:
self._email = self.email_new
self.email_confirmed = True
self.email_link = None
self.email_new = None
self.email_link_expires = None | Confirm email |
def set_digital_line_state(line_name, state):
"""Set the state of a single digital line.
line_name (str) - The physical name of the line.
e.g line_name="Dev1/port0/line3"
This should be a single digital line. Specifying more than one would
result in unexpected behaviour. For example "Dev1/port0/line0:5" is
not allowed.
see http://zone.ni.com/reference/en-XX/help/370466W-01/mxcncpts/physchannames/
for details of naming lines.
state (bool) - state=True sets the line to high, state=False sets to low.
"""
# get the line number from the line name. Thats the number of bits to shift
bits_to_shift = int(line_name.split('line')[-1])
dig_data = np.ones(2, dtype="uint32")*bool(state)*(2**bits_to_shift)
# Note here that the number of samples written here are 2, which is the
# minimum required for a buffered write. If we configure a timing for the
# write, it is considered buffered.
# see http://zone.ni.com/reference/en-XX/help/370471Y-01/daqmxcfunc/daqmxwritedigitalu32/
DigitalOutputTask(line_name, dig_data).StartAndWait() | Set the state of a single digital line.
line_name (str) - The physical name of the line.
e.g line_name="Dev1/port0/line3"
This should be a single digital line. Specifying more than one would
result in unexpected behaviour. For example "Dev1/port0/line0:5" is
not allowed.
see http://zone.ni.com/reference/en-XX/help/370466W-01/mxcncpts/physchannames/
for details of naming lines.
state (bool) - state=True sets the line to high, state=False sets to low. |
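A usage sketch following the physical-line naming convention documented above; the device and line are illustrative.
```python
# Drive a single digital line high, then low again.
set_digital_line_state("Dev1/port0/line3", True)
set_digital_line_state("Dev1/port0/line3", False)
```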
def json_datetime_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
serial = obj.isoformat()
return serial
if ObjectId is not None and isinstance(obj, ObjectId):
# TODO: try to use bson.json_util instead
return str(obj)
raise TypeError("Type not serializable") | JSON serializer for objects not serializable by default json code |
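The serializer is intended to be passed as the `default` hook of `json.dumps`; a minimal sketch with illustrative data:
```python
import json
from datetime import datetime

payload = {"created": datetime(2021, 5, 1, 12, 30), "name": "job-42"}
print(json.dumps(payload, default=json_datetime_serial))
# -> {"created": "2021-05-01T12:30:00", "name": "job-42"}
```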
def update(self, id, data):
"""
Replaces document with _id = id with data.
:param id: _id of document to update
:type id: ``string``
:param data: the new document to insert
:type data: ``string``
:return: id of replaced document
:rtype: ``dict``
"""
return json.loads(self._post(UrlEncoded(str(id)), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8')) | Replaces document with _id = id with data.
:param id: _id of document to update
:type id: ``string``
:param data: the new document to insert
:type data: ``string``
:return: id of replaced document
:rtype: ``dict`` |
def set_in(self, que_in, num_senders):
"""Set the queue in input and the number of parallel tasks that send inputs"""
for p in self.processes:
p.set_in(que_in, num_senders) | Set the queue in input and the number of parallel tasks that send inputs |
def write(self, offset, data):
"""
Write C{data} into this file at position C{offset}. Extending the
file past its original end is expected. Unlike python's normal
C{write()} methods, this method cannot do a partial write: it must
write all of C{data} or else return an error.
The default implementation checks for an attribute on C{self} named
C{writefile}, and if present, performs the write operation on the
python file-like object found there. The attribute is named
differently from C{readfile} to make it easy to implement read-only
(or write-only) files, but if both attributes are present, they should
refer to the same file.
@param offset: position in the file to start reading from.
@type offset: int or long
@param data: data to write into the file.
@type data: str
@return: an SFTP error code like L{SFTP_OK}.
"""
writefile = getattr(self, 'writefile', None)
if writefile is None:
return SFTP_OP_UNSUPPORTED
try:
# in append mode, don't care about seeking
if (self.__flags & os.O_APPEND) == 0:
if self.__tell is None:
self.__tell = writefile.tell()
if offset != self.__tell:
writefile.seek(offset)
self.__tell = offset
writefile.write(data)
writefile.flush()
        except IOError as e:
self.__tell = None
return SFTPServer.convert_errno(e.errno)
if self.__tell is not None:
self.__tell += len(data)
return SFTP_OK | Write C{data} into this file at position C{offset}. Extending the
file past its original end is expected. Unlike python's normal
C{write()} methods, this method cannot do a partial write: it must
write all of C{data} or else return an error.
The default implementation checks for an attribute on C{self} named
C{writefile}, and if present, performs the write operation on the
python file-like object found there. The attribute is named
differently from C{readfile} to make it easy to implement read-only
(or write-only) files, but if both attributes are present, they should
refer to the same file.
@param offset: position in the file to start reading from.
@type offset: int or long
@param data: data to write into the file.
@type data: str
@return: an SFTP error code like L{SFTP_OK}. |
def _from_api_repr(cls, resource):
"""Returns a job reference for an API resource representation."""
job_id = resource.get("jobId")
project = resource.get("projectId")
location = resource.get("location")
job_ref = cls(job_id, project, location)
return job_ref | Returns a job reference for an API resource representation. |
def metric_get(self, project, metric_name):
"""API call: retrieve a metric resource.
:type project: str
:param project: ID of the project containing the metric.
:type metric_name: str
:param metric_name: the name of the metric
:rtype: dict
:returns: The metric object returned from the API (converted from a
protobuf to a dictionary).
"""
path = "projects/%s/metrics/%s" % (project, metric_name)
metric_pb = self._gapic_api.get_log_metric(path)
# NOTE: LogMetric message type does not have an ``Any`` field
# so `MessageToDict`` can safely be used.
return MessageToDict(metric_pb) | API call: retrieve a metric resource.
:type project: str
:param project: ID of the project containing the metric.
:type metric_name: str
:param metric_name: the name of the metric
:rtype: dict
:returns: The metric object returned from the API (converted from a
protobuf to a dictionary). |
def ph_supconj(b, orbit, solve_for=None, **kwargs):
"""
TODO: add documentation
"""
orbit_ps = _get_system_ps(b, orbit)
# metawargs = orbit_ps.meta
#metawargs.pop('qualifier')
# t0_ph0 and phshift both exist by default, so we don't have to worry about creating either
# t0_ph0 = orbit_ps.get_parameter(qualifier='t0_ph0')
# phshift = orbit_ps.get_parameter(qualifier='phshift')
ph_supconj = orbit_ps.get_parameter(qualifier='ph_supconj')
per0 = orbit_ps.get_parameter(qualifier='per0')
ecc = orbit_ps.get_parameter(qualifier='ecc')
period = orbit_ps.get_parameter(qualifier='period')
# true_anom_supconj = pi/2 - per0
# mean_anom_supconj = true_anom_supconj - ecc*sin(true_anom_supconj)
# ph_supconj = (mean_anom_supconj + per0) / (2 * pi) - 1/4
if solve_for in [None, ph_supconj]:
lhs = ph_supconj
#true_anom_supconj = np.pi/2*u.rad - per0
true_anom_supconj = -1*(per0 - 360*u.deg)
rhs = _true_anom_to_phase(true_anom_supconj, period, ecc, per0)
#elif solve_for in [per0]:
# raise NotImplementedError("phshift constraint does not support solving for per0 yet")
else:
raise NotImplementedError
return lhs, rhs, {'orbit': orbit} | TODO: add documentation |
def validation_statuses(self, area_uuid):
"""
Get count of validation statuses for all files in upload_area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: a dict with key for each state and value being the count of files in that state
:rtype: dict
:raises UploadApiException: if information could not be obtained
"""
path = "/area/{uuid}/validations".format(uuid=area_uuid)
result = self._make_request('get', path)
return result.json() | Get count of validation statuses for all files in upload_area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: a dict with key for each state and value being the count of files in that state
:rtype: dict
:raises UploadApiException: if information could not be obtained |
def commit(self):
"""Commit the transaction"""
self.flush()
if hasattr(self, 'transaction') and self.transaction.is_active:
self.transaction.commit()
elif hasattr(self, 'connection'):
self.connection.commit() | Commit the transaction |
def dump_table(self, table, drop_statement=True):
"""Export a table structure and data to SQL file for backup or later import."""
create_statement = self.get_table_definition(table)
data = self.select_all(table)
statements = ['\n', sql_file_comment(''),
sql_file_comment('Table structure and data dump for {0}'.format(table)), sql_file_comment('')]
if drop_statement:
statements.append('\nDROP TABLE IF EXISTS {0};'.format(wrap(table)))
statements.append('{0};\n'.format(create_statement))
if len(data) > 0:
statements.append('{0};'.format(insert_statement(table, self.get_columns(table), data)))
return '\n'.join(statements) | Export a table structure and data to SQL file for backup or later import. |
def transform(self, path):
"""
Transform a path into an actual Python object.
        The path can be arbitrarily long. You can pass the path to a package,
a module, a class, a function or a global variable, as deep as you
want, as long as the deepest module is importable through
``importlib.import_module`` and each object is obtainable through
the ``getattr`` method. Local objects will not work.
Args:
path (str): the dot-separated path of the object.
Returns:
object: the imported module or obtained object.
"""
if path is None or not path:
return None
obj_parent_modules = path.split(".")
objects = [obj_parent_modules.pop(-1)]
while True:
try:
parent_module_path = ".".join(obj_parent_modules)
parent_module = importlib.import_module(parent_module_path)
break
except ImportError:
if len(obj_parent_modules) == 1:
raise ImportError("No module named '%s'" % obj_parent_modules[0])
objects.insert(0, obj_parent_modules.pop(-1))
current_object = parent_module
for obj in objects:
current_object = getattr(current_object, obj)
return current_object | Transform a path into an actual Python object.
The path can be arbitrarily long. You can pass the path to a package,
a module, a class, a function or a global variable, as deep as you
want, as long as the deepest module is importable through
``importlib.import_module`` and each object is obtainable through
the ``getattr`` method. Local objects will not work.
Args:
path (str): the dot-separated path of the object.
Returns:
object: the imported module or obtained object. |
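A usage sketch, assuming an instance of the owning class is at hand (the `loader` variable is a placeholder); the dotted paths point at ordinary stdlib objects.
```python
# `loader` stands in for an instance of the class defining transform().
ordered_dict_cls = loader.transform("collections.OrderedDict")  # a class
join_func = loader.transform("os.path.join")                    # a function
```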
def stop(self):
"""Stops the worker threads and waits for them to finish"""
self.working = False
for w in self.workers:
w.join()
self.workers = [] | Stops the worker threads and waits for them to finish |
async def reportCompleted(self, *args, **kwargs):
"""
Report Run Completed
Report a task completed, resolving the run as `completed`.
This method gives output: ``v1/task-status-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["reportCompleted"], *args, **kwargs) | Report Run Completed
Report a task completed, resolving the run as `completed`.
This method gives output: ``v1/task-status-response.json#``
This method is ``stable`` |
def rvs(self, *args, **kwargs):
"""Draw Random Variates.
Parameters
----------
size: int, optional (default=1)
        random_state: optional (default=None)
"""
# TODO REVERSE THIS FUCK PYTHON2
size = kwargs.pop('size', 1)
        random_state = kwargs.pop('random_state', None)
# don't ask me why it uses `self._size`
return self._kde.sample(n_samples=size, random_state=random_state) | Draw Random Variates.
Parameters
----------
size: int, optional (default=1)
random_state: optional (default=None)
def fixed_string(self, data=None):
"""
The fixed string is used to identify a particular Yubikey device.
The fixed string is referred to as the 'Token Identifier' in OATH-HOTP mode.
The length of the fixed string can be set between 0 and 16 bytes.
Tip: This can also be used to extend the length of a static password.
"""
old = self.fixed
if data != None:
new = self._decode_input_string(data)
if len(new) <= 16:
self.fixed = new
else:
raise yubico_exception.InputError('The "fixed" string must be 0..16 bytes')
return old | The fixed string is used to identify a particular Yubikey device.
The fixed string is referred to as the 'Token Identifier' in OATH-HOTP mode.
The length of the fixed string can be set between 0 and 16 bytes.
Tip: This can also be used to extend the length of a static password. |
def load_tiff(file):
"""
Load a geotiff raster keeping ndv values using a masked array
Usage:
data = load_tiff(file)
"""
ndv, xsize, ysize, geot, projection, datatype = get_geo_info(file)
data = gdalnumeric.LoadFile(file)
data = np.ma.masked_array(data, mask=data == ndv, fill_value=ndv)
return data | Load a geotiff raster keeping ndv values using a masked array
Usage:
data = load_tiff(file) |
def add_markdown(self, markdown):
"""stub"""
if markdown is None:
raise NullArgument('markdown cannot be None')
if not self.my_osid_object_form._is_valid_string(
markdown, self.get_markdown_metadata()):
raise InvalidArgument('markdown')
self.my_osid_object_form._my_map['markdown'] = markdown | stub |
def logToFile(path, level=logging.INFO):
"""
Create a log handler that logs to the given file.
"""
logger = logging.getLogger()
logger.setLevel(level)
formatter = logging.Formatter(
'%(asctime)s %(name)s %(levelname)s %(message)s')
handler = logging.FileHandler(path)
handler.setFormatter(formatter)
logger.addHandler(handler) | Create a log handler that logs to the given file. |
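A usage sketch with the standard logging module; the log path is illustrative.
```python
import logging

logToFile("/tmp/app.log", level=logging.DEBUG)  # illustrative path
logging.getLogger(__name__).info("file logging is now active")
```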
def read_mm_uic2(fd, byte_order, dtype, count):
"""Read MM_UIC2 tag from file and return as dictionary."""
result = {'number_planes': count}
values = numpy.fromfile(fd, byte_order+'I', 6*count)
result['z_distance'] = values[0::6] // values[1::6]
#result['date_created'] = tuple(values[2::6])
#result['time_created'] = tuple(values[3::6])
#result['date_modified'] = tuple(values[4::6])
#result['time_modified'] = tuple(values[5::6])
return result | Read MM_UIC2 tag from file and return as dictionary. |
def printProfile(self, reset=False):
"""
Prints profiling information.
Parameters:
----------------------------
@param reset (bool)
If set to True, the profiling will be reset.
"""
print "Profiling information for {}".format(type(self).__name__)
totalTime = 0.000001
for region in self.network.regions.values():
timer = region.getComputeTimer()
totalTime += timer.getElapsed()
# Sort the region names
regionNames = list(self.network.regions.keys())
regionNames.sort()
count = 1
profileInfo = []
L2Time = 0.0
L4Time = 0.0
for regionName in regionNames:
region = self.network.regions[regionName]
timer = region.getComputeTimer()
count = max(timer.getStartCount(), count)
profileInfo.append([region.name,
timer.getStartCount(),
timer.getElapsed(),
100.0 * timer.getElapsed() / totalTime,
timer.getElapsed() / max(timer.getStartCount(), 1)])
if "L2Column" in regionName:
L2Time += timer.getElapsed()
elif "L4Column" in regionName:
L4Time += timer.getElapsed()
profileInfo.append(
["Total time", "", totalTime, "100.0", totalTime / count])
print tabulate(profileInfo, headers=["Region", "Count",
"Elapsed", "Pct of total",
"Secs/iteration"],
tablefmt="grid", floatfmt="6.3f")
print
print "Total time in L2 =", L2Time
print "Total time in L4 =", L4Time
if reset:
self.resetProfile() | Prints profiling information.
Parameters:
----------------------------
@param reset (bool)
If set to True, the profiling will be reset. |
def setup_sft_obs(sft_file,ins_file=None,start_datetime=None,times=None,ncomp=1):
"""writes an instruction file for a mt3d-usgs sft output file
Parameters
----------
sft_file : str
the sft output file (ASCII)
ins_file : str
the name of the instruction file to create. If None, the name
is <sft_file>.ins. Default is None
start_datetime : str
a pandas.to_datetime() compatible str. If not None,
then the resulting observation names have the datetime
suffix. If None, the suffix is the output totim. Default
is None
times : iterable
a container of times to make observations for. If None, all times are used.
Default is None.
ncomp : int
number of components in transport model. Default is 1.
Returns
-------
df : pandas.DataFrame
a dataframe with obsnme and obsval for the sft simulated concentrations and flows.
If inschek was not successfully run, then returns None
Note
----
    sets up observations for SW conc, GW conc and flowgw for all times and reaches.
"""
df = pd.read_csv(sft_file,skiprows=1,delim_whitespace=True)
df.columns = [c.lower().replace("-","_") for c in df.columns]
if times is None:
times = df.time.unique()
missing = []
utimes = df.time.unique()
for t in times:
if t not in utimes:
missing.append(str(t))
if len(missing) > 0:
print(df.time)
raise Exception("the following times are missing:{0}".format(','.join(missing)))
with open("sft_obs.config",'w') as f:
f.write(sft_file+'\n')
[f.write("{0:15.6E}\n".format(t)) for t in times]
df = apply_sft_obs()
utimes = df.time.unique()
for t in times:
assert t in utimes,"time {0} missing in processed dataframe".format(t)
idx = df.time.apply(lambda x: x in times)
if start_datetime is not None:
start_datetime = pd.to_datetime(start_datetime)
df.loc[:,"time_str"] = pd.to_timedelta(df.time,unit='d') + start_datetime
df.loc[:,"time_str"] = df.time_str.apply(lambda x: datetime.strftime(x,"%Y%m%d"))
else:
df.loc[:,"time_str"] = df.time.apply(lambda x: "{0:08.2f}".format(x))
df.loc[:,"ins_str"] = "l1\n"
# check for multiple components
df_times = df.loc[idx,:]
df.loc[:,"icomp"] = 1
icomp_idx = list(df.columns).index("icomp")
for t in times:
df_time = df.loc[df.time==t,:]
vc = df_time.sfr_node.value_counts()
ncomp = vc.max()
assert np.all(vc.values==ncomp)
nstrm = df_time.shape[0] / ncomp
for icomp in range(ncomp):
s = int(nstrm*(icomp))
e = int(nstrm*(icomp+1))
idxs = df_time.iloc[s:e,:].index
#df_time.iloc[nstrm*(icomp):nstrm*(icomp+1),icomp_idx.loc["icomp"] = int(icomp+1)
df_time.loc[idxs,"icomp"] = int(icomp+1)
df.loc[df_time.index,"ins_str"] = df_time.apply(lambda x: "l1 w w !sfrc{0}_{1}_{2}! !swgw{0}_{1}_{2}! !gwcn{0}_{1}_{2}!\n".\
format(x.sfr_node,x.icomp,x.time_str),axis=1)
df.index = np.arange(df.shape[0])
if ins_file is None:
ins_file = sft_file+".processed.ins"
with open(ins_file,'w') as f:
f.write("pif ~\nl1\n")
[f.write(i) for i in df.ins_str]
#df = _try_run_inschek(ins_file,sft_file+".processed")
df = try_process_ins_file(ins_file,sft_file+".processed")
if df is not None:
return df
else:
return None | writes an instruction file for a mt3d-usgs sft output file
Parameters
----------
sft_file : str
the sft output file (ASCII)
ins_file : str
the name of the instruction file to create. If None, the name
is <sft_file>.ins. Default is None
start_datetime : str
a pandas.to_datetime() compatible str. If not None,
then the resulting observation names have the datetime
suffix. If None, the suffix is the output totim. Default
is None
times : iterable
a container of times to make observations for. If None, all times are used.
Default is None.
ncomp : int
number of components in transport model. Default is 1.
Returns
-------
df : pandas.DataFrame
a dataframe with obsnme and obsval for the sft simulated concentrations and flows.
If inschek was not successfully run, then returns None
Note
----
sets up observations for SW conc, GW conc and flowgw for all times and reaches.
def _init_map(self):
"""call these all manually because non-cooperative"""
DecimalAnswerFormRecord._init_map(self)
DecimalValuesFormRecord._init_map(self)
TextAnswerFormRecord._init_map(self)
TextsFormRecord._init_map(self)
super(edXNumericResponseAnswerFormRecord, self)._init_map() | call these all manually because non-cooperative |
def close(self):
"""Close and exit the connection."""
try:
self.ssh.close()
self.logger.debug("close connect succeed.")
except paramiko.SSHException as e:
self.unknown("close connect error: %s" % e) | Close and exit the connection. |
def createBlocksFromHTML(cls, html, encoding='utf-8'):
'''
createBlocksFromHTML - Returns the root level node (unless multiple nodes), and
a list of "blocks" added (text and nodes).
@return list< str/AdvancedTag > - List of blocks created. May be strings (text nodes) or AdvancedTag (tags)
NOTE:
Results may be checked by:
issubclass(block.__class__, AdvancedTag)
If True, block is a tag, otherwise, it is a text node
'''
parser = cls(encoding=encoding)
parser.parseStr(html)
rootNode = parser.getRoot()
rootNode.remove()
return rootNode.blocks | createBlocksFromHTML - Returns the root level node (unless multiple nodes), and
a list of "blocks" added (text and nodes).
@return list< str/AdvancedTag > - List of blocks created. May be strings (text nodes) or AdvancedTag (tags)
NOTE:
Results may be checked by:
issubclass(block.__class__, AdvancedTag)
If True, block is a tag, otherwise, it is a text node |
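A hedged usage sketch, assuming the classmethod above lives on the AdvancedHTMLParser parser class (as its use of `cls` suggests) and that AdvancedTag is importable from the same package.
```python
from AdvancedHTMLParser import AdvancedHTMLParser, AdvancedTag  # assumed import paths

blocks = AdvancedHTMLParser.createBlocksFromHTML("<p>hello <b>world</b></p>")
for block in blocks:
    if issubclass(block.__class__, AdvancedTag):
        print("tag:", block.tagName)
    else:
        print("text:", block)
```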
def _start(self, update_cmd):
"""Start the long running operation.
On completion, runs any callbacks.
        :param callable update_cmd: The API request to check the status of
the operation.
"""
try:
self._poll(update_cmd)
except BadStatus:
self._operation.status = 'Failed'
self._exception = CloudError(self._response)
except BadResponse as err:
self._operation.status = 'Failed'
self._exception = CloudError(self._response, str(err))
except OperationFailed:
self._exception = CloudError(self._response)
except Exception as err:
self._exception = err
finally:
self._done.set()
callbacks, self._callbacks = self._callbacks, []
while callbacks:
for call in callbacks:
call(self._operation)
callbacks, self._callbacks = self._callbacks, [] | Start the long running operation.
On completion, runs any callbacks.
:param callable update_cmd: The API request to check the status of
the operation. |
def close(self):
"""Closes the cursor.
No further operations are allowed once the cursor is closed.
If the cursor is used in a ``with`` statement, this method will
be automatically called at the end of the ``with`` block.
"""
if self._closed:
raise ProgrammingError('the cursor is already closed')
if self._id is not None:
self._connection._client.close_statement(self._connection._id, self._id)
self._id = None
self._signature = None
self._column_data_types = []
self._frame = None
self._pos = None
self._closed = True | Closes the cursor.
No further operations are allowed once the cursor is closed.
If the cursor is used in a ``with`` statement, this method will
be automatically called at the end of the ``with`` block. |
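A sketch of the `with`-statement behaviour described above, assuming a DB-API style connection from the same client library (`conn` and the query are placeholders).
```python
# `conn` is a placeholder for an open connection from this client library.
with conn.cursor() as cursor:
    cursor.execute("SELECT col FROM some_table")  # illustrative query
    rows = cursor.fetchall()
# Leaving the block closes the cursor; using it afterwards raises ProgrammingError.
```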