text | score
---|---|
def smooth(self):
"""
Read/write boolean specifying whether to use curve smoothing to
form the line connecting the data points in this series into
a continuous curve. If |False|, a series of straight line segments
        is used to connect the points.
"""
smooth = self._element.smooth
if smooth is None:
return True
return smooth.val | 0.004854 |
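A minimal usage sketch for the property above; `series` is a placeholder for the chart-series object that exposes it, and the read/write behavior is taken from the docstring.

```python
# Hypothetical usage: toggle smoothing on a series and read the flag back.
series.smooth = False    # draw straight segments between points
print(series.smooth)     # reflects the underlying XML element's smooth value
```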
def equalizeImage(img, save_path=None, name_additive='_eqHist'):
    '''
    Equalize the histogram (contrast) of an image;
    works with RGB/multi-channel images and flat arrays.
    @param img - image path or np.array
    @param save_path - if given, output images will be saved there
    @param name_additive - if given, this suffix is appended to the output image names
    @return the equalized image if the input is a numpy array and no save_path is given
    @return None otherwise
    '''
if isinstance(img, string_types):
img = PathStr(img)
if not img.exists():
raise Exception("image path doesn't exist")
img_name = img.basename().replace('.', '%s.' % name_additive)
if save_path is None:
save_path = img.dirname()
img = cv2.imread(img)
if img.dtype != np.dtype('uint8'):
# openCV cannot work with float arrays or uint > 8bit
eqFn = _equalizeHistogram
else:
eqFn = cv2.equalizeHist
if len(img.shape) == 3: # multi channel img like rgb
for i in range(img.shape[2]):
img[:, :, i] = eqFn(img[:, :, i])
else: # grey scale image
img = eqFn(img)
if save_path:
img_name = PathStr(save_path).join(img_name)
cv2.imwrite(img_name, img)
return img | 0.002239 |
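A short usage sketch for equalizeImage; the file names are placeholders, and passing a path string works the same way provided `PathStr`/`string_types` are available from the module's imports.

```python
import cv2

img = cv2.imread("photo.png")        # hypothetical input image
eq = equalizeImage(img)              # returns the equalized array (no save_path given)
cv2.imwrite("photo_eq.png", eq)
```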
def propagate_cols(self, col_names, target_df_name, source_df_name,
down=True):
"""
        Put the data for the given col_names from source_df into target_df.
Used to get "azimuth" from sample table into measurements table
(for example).
Note: if getting data from the sample table, don't include "sample"
in the col_names list. It is included automatically.
"""
# make sure target table is read in
if target_df_name not in self.tables:
self.add_magic_table(target_df_name)
if target_df_name not in self.tables:
print("-W- Couldn't read in {} table".format(target_df_name))
return
# make sure source table is read in
        if source_df_name not in self.tables:
            self.add_magic_table(source_df_name)
            if source_df_name not in self.tables:
                print("-W- Couldn't read in {} table".format(source_df_name))
                return
# make sure col_names are all available in source table
source_df = self.tables[source_df_name].df
if not set(col_names).issubset(source_df.columns):
for col in col_names[:]:
if col not in source_df.columns:
print("-W- Column '{}' isn't in {} table, skipping it".format(col, source_df_name))
col_names.remove(col)
if not col_names:
print("-W- Invalid or missing column names, could not propagate columns")
return
#
if down:
add_name = source_df_name[:-1]
if 'measurements' in self.tables.keys():
self.propagate_location_to_measurements()
elif 'specimens' in self.tables.keys():
self.propagate_location_to_specimens()
else:
self.propagate_name_down('location', 'sites')
else:
add_name = target_df_name[:-1]
# get dataframes for merge
target_df = self.tables[target_df_name].df
source_df = self.tables[source_df_name].df
backup_source_df = source_df.copy()
# finesse source_df to make sure it has all the right columns
# and no unnecessary duplicates
if source_df_name[:-1] not in source_df.columns:
source_df[source_df_name[:-1]] = source_df.index
source_df = source_df.drop_duplicates(inplace=False, subset=col_names + [source_df_name[:-1]])
source_df = source_df.groupby(source_df.index, sort=False).fillna(method='ffill')
source_df = source_df.groupby(source_df.index, sort=False).fillna(method='bfill')
# if the groupby/fillna operation fails due to pandas bug, do the same by hand:
if not len(source_df):
new = []
grouped = backup_source_df.groupby(backup_source_df.index)
for label, group in grouped:
new_group = group.fillna(method="ffill")
new_group = new_group.fillna(method="bfill")
new.append(new_group)
source_df = pd.concat(new, sort=True)
# if the groupby/fillna still doesn't work, we are out of luck
if not len(source_df):
return target_df
# propagate down
if down:
# do merge
target_df[add_name] = target_df[add_name].astype(str)
target_df = target_df.merge(source_df[col_names], how='left',
left_on=add_name, right_index=True,
suffixes=["_target", "_source"])
# propagate up
else:
# do merge
col_names.append(add_name)
source_df[add_name] = source_df[add_name].astype(str)
target_df = target_df.merge(source_df[col_names],
how='left', left_index=True,
right_on=add_name,
suffixes=['_target', '_source'])
target_df.index = target_df[add_name]
target_df.drop([add_name + "_source", add_name + "_target"], axis=1, inplace=True)
# ignore any duplicate rows
target_df.drop_duplicates(inplace=True)
# mess with target_df to remove un-needed merge columns
for col in col_names:
# if there has been a previous merge, consolidate and delete data
if col + "_target" in target_df.columns:
# prioritize values from target df
new_arr = np.where(target_df[col + "_target"],
target_df[col + "_target"],
target_df[col + "_source"])
target_df.rename(columns={col + "_target": col}, inplace=True)
target_df[col] = new_arr
if col + "_source" in target_df.columns:
# delete extra merge column
del target_df[col + "_source"]
#
# drop any duplicate rows
target_df.drop_duplicates(inplace=True)
self.tables[target_df_name].df = target_df
return target_df | 0.001965 |
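A hedged usage sketch: assuming `con` is the contribution-style object this method belongs to, with 'samples' and 'measurements' tables already loaded, this pulls sample-level columns down into the measurements table, mirroring the docstring's azimuth example.

```python
# Hypothetical call; column and table names follow the docstring's example.
meas_df = con.propagate_cols(col_names=['azimuth', 'dip'],
                             target_df_name='measurements',
                             source_df_name='samples')
```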
def main():
"""
NAME
chi_magic.py
DESCRIPTION
plots magnetic susceptibility as a function of frequency and temperature and AC field
SYNTAX
chi_magic.py [command line options]
OPTIONS
-h prints help message and quits
-i allows interactive setting of FILE and temperature step
-f FILE, specify magic_measurements format file
-T IND, specify temperature step to plot
-e EXP, specify experiment name to plot
-fmt [svg,jpg,png,pdf] set figure format [default is svg]
-sav save figure and quit
DEFAULTS
FILE: magic_measurements.txt
IND: first
SPEC: step through one by one
"""
cont, FTinit, BTinit, k = "", 0, 0, 0
meas_file = "magic_measurements.txt"
spec = ""
Tind, cont = 0, ""
EXP = ""
fmt = 'svg' # default image type for saving
plot = 0
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-i' in sys.argv:
fname = input(
"Input magic_measurements file name? [magic_measurements.txt] ")
if fname != "":
meas_file = fname
if '-e' in sys.argv:
ind = sys.argv.index('-e')
EXP = sys.argv[ind+1]
if '-f' in sys.argv:
ind = sys.argv.index('-f')
meas_file = sys.argv[ind+1]
if '-T' in sys.argv:
ind = sys.argv.index('-T')
Tind = int(sys.argv[ind+1])
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind+1]
if '-sav' in sys.argv:
plot = 1
#
meas_data, file_type = pmag.magic_read(meas_file)
#
# get list of unique experiment names
#
    # initialize some variables (a continuation flag, plot initialization flags and the experiment counter)
experiment_names = []
for rec in meas_data:
if rec['magic_experiment_name'] not in experiment_names:
experiment_names.append(rec['magic_experiment_name'])
#
# hunt through by experiment name
if EXP != "":
try:
k = experiment_names.index(EXP)
except:
print("Bad experiment name")
sys.exit()
while k < len(experiment_names):
e = experiment_names[k]
if EXP == "":
print(e, k+1, 'out of ', len(experiment_names))
#
# initialize lists of data, susceptibility, temperature, frequency and field
X, T, F, B = [], [], [], []
for rec in meas_data:
methcodes = rec['magic_method_codes']
meths = methcodes.strip().split(':')
if rec['magic_experiment_name'] == e and "LP-X" in meths: # looking for chi measurement
if 'measurement_temp' not in list(rec.keys()):
rec['measurement_temp'] = '300' # set defaults
if 'measurement_freq' not in list(rec.keys()):
rec['measurement_freq'] = '0' # set defaults
if 'measurement_lab_field_ac' not in list(rec.keys()):
rec['measurement_lab_field_ac'] = '0' # set default
if 'measurement_x' in rec.keys():
# backward compatibility
X.append(float(rec['measurement_x']))
else:
# data model 2.5
X.append(float(rec['measurement_chi_volume']))
T.append(float(rec['measurement_temp']))
F.append(float(rec['measurement_freq']))
B.append(float(rec['measurement_lab_field_ac']))
#
# get unique list of Ts,Fs, and Bs
#
Ts, Fs, Bs = [], [], []
for k in range(len(X)): # hunt through all the measurements
if T[k] not in Ts:
Ts.append(T[k]) # append if not in list
if F[k] not in Fs:
Fs.append(F[k])
if B[k] not in Bs:
Bs.append(B[k])
Ts.sort() # sort list of temperatures, frequencies and fields
Fs.sort()
Bs.sort()
if '-x' in sys.argv:
k = len(experiment_names)+1 # just plot the one
else:
k += 1 # increment experiment number
#
# plot chi versus T and F holding B constant
#
plotnum = 1 # initialize plot number to 1
if len(X) > 2: # if there are any data to plot, continue
b = Bs[-1] # keeping field constant and at maximum
XTF = [] # initialize list of chi versus Temp and freq
for f in Fs: # step through frequencies sequentially
XT = [] # initialize list of chi versus temp
for kk in range(len(X)): # hunt through all the data
if F[kk] == f and B[kk] == b: # select data with given freq and field
XT.append([X[kk], T[kk]]) # append to list
XTF.append(XT) # append list to list of frequencies
if len(XT) > 1: # if there are any temperature dependent data
pmagplotlib.plot_init(plotnum, 5, 5) # initialize plot
# call the plotting function
pmagplotlib.plot_xtf(plotnum, XTF, Fs, e, b)
if plot == 0:
pmagplotlib.draw_figs({'fig': plotnum}) # make it visible
plotnum += 1 # increment plot number
f = Fs[0] # set frequency to minimum
            XTB = []  # initialize list of chi versus Temp and field
for b in Bs: # step through field values
XT = [] # initial chi versus temp list for this field
for kk in range(len(X)): # hunt through all the data
if F[kk] == f and B[kk] == b: # select data with given freq and field
XT.append([X[kk], T[kk]]) # append to list
XTB.append(XT)
if len(XT) > 1: # if there are any temperature dependent data
pmagplotlib.plot_init(plotnum, 5, 5) # set up plot
# call the plotting function
pmagplotlib.plot_xtb(plotnum, XTB, Bs, e, f)
if plot == 0:
pmagplotlib.draw_figs({'fig': plotnum})
plotnum += 1 # increment plot number
if '-i' in sys.argv:
for ind in range(len(Ts)): # print list of temperatures available
print(ind, int(Ts[ind]))
cont = input(
"Enter index of desired temperature step, s[a]ve plots, [return] to quit ")
if cont == 'a':
files = {}
PLTS = {}
for p in range(1, plotnum):
key = str(p)
files[key] = e+'_'+key+'.'+fmt
PLTS[key] = key
pmagplotlib.save_plots(PLTS, files)
cont = input(
"Enter index of desired temperature step, s[a]ve plots, [return] to quit ")
if cont == "":
cont = 'q'
while cont != "q":
if '-i' in sys.argv:
Tind = int(cont) # set temperature index
b = Bs[-1] # set field to max available
XF = [] # initial chi versus frequency list
for kk in range(len(X)): # hunt through the data
if T[kk] == Ts[Tind] and B[kk] == b: # if temperature and field match,
XF.append([X[kk], F[kk]]) # append the data
if len(XF) > 1: # if there are any data to plot
if FTinit == 0: # if not already initialized, initialize plot
# print 'initializing ',plotnum
pmagplotlib.plot_init(plotnum, 5, 5)
FTinit = 1
XFplot = plotnum
plotnum += 1 # increment plotnum
pmagplotlib.plot_xft(XFplot, XF, Ts[Tind], e, b)
if plot == 0:
pmagplotlib.draw_figs({'fig': plotnum})
else:
                print(
                    '\n *** Skipping susceptibility-frequency plot as a function of temperature *** \n')
f = Fs[0] # set frequency to minimum available
XB = [] # initialize chi versus field list
for kk in range(len(X)): # hunt through the data
# if temperature and field match those desired
if T[kk] == Ts[Tind] and F[kk] == f:
XB.append([X[kk], B[kk]]) # append the data to list
if len(XB) > 4: # if there are any data
if BTinit == 0: # if plot not already initialized
pmagplotlib.plot_init(plotnum, 5, 5) # do it
BTinit = 1
# and call plotting function
pmagplotlib.plot_xbt(plotnum, XB, Ts[Tind], e, f)
if plot == 0:
pmagplotlib.draw_figs({'fig': plotnum})
else:
                print(
                    'Skipping susceptibility - AC field plot as a function of temperature')
files = {}
PLTS = {}
for p in range(1, plotnum):
key = str(p)
files[key] = e+'_'+key+'.'+fmt
PLTS[key] = p
if '-i' in sys.argv:
# just in case you forgot, print out a new list of temperatures
for ind in range(len(Ts)):
print(ind, int(Ts[ind]))
# ask for new temp
cont = input(
"Enter index of next temperature step, s[a]ve plots, [return] to quit ")
if cont == "":
sys.exit()
if cont == 'a':
pmagplotlib.save_plots(PLTS, files)
cont = input(
"Enter index of desired temperature step, s[a]ve plots, [return] to quit ")
if cont == "":
sys.exit()
elif plot == 0:
ans = input(
"enter s[a]ve to save files, [return] to quit ")
if ans == 'a':
pmagplotlib.save_plots(PLTS, files)
sys.exit()
else:
sys.exit()
else:
pmagplotlib.save_plots(PLTS, files)
sys.exit() | 0.00169 |
def searchadmin(self, searchstring=None):
""" search user page """
self._check_auth(must_admin=True)
is_admin = self._check_admin()
if searchstring is not None:
res = self._search(searchstring)
else:
res = None
attrs_list = self.attributes.get_search_attributes()
return self.temp['searchadmin.tmpl'].render(
searchresult=res,
attrs_list=attrs_list,
is_admin=is_admin,
custom_js=self.custom_js,
notifications=self._empty_notification(),
) | 0.00339 |
def get_display(self):
"""
returns information about the display, including
brightness, screensaver etc.
"""
log.debug("getting display information...")
cmd, url = DEVICE_URLS["get_display"]
return self._exec(cmd, url) | 0.007299 |
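A small usage sketch; `device` is a placeholder for the client object that defines `_exec` and `DEVICE_URLS`.

```python
info = device.get_display()
print(info)   # brightness, screensaver and related fields, per the device API
```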
def delete_user(self, username, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html>`_
:arg username: username
:arg refresh: If `true` (the default) then refresh the affected shards
to make this operation visible to search, if `wait_for` then wait
for a refresh to make this operation visible to search, if `false`
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for'
"""
if username in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'username'.")
return self.transport.perform_request(
"DELETE", _make_path("_security", "user", username), params=params
) | 0.004944 |
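A hedged usage sketch, assuming this method is reached through the security namespace of an elasticsearch-py-style client (`es.security` is a placeholder):

```python
# Remove a user and wait for the change to become visible to search.
es.security.delete_user("jdoe", params={"refresh": "wait_for"})
```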
def crypto_scalarmult_ed25519_base(n):
"""
Computes and returns the scalar product of a standard group element and an
integer ``n`` on the edwards25519 curve.
:param n: a :py:data:`.crypto_scalarmult_ed25519_SCALARBYTES` long bytes
sequence representing a scalar
:type n: bytes
:return: a point on the edwards25519 curve, represented as a
:py:data:`.crypto_scalarmult_ed25519_BYTES` long bytes sequence
:rtype: bytes
"""
ensure(isinstance(n, bytes) and
len(n) == crypto_scalarmult_ed25519_SCALARBYTES,
'Input must be a {} long bytes sequence'.format(
'crypto_scalarmult_ed25519_SCALARBYTES'),
raising=exc.TypeError)
q = ffi.new("unsigned char[]", crypto_scalarmult_ed25519_BYTES)
rc = lib.crypto_scalarmult_ed25519_base(q, n)
ensure(rc == 0,
'Unexpected library error',
raising=exc.RuntimeError)
return ffi.buffer(q, crypto_scalarmult_ed25519_BYTES)[:] | 0.000999 |
def get_rt_ticker(self, code, num=500):
"""
        Get real-time tick-by-tick trade data for the given stock (the most recent num ticks).
        :param code: stock code
        :param num: number of most recent ticks to return (capped; at most the latest 1000)
        :return: (ret, data)
                ret == RET_OK: data is a pd.DataFrame with the columns below
                ret != RET_OK: data is an error string
        =====================   ===========   ==============================================================
        field                   type          description
        =====================   ===========   ==============================================================
        stock_code              str           stock code
        sequence                int           tick sequence number
        time                    str           trade time (US stocks default to US Eastern time; HK and A-shares default to Beijing time)
        price                   float         trade price
        volume                  int           trade volume (number of shares)
        turnover                float         trade turnover
        ticker_direction        str           tick direction
        type                    str           tick type, see TickerType
        =====================   ===========   ==============================================================
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
if num is None or isinstance(num, int) is False:
error_str = ERROR_STR_PREFIX + "the type of num param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
TickerQuery.pack_req,
TickerQuery.unpack_rsp,
)
kargs = {
"code": code,
"num": num,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, ticker_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'time', 'price', 'volume', 'turnover', "ticker_direction",
'sequence', 'type'
]
ticker_frame_table = pd.DataFrame(ticker_list, columns=col_list)
return RET_OK, ticker_frame_table | 0.002686 |
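A usage sketch, assuming `quote_ctx` is an open quote context from the same library; the stock code and count are placeholders.

```python
ret, data = quote_ctx.get_rt_ticker('HK.00700', num=100)
if ret == RET_OK:
    print(data[['time', 'price', 'volume']].head())
else:
    print('error:', data)
```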
def _is_whitelisted(self, email):
"""Check if an email is in the whitelist. If there's no whitelist,
it's assumed it's not whitelisted."""
return hasattr(settings, "SAFE_EMAIL_WHITELIST") and \
any(re.match(m, email) for m in settings.SAFE_EMAIL_WHITELIST) | 0.006826 |
def sg_int(tensor, opt):
r"""Casts a tensor to intx.
See `tf.cast()` in tensorflow.
Args:
tensor: A `Tensor` or `SparseTensor` (automatically given by chain).
opt:
name: If provided, it replaces current tensor's name.
Returns:
A `Tensor` or `SparseTensor` with same shape as `tensor`.
"""
return tf.cast(tensor, tf.sg_intx, name=opt.name) | 0.005063 |
def update(verbose=False):
"""
    Update local dictionaries by downloading the latest version from the
server, if there's one.
"""
local = local_list()
remote = dict(remote_list())
updated = False
for name, date in local:
if name in remote and remote[name] > date:
updated = True
if verbose:
print("Updating '%s'..." % name)
download(name)
if not updated and verbose:
print("Nothing to update.") | 0.002 |
def iter(self, offset=0, count=None, pagesize=None, **kwargs):
"""Iterates over the collection.
This method is equivalent to the :meth:`list` method, but
it returns an iterator and can load a certain number of entities at a
time from the server.
:param offset: The index of the first entity to return (optional).
:type offset: ``integer``
:param count: The maximum number of entities to return (optional).
:type count: ``integer``
:param pagesize: The number of entities to load (optional).
:type pagesize: ``integer``
:param kwargs: Additional arguments (optional):
- "search" (``string``): The search query to filter responses.
- "sort_dir" (``string``): The direction to sort returned items:
"asc" or "desc".
- "sort_key" (``string``): The field to use for sorting (optional).
- "sort_mode" (``string``): The collating sequence for sorting
returned items: "auto", "alpha", "alpha_case", or "num".
:type kwargs: ``dict``
**Example**::
import splunklib.client as client
s = client.connect(...)
for saved_search in s.saved_searches.iter(pagesize=10):
# Loads 10 saved searches at a time from the
# server.
...
"""
assert pagesize is None or pagesize > 0
if count is None:
count = self.null_count
fetched = 0
while count == self.null_count or fetched < count:
response = self.get(count=pagesize or count, offset=offset, **kwargs)
items = self._load_list(response)
N = len(items)
fetched += N
for item in items:
yield item
if pagesize is None or N < pagesize:
break
offset += N
logging.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs) | 0.001966 |
def create_or_update_lun_id(self, volume_id, lun_id):
"""Set the LUN ID on a volume.
:param integer volume_id: The id of the volume
:param integer lun_id: LUN ID to set on the volume
:return: a SoftLayer_Network_Storage_Property object
"""
return self.client.call('Network_Storage', 'createOrUpdateLunId',
lun_id, id=volume_id) | 0.004902 |
def show_plain_text(self, text):
"""Show text in plain mode"""
self.switch_to_plugin()
self.switch_to_plain_text()
self.set_plain_text(text, is_code=False) | 0.010471 |
def stop(self):
"""
Stop this server so that the calling process can exit
"""
# unsetup_fuse()
self.fuse_process.teardown()
for uuid in self.processes:
self.processes[uuid].terminate() | 0.008197 |
async def list(self) -> List[str]:
"""
Return list of pool names configured, empty list for none.
:return: list of pool names.
"""
LOGGER.debug('NodePoolManager.list >>>')
rv = [p['pool'] for p in await pool.list_pools()]
LOGGER.debug('NodePoolManager.list <<< %s', rv)
return rv | 0.005764 |
def compose(funcs:List[Callable])->Callable:
"Compose `funcs`"
def compose_(funcs, x, *args, **kwargs):
for f in listify(funcs): x = f(x, *args, **kwargs)
return x
return partial(compose_, funcs) | 0.017937 |
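A quick check of what `compose` produces; `listify` and `partial` are assumed to come from the module's own imports.

```python
double = lambda x: x * 2
inc = lambda x: x + 1
f = compose([double, inc])   # applies double first, then inc
print(f(3))                  # -> 7
```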
def get_snippet_content(snippet_name, **format_kwargs):
""" Load the content from a snippet file which exists in SNIPPETS_ROOT """
filename = snippet_name + '.snippet'
snippet_file = os.path.join(SNIPPETS_ROOT, filename)
if not os.path.isfile(snippet_file):
raise ValueError('could not find snippet with name ' + filename)
ret = helpers.get_file_content(snippet_file)
if format_kwargs:
ret = ret.format(**format_kwargs)
return ret | 0.00211 |
def _multiple_replace(text, search_replace_dict):
"""
Replace multiple things at once in a text.
Parameters
----------
text : str
search_replace_dict : dict
Returns
-------
replaced_text : str
Examples
--------
>>> d = {'a': 'b', 'b': 'c', 'c': 'd', 'd': 'e'}
>>> _multiple_replace('abcdefghijklm', d)
'bcdeefghijklm'
"""
# Create a regular expression from all of the dictionary keys
regex = re.compile("|".join(map(re.escape, search_replace_dict.keys())))
# For each match, look up the corresponding value in the dictionary
return regex.sub(lambda match: search_replace_dict[match.group(0)], text) | 0.001475 |
def main():
"""Return 0 on success."""
args = parse_args()
if not args.files:
return 0
with enable_sphinx_if_possible():
status = 0
pool = multiprocessing.Pool(multiprocessing.cpu_count())
try:
if len(args.files) > 1:
results = pool.map(
_check_file,
[(name, args) for name in args.files])
else:
# This is for the case where we read from standard in.
results = [_check_file((args.files[0], args))]
for (filename, errors) in results:
for error in errors:
line_number = error[0]
message = error[1]
if not re.match(r'\([A-Z]+/[0-9]+\)', message):
message = '(ERROR/3) ' + message
output_message('{}:{}: {}'.format(filename,
line_number,
message))
status = 1
except (IOError, UnicodeError) as exception:
output_message(exception)
status = 1
return status | 0.000822 |
def handle_message(self, msg):
"""manage message of different types, and colorize output
using ansi escape codes
"""
if msg.module not in self._modules:
color, style = self._get_decoration("S")
if msg.module:
modsep = colorize_ansi(
"************* Module %s" % msg.module, color, style
)
else:
modsep = colorize_ansi("************* %s" % msg.module, color, style)
self.writeln(modsep)
self._modules.add(msg.module)
color, style = self._get_decoration(msg.C)
msg = msg._replace(
**{
attr: colorize_ansi(getattr(msg, attr), color, style)
for attr in ("msg", "symbol", "category", "C")
}
)
self.write_message(msg) | 0.003492 |
def get_list_transformer(namespaces):
"""this function returns a transformer to
find all list elements and recompute their xml:id.
Because if we duplicate lists we create invalid XML.
Each list must have its own xml:id
This is important if you want to be able to reopen the produced
document wih an XML parser. LibreOffice will fix those ids itself
silently, but lxml.etree.parse will bork on such duplicated lists
"""
return Transformer(
'//list[namespace-uri()="%s"]' % namespaces.get(
'text'
)
).attr(
'{0}id'.format(XML_NS),
lambda *args: "list{0}".format(uuid4().hex)
) | 0.001502 |
def launch_server(message_handler, options):
"""
Launch a message server
    :param message_handler: the handler object whose handle() method is called for each message
:param options: Application options for TCP, etc.
"""
logger = logging.getLogger(__name__)
# if (options.debug):
# logger.setLevel(logging.DEBUG)
# if not options.monitor_port:
# logger.warning(
# "Monitoring not enabled. No monitor-port option defined.")
# else:
# threading.Thread(target=launch_monitor_server, args=(options.host, options.monitor_port, logger)).start()
# Create the server, binding to specified host on configured port
# logger.info(
# 'Starting server on host %s port %d Python version %s.%s.%s' % ((options.host, options.port) + sys.version_info[:3]))
# server = ThreadedTCPServer((options.host, options.port),
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
try:
while True:
logger.debug('waiting for more data')
if not message_handler.handle():
break
logger.warning("I/O stream closed from client")
except KeyboardInterrupt:
logger.info("I/O stream closed from client exiting...")
os._exit(142)
except:
logger.exception("Error encountered handling message") | 0.004418 |
def write_constraints(self, table):
"""Write DDL of `table` constraints to the output file
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
self.f.write('\n'.join(super(PostgresFileWriter, self).write_constraints(table))) | 0.010283 |
def run(self):
"""Periodically sends buffered operations and/or commit.
"""
if not self._should_auto_commit and not self._should_auto_send:
return
last_send, last_commit = 0, 0
while not self._stopped:
if self._should_auto_commit:
if last_commit > self._commit_interval:
self._docman.commit()
# commit also sends so reset both
last_send, last_commit = 0, 0
# Give a chance to exit the loop
if self._stopped:
break
if self._should_auto_send:
if last_send > self._send_interval:
self._docman.send_buffered_operations()
last_send = 0
time.sleep(self._sleep_interval)
last_send += self._sleep_interval
last_commit += self._sleep_interval | 0.002123 |
def nanfill(a, f_a, *args, **kwargs):
"""Fill masked areas with np.nan
Wrapper for functions that can't handle ma (e.g. scipy.ndimage)
This will force filters to ignore nan, but causes adjacent pixels to be set to nan as well: http://projects.scipy.org/scipy/ticket/1155
"""
a = checkma(a)
ndv = a.fill_value
#Note: The following fails for arrays that are not float (np.nan is float)
b = f_a(a.filled(np.nan), *args, **kwargs)
#the fix_invalid fill_value parameter doesn't seem to work
out = np.ma.fix_invalid(b, copy=False)
out.set_fill_value(ndv)
return out | 0.011364 |
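A usage sketch for nanfill, assuming `checkma` from the same module accepts a plain masked array; the docstring's caveat about neighbours of masked pixels also becoming masked applies here.

```python
import numpy as np
import scipy.ndimage

a = np.ma.masked_equal(np.arange(25, dtype=float).reshape(5, 5), 12.0)
smoothed = nanfill(a, scipy.ndimage.uniform_filter, size=3)
print(smoothed)   # cells adjacent to the masked value come back masked as well
```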
def create_request(self):
"""Set download requests
Create a list of DownloadRequests for all Sentinel-2 acquisitions within request's time interval and
acceptable cloud coverage.
"""
fis_service = FisService(instance_id=self.instance_id)
self.download_list = fis_service.get_request(self) | 0.008902 |
def hiddenColumns( self ):
"""
Returns a list of the hidden columns for this tree.
:return [<str>, ..]
"""
output = []
columns = self.columns()
for c, column in enumerate(columns):
if ( not self.isColumnHidden(c) ):
continue
output.append(column)
return output | 0.020513 |
def _sample_points(X, centers, oversampling_factor, random_state):
r"""
Sample points independently with probability
.. math::
p_x = \frac{\ell \cdot d^2(x, \mathcal{C})}{\phi_X(\mathcal{C})}
"""
# re-implement evaluate_cost here, to avoid redundant computation
distances = pairwise_distances(X, centers).min(1) ** 2
denom = distances.sum()
p = oversampling_factor * distances / denom
draws = random_state.uniform(size=len(p), chunks=p.chunks)
picked = p > draws
new_idxs, = da.where(picked)
return new_idxs | 0.001761 |
def calculate_fitness(self):
"""Calculcate your fitness."""
if self.fitness is not None:
raise Exception("You are calculating the fitness of agent {}, "
.format(self.id) +
"but they already have a fitness")
infos = self.infos()
said_blue = ([i for i in infos if
isinstance(i, Meme)][0].contents == "blue")
proportion = float(
max(State.query.filter_by(network_id=self.network_id).all(),
key=attrgetter('creation_time')).contents)
self.proportion = proportion
is_blue = proportion > 0.5
if said_blue is is_blue:
self.score = 1
else:
self.score = 0
is_asocial = [
i for i in infos if isinstance(i, LearningGene)
][0].contents == "asocial"
e = 2
b = 1
c = 0.3 * b
baseline = c + 0.0001
self.fitness = (baseline + self.score * b - is_asocial * c) ** e | 0.001936 |
def set_figure_params(
self,
scanpy=True,
dpi=80,
dpi_save=150,
frameon=True,
vector_friendly=True,
fontsize=14,
color_map=None,
format="pdf",
transparent=False,
ipython_format="png2x",
):
"""Set resolution/size, styling and format of figures.
Parameters
----------
scanpy : `bool`, optional (default: `True`)
Init default values for ``matplotlib.rcParams`` suited for Scanpy.
dpi : `int`, optional (default: `80`)
Resolution of rendered figures - this influences the size of figures in notebooks.
dpi_save : `int`, optional (default: `150`)
Resolution of saved figures. This should typically be higher to achieve
publication quality.
frameon : `bool`, optional (default: `True`)
Add frames and axes labels to scatter plots.
vector_friendly : `bool`, optional (default: `True`)
Plot scatter plots using `png` backend even when exporting as `pdf` or `svg`.
fontsize : `int`, optional (default: 14)
Set the fontsize for several `rcParams` entries. Ignored if `scanpy=False`.
color_map : `str`, optional (default: `None`)
Convenience method for setting the default color map. Ignored if `scanpy=False`.
format : {'png', 'pdf', 'svg', etc.}, optional (default: 'pdf')
This sets the default format for saving figures: `file_format_figs`.
        transparent : `bool`, optional (default: `False`)
            Save figures with transparent background. Sets
            `rcParams['savefig.transparent']`.
ipython_format : list of `str`, optional (default: 'png2x')
Only concerns the notebook/IPython environment; see
`IPython.core.display.set_matplotlib_formats` for more details.
"""
try:
import IPython
IPython.core.display.set_matplotlib_formats(ipython_format)
except:
pass
from matplotlib import rcParams
self._vector_friendly = vector_friendly
self.file_format_figs = format
if dpi is not None:
rcParams["figure.dpi"] = dpi
if dpi_save is not None:
rcParams["savefig.dpi"] = dpi_save
if transparent is not None:
rcParams["savefig.transparent"] = transparent
if scanpy:
from .plotting._rcmod import set_rcParams_scanpy
set_rcParams_scanpy(fontsize=fontsize, color_map=color_map)
self._frameon = frameon | 0.00384 |
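A usage sketch on a scanpy-style settings object; the `settings` name is an assumption.

```python
# Higher-resolution notebook figures, PNG output, no frames around scatter plots.
settings.set_figure_params(dpi=100, dpi_save=300, format="png", frameon=False)
```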
def volume(self, vol, clim=None, method='mip', threshold=None,
cmap='grays'):
"""Show a 3D volume
Parameters
----------
vol : ndarray
Volume to render.
clim : tuple of two floats | None
The contrast limits. The values in the volume are mapped to
black and white corresponding to these values. Default maps
between min and max.
method : {'mip', 'iso', 'translucent', 'additive'}
The render style to use. See corresponding docs for details.
Default 'mip'.
threshold : float
            The threshold to use for the isosurface render style. By default
the mean of the given volume is used.
cmap : str
The colormap to use.
Returns
-------
volume : instance of Volume
The volume visualization.
See also
--------
Volume
"""
self._configure_3d()
volume = scene.Volume(vol, clim, method, threshold, cmap=cmap)
self.view.add(volume)
self.view.camera.set_range()
return volume | 0.002604 |
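A sketch assuming `fig` exposes this method on a vispy-plot-style canvas; the random volume is just placeholder data.

```python
import numpy as np

vol_data = np.random.rand(32, 32, 32).astype(np.float32)
vis = fig.volume(vol_data, clim=(0.0, 1.0), method='mip', cmap='grays')
```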
def tplot_rename(old_name, new_name):
"""
This function will rename tplot variables that are already stored in memory.
Parameters:
old_name : str
Old name of the Tplot Variable
new_name : str
New name of the Tplot Variable
Returns:
None
Examples:
>>> # Rename Variable 1 to Variable 2
>>> import pytplot
>>> pytplot.tplot_rename("Variable1", "Variable2")
"""
#check if old name is in current dictionary
if old_name not in pytplot.data_quants.keys():
print("That name is currently not in pytplot")
return
#if old name input is a number, convert to corresponding name
if isinstance(old_name, int):
old_name = pytplot.data_quants[old_name].name
#remake dictionary with new name in old name's slot
d = pytplot.data_quants
d2 = OrderedDict([(new_name, v) if k == old_name else (k, v) for k, v in d.items()])
data_quants = d2
for key in d2:
data_quants[key].name = key
pytplot.data_quants = data_quants
return | 0.012467 |
def author_names(self):
"""
Returns a dictionary like this:
{
"urn:cts:greekLit:tlg0012$$n1" : "Homer"
, "urn:cts:greekLit:tlg0012$$n2" : "Omero"
, ...
}
"""
return {"%s$$n%i" % (author.get_urn(), i): name[1]
for author in self.get_authors()
for i, name in enumerate(author.get_names())
if author.get_urn() is not None} | 0.004425 |
def get_rendered_fields(self, ctx=None):
'''
:param ctx: rendering context in which the method was called
:return: ordered list of the fields that will be rendered
'''
if ctx is None:
ctx = RenderContext()
ctx.push(self)
current = self._fields[self._field_idx]
res = current.get_rendered_fields(ctx)
ctx.pop()
return res | 0.004854 |
def set_file(path, saltenv='base', **kwargs):
'''
Set answers to debconf questions from a file.
CLI Example:
.. code-block:: bash
salt '*' debconf.set_file salt://pathto/pkg.selections
'''
if '__env__' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('__env__')
path = __salt__['cp.cache_file'](path, saltenv)
if path:
_set_file(path)
return True
return False | 0.002203 |
def configure_graph(self, info):
""" Handles display of the graph dot traits.
"""
if info.initialized:
self.model.edit_traits(parent=info.ui.control,
kind="live", view=attr_view) | 0.013043 |
def push(self, filename, data):
"""Push a chunk of a file to the streaming endpoint.
Args:
            filename: Name of the file this data is a chunk of.
            data: File data for this chunk.
"""
self._queue.put(Chunk(filename, data)) | 0.006601 |
def byte_str(nBytes, unit='bytes', precision=2):
"""
    Return a string representing the number of bytes in the chosen unit.
Returns:
str
"""
#return (nBytes * ureg.byte).to(unit.upper())
if unit.lower().startswith('b'):
nUnit = nBytes
elif unit.lower().startswith('k'):
nUnit = nBytes / (2.0 ** 10)
elif unit.lower().startswith('m'):
nUnit = nBytes / (2.0 ** 20)
elif unit.lower().startswith('g'):
nUnit = nBytes / (2.0 ** 30)
elif unit.lower().startswith('t'):
nUnit = nBytes / (2.0 ** 40)
else:
raise NotImplementedError('unknown nBytes=%r unit=%r' % (nBytes, unit))
return repr2(nUnit, precision=precision) + ' ' + unit | 0.005618 |
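Two quick checks of the conversion above; the exact strings depend on how `repr2` formats numbers, so the expected outputs are approximate.

```python
print(byte_str(1536, unit='KB'))         # ~ '1.5 KB'   (1536 / 2**10)
print(byte_str(3 * 2 ** 30, unit='GB'))  # ~ '3.0 GB'
```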
def find_for_player_id(player_id, connection=None, page_size=100,
page_number=0, sort_by=DEFAULT_SORT_BY, sort_order=DEFAULT_SORT_ORDER):
"""
List playlists for a for given player id.
"""
return pybrightcove.connection.ItemResultSet(
"find_playlists_for_player_id", Playlist, connection, page_size,
page_number, sort_by, sort_order, player_id=player_id) | 0.007194 |
def check_consistent_parameter_grouping(self):
"""
Ensures this object does not have conflicting groups of parameters.
:raises ValueError: For conflicting or absent parameters.
"""
parameter_groups = {}
if self.indices_per_axis is not None:
parameter_groups["indices_per_axis"] = \
{"self.indices_per_axis": self.indices_per_axis}
if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
parameter_groups["split_size"] = \
{
"self.split_size": self.split_size,
"self.split_num_slices_per_axis": self.split_num_slices_per_axis,
}
if self.tile_shape is not None:
parameter_groups["tile_shape"] = \
{"self.tile_shape": self.tile_shape}
if self.max_tile_bytes is not None:
parameter_groups["max_tile_bytes"] = \
{"self.max_tile_bytes": self.max_tile_bytes}
if self.max_tile_shape is not None:
if "max_tile_bytes" not in parameter_groups.keys():
parameter_groups["max_tile_bytes"] = {}
parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
if self.sub_tile_shape is not None:
if "max_tile_bytes" not in parameter_groups.keys():
parameter_groups["max_tile_bytes"] = {}
parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
self.logger.debug("parameter_groups=%s", parameter_groups)
if len(parameter_groups.keys()) > 1:
group_keys = sorted(parameter_groups.keys())
raise ValueError(
"Got conflicting parameter groups specified, "
+
"should only specify one group to define the split:\n"
+
(
"\n".join(
[
(
("Group %18s: " % ("'%s'" % group_key))
+
str(parameter_groups[group_key])
)
for group_key in group_keys
]
)
)
)
if len(parameter_groups.keys()) <= 0:
raise ValueError(
"No split parameters specified, need parameters from one of the groups: "
+
"'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
) | 0.003051 |
def authorize(login, password, scopes, note='', note_url='', client_id='',
client_secret='', two_factor_callback=None):
"""Obtain an authorization token for the GitHub API.
:param str login: (required)
:param str password: (required)
:param list scopes: (required), areas you want this token to apply to,
i.e., 'gist', 'user'
:param str note: (optional), note about the authorization
:param str note_url: (optional), url for the application
:param str client_id: (optional), 20 character OAuth client key for which
to create a token
:param str client_secret: (optional), 40 character OAuth client secret for
which to create the token
:param func two_factor_callback: (optional), function to call when a
Two-Factor Authentication code needs to be provided by the user.
:returns: :class:`Authorization <Authorization>`
"""
gh = GitHub()
gh.login(two_factor_callback=two_factor_callback)
return gh.authorize(login, password, scopes, note, note_url, client_id,
client_secret) | 0.00091 |
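A hedged usage sketch; the credentials and scopes are placeholders, and the token attribute on the returned Authorization object is an assumption.

```python
auth = authorize('octocat', 'correct horse battery staple',
                 ['repo', 'gist'], note='ci token')
print(auth.token)   # assumption: the Authorization object exposes the new token
```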
def transform(self, X, y=None, scan_onsets=None):
""" Use the model to estimate the time course of response to
each condition (ts), and the time course unrelated to task
(ts0) which is spread across the brain.
This is equivalent to "decoding" the design matrix and
nuisance regressors from a new dataset different from the
training dataset on which fit() was applied. An AR(1) smooth
prior is imposed on the decoded ts and ts0 with the AR(1)
parameters learnt from the corresponding time courses in the
training data.
Notice: if you set the rank to be lower than the number of
experimental conditions (number of columns in the design
matrix), the recovered task-related activity will have
collinearity (the recovered time courses of some conditions
can be linearly explained by the recovered time courses
of other conditions).
Parameters
----------
X : numpy arrays, shape=[time_points, voxels]
fMRI data of new data of the same subject. The voxels should
match those used in the fit() function. If data are z-scored
(recommended) when fitting the model, data should be z-scored
as well when calling transform()
y : not used (as it is unsupervised learning)
scan_onsets : numpy array, shape=[number of runs].
A list of indices corresponding to the onsets of
scans in the data X. If not provided, data will be assumed
to be acquired in a continuous scan.
Returns
-------
ts : numpy arrays, shape = [time_points, condition]
The estimated response to the task conditions which have the
response amplitudes estimated during the fit step.
ts0: numpy array, shape = [time_points, n_nureg]
The estimated time course spread across the brain, with the
loading weights estimated during the fit step.
"""
assert X.ndim == 2 and X.shape[1] == self.beta_.shape[1], \
'The shape of X is not consistent with the shape of data '\
'used in the fitting step. They should have the same number '\
'of voxels'
assert scan_onsets is None or (scan_onsets.ndim == 1 and
0 in scan_onsets), \
'scan_onsets should either be None or an array of indices '\
'If it is given, it should include at least 0'
if scan_onsets is None:
scan_onsets = np.array([0], dtype=int)
else:
scan_onsets = np.int32(scan_onsets)
ts, ts0, log_p = self._transform(
Y=X, scan_onsets=scan_onsets, beta=self.beta_,
beta0=self.beta0_, rho_e=self.rho_, sigma_e=self.sigma_,
rho_X=self._rho_design_, sigma2_X=self._sigma2_design_,
rho_X0=self._rho_X0_, sigma2_X0=self._sigma2_X0_)
return ts, ts0 | 0.000656 |
def from_options(cls, options):
"""Given an `Options` object, produce a `ChangedRequest`."""
return cls(options.changes_since,
options.diffspec,
options.include_dependees,
options.fast) | 0.004202 |
def step_through(self, msg='', shutit_pexpect_child=None, level=1, print_input=True, value=True):
"""Implements a step-through function, using pause_point.
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
if (not shutit_global.shutit_global_object.determine_interactive() or not shutit_global.shutit_global_object.interactive or
shutit_global.shutit_global_object.interactive < level):
return True
self.build['step_through'] = value
shutit_pexpect_session.pause_point(msg, print_input=print_input, level=level)
return True | 0.024161 |
def init(options=None, ini_paths=None, argv=None,
strict=False, **parser_kwargs):
"""Initialize singleton config and read/parse configuration.
:keyword bool strict: when true, will error out on invalid arguments
(default behavior is to ignore them)
:returns: the loaded configuration.
"""
global SINGLETON
SINGLETON = Config(
options=options,
ini_paths=ini_paths,
argv=argv,
**parser_kwargs)
SINGLETON.parse(argv, strict=strict)
return SINGLETON | 0.001898 |
def find_all_paths(G, start, end, path=[]):
"""
Find all paths between vertices start and end in graph.
"""
path = path + [start]
if start == end:
return [path]
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
if end not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (end,))
paths = []
for vertex in G.vertices[start]:
if vertex not in path:
newpaths = find_all_paths(G, vertex, end, path)
for newpath in newpaths:
paths.append(newpath)
return paths | 0.001563 |
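A minimal sketch with a stand-in graph object; the real Graph class is assumed to expose a `vertices` adjacency mapping, which is all the function touches.

```python
class _StubGraph:
    vertices = {'a': ['b', 'c'], 'b': ['c'], 'c': []}

print(find_all_paths(_StubGraph, 'a', 'c'))   # -> [['a', 'b', 'c'], ['a', 'c']]
```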
def _is_healthiest_node(self, members, check_replication_lag=True):
"""This method tries to determine whether I am healthy enough to became a new leader candidate or not."""
_, my_wal_position = self.state_handler.timeline_wal_position()
if check_replication_lag and self.is_lagging(my_wal_position):
logger.info('My wal position exceeds maximum replication lag')
return False # Too far behind last reported wal position on master
if not self.is_standby_cluster() and self.check_timeline():
cluster_timeline = self.cluster.timeline
my_timeline = self.state_handler.replica_cached_timeline(cluster_timeline)
if my_timeline < cluster_timeline:
logger.info('My timeline %s is behind last known cluster timeline %s', my_timeline, cluster_timeline)
return False
# Prepare list of nodes to run check against
members = [m for m in members if m.name != self.state_handler.name and not m.nofailover and m.api_url]
if members:
for st in self.fetch_nodes_statuses(members):
if st.failover_limitation() is None:
if not st.in_recovery:
logger.warning('Master (%s) is still alive', st.member.name)
return False
if my_wal_position < st.wal_position:
logger.info('Wal position of %s is ahead of my wal position', st.member.name)
return False
return True | 0.005122 |
def repertoire(self, direction, mechanism, purview):
"""Return the cause or effect repertoire function based on a direction.
Args:
            direction (str): The temporal direction, specifying the cause or
effect repertoire.
"""
system = self.system[direction]
node_labels = system.node_labels
if not set(purview).issubset(self.purview_indices(direction)):
raise ValueError('{} is not a {} purview in {}'.format(
fmt.fmt_mechanism(purview, node_labels), direction, self))
if not set(mechanism).issubset(self.mechanism_indices(direction)):
            raise ValueError('{} is not a {} mechanism in {}'.format(
fmt.fmt_mechanism(mechanism, node_labels), direction, self))
return system.repertoire(direction, mechanism, purview) | 0.002339 |
def send_security_email(data):
"""Celery task to send security email.
:param data: Contains the email data.
"""
msg = Message()
msg.__dict__.update(data)
current_app.extensions['mail'].send(msg) | 0.004566 |
def path_alias_regex(self, regex):
""" A decorator that adds a path-alias regular expression; calls
add_path_regex """
def decorator(func):
""" Adds the function to the regular expression alias list """
            self.add_path_regex(regex, func)
            return func  # keep the decorated function bound to its original name
return decorator | 0.006494 |
def _split_line(self, line):
"""Split line into field values."""
line = line.rstrip('\r\n')
flds = re.split('\t', line)
assert len(flds) == self.exp_numcol, "EXPECTED({E}) COLUMNS, ACTUAL({A}): {L}".format(
E=self.exp_numcol, A=len(flds), L=line)
return flds | 0.009677 |
def evaluate_detections(self, detections):
"""
top level evaluations
Parameters:
----------
detections: list
result list, each entry is a matrix of detections
Returns:
----------
None
"""
# make all these folders for results
result_dir = os.path.join(self.devkit_path, 'results')
if not os.path.exists(result_dir):
os.mkdir(result_dir)
year_folder = os.path.join(self.devkit_path, 'results', 'VOC' + self.year)
if not os.path.exists(year_folder):
os.mkdir(year_folder)
res_file_folder = os.path.join(self.devkit_path, 'results', 'VOC' + self.year, 'Main')
if not os.path.exists(res_file_folder):
os.mkdir(res_file_folder)
self.write_pascal_results(detections)
self.do_python_eval() | 0.004556 |
def intersects(self, other):
"""Returns true if this envelope intersects another.
Arguments:
other -- Envelope or tuple of (minX, minY, maxX, maxY)
"""
try:
return (self.min_x <= other.max_x and
self.max_x >= other.min_x and
self.min_y <= other.max_y and
self.max_y >= other.min_y)
except AttributeError:
return self.intersects(Envelope(other)) | 0.004193 |
def execute(self, command, consume=True):
"""
Sends the given data to the remote host (with a newline appended)
and waits for a prompt in the response. The prompt attempts to use
a sane default that works with many devices running Unix, IOS,
IOS-XR, or Junos and others. If that fails, a custom prompt may
also be defined using the set_prompt() method.
This method also modifies the value of the response (self.response)
attribute, for details please see the documentation of the
expect() method.
:type command: string
:param command: The data that is sent to the remote host.
:type consume: boolean (Default: True)
:param consume: Whether to consume the prompt from the buffer or not.
:rtype: int, re.MatchObject
:return: The index of the prompt regular expression that matched,
and the match object.
"""
self.send(command + '\r')
return self.expect_prompt(consume) | 0.001953 |
def handler(event, context): # pylint: disable=W0613
"""
Historical vpc event collector.
This collector is responsible for processing Cloudwatch events and polling events.
"""
records = deserialize_records(event['Records'])
# Split records into two groups, update and delete.
# We don't want to query for deleted records.
update_records, delete_records = group_records_by_type(records, UPDATE_EVENTS)
capture_delete_records(delete_records)
# filter out error events
update_records = [e for e in update_records if not e['detail'].get('errorCode')] # pylint: disable=C0103
# group records by account for more efficient processing
LOG.debug(f'[@] Update Records: {records}')
capture_update_records(update_records) | 0.005181 |
def demote(self):
"""Demote the bootstrap code to the end of the `sys.path` so it is found last.
:return: The list of un-imported bootstrap modules.
:rtype: list of :class:`types.ModuleType`
"""
import sys # Grab a hold of `sys` early since we'll be un-importing our module in this process.
unimported_modules = []
for name, module in reversed(sorted(sys.modules.items())):
if self.imported_from_bootstrap(module):
unimported_modules.append(sys.modules.pop(name))
sys.path[:] = [path for path in sys.path if os.path.realpath(path) != self._realpath]
sys.path.append(self._sys_path_entry)
return unimported_modules | 0.00744 |
def list_abundance_expansion(graph: BELGraph) -> None:
"""Flatten list abundances."""
mapping = {
node: flatten_list_abundance(node)
for node in graph
if isinstance(node, ListAbundance)
}
relabel_nodes(graph, mapping, copy=False) | 0.003717 |
def remove_router_interface(self, context, router_id, interface_info):
"""Remove a subnet of a network from an existing router."""
router_to_del = (
super(AristaL3ServicePlugin, self).remove_router_interface(
context,
router_id,
interface_info)
)
# Get network information of the subnet that is being removed
core = directory.get_plugin()
subnet = core.get_subnet(context, router_to_del['subnet_id'])
network_id = subnet['network_id']
# For SVI removal from Arista HW, segmentation ID is needed
ml2_db = NetworkContext(self, context, {'id': network_id})
seg_id = ml2_db.network_segments[0]['segmentation_id']
router = self.get_router(context, router_id)
router_info = copy.deepcopy(router_to_del)
router_info['seg_id'] = seg_id
router_info['name'] = router['name']
try:
self.driver.remove_router_interface(context, router_info)
return router_to_del
except Exception as exc:
            LOG.error(_LE("Error removing interface %(interface)s from "
                          "router %(router_id)s on Arista HW. "
                          "Exception: %(exc)s"),
{'interface': interface_info, 'router_id': router_id,
'exc': exc}) | 0.00144 |
def convert_zero_consonant(pinyin):
"""零声母转换,还原原始的韵母
i行的韵母,前面没有声母的时候,写成yi(衣),ya(呀),ye(耶),yao(腰),
you(忧),yan(烟),yin(因),yang(央),ying(英),yong(雍)。
u行的韵母,前面没有声母的时候,写成wu(乌),wa(蛙),wo(窝),wai(歪),
wei(威),wan(弯),wen(温),wang(汪),weng(翁)。
ü行的韵母,前面没有声母的时候,写成yu(迂),yue(约),yuan(冤),
yun(晕);ü上两点省略。
"""
# y: yu -> v, yi -> i, y -> i
if pinyin.startswith('y'):
# 去除 y 后的拼音
no_y_py = pinyin[1:]
first_char = no_y_py[0] if len(no_y_py) > 0 else None
# yu -> ü: yue -> üe
if first_char in U_TONES:
pinyin = UV_MAP[first_char] + pinyin[2:]
# yi -> i: yi -> i
elif first_char in I_TONES:
pinyin = no_y_py
# y -> i: ya -> ia
else:
pinyin = 'i' + no_y_py
return pinyin
# w: wu -> u, w -> u
if pinyin.startswith('w'):
# 去除 w 后的拼音
no_w_py = pinyin[1:]
first_char = no_w_py[0] if len(no_w_py) > 0 else None
# wu -> u: wu -> u
if first_char in U_TONES:
pinyin = pinyin[1:]
# w -> u: wa -> ua
else:
pinyin = 'u' + pinyin[1:]
return pinyin
return pinyin | 0.000845 |
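A few illustrative calls; the expected outputs follow the function's inline comments and assume the module's U_TONES/I_TONES/UV_MAP constants cover the unmarked vowels.

```python
print(convert_zero_consonant('yi'))    # expected 'i'
print(convert_zero_consonant('ya'))    # expected 'ia'
print(convert_zero_consonant('wu'))    # expected 'u'
print(convert_zero_consonant('yue'))   # expected 'üe'
```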
async def set_name_endpoint(request: web.Request) -> web.Response:
""" Set the name of the robot.
Request with POST /server/name {"name": new_name}
Responds with 200 OK {"hostname": new_name, "prettyname": pretty_name}
or 400 Bad Request
In general, the new pretty name will be the specified name. The true
hostname will be capped to 53 letters, and have any characters other than
ascii letters or dashes replaced with dashes to fit the requirements here
https://www.freedesktop.org/software/systemd/man/hostname.html#.
"""
def build_400(msg: str) -> web.Response:
return web.json_response(
data={'message': msg},
status=400)
body = await request.json()
if 'name' not in body or not isinstance(body['name'], str):
return build_400('Body has no "name" key with a string')
new_name = await set_name(body['name'])
request.app[DEVICE_NAME_VARNAME] = new_name
return web.json_response(data={'name': new_name},
status=200) | 0.000951 |
def videos(self, days=None):
"""
Return all <ArloVideo> objects from camera given days range
:param days: number of days to retrieve
"""
if days is None:
days = self._min_days_vdo_cache
library = ArloMediaLibrary(self._session, preload=False)
try:
return library.load(only_cameras=[self], days=days)
except (AttributeError, IndexError):
            # make sure we are returning an empty list instead of None
# returning an empty list, cache will be forced only when calling
# the update method. Changing this can impact badly
# in the Home Assistant performance
return [] | 0.002829 |
def run_pyspark_yarn_cluster(env_dir, env_name, env_archive, args):
"""
    Initializes the required Spark command-line options in order to start a Python job with the given python environment.
Parameters
----------
env_dir : str
env_name : str
env_archive : str
args : list
Returns
-------
This call will spawn a child process and block until that is complete.
"""
env = dict(os.environ)
yarn_python = os.path.join(".", "CONDA", env_name, "bin", "python")
archives = env_archive + "#CONDA"
prepend_args = [
"--master", "yarn",
"--deploy-mode", "cluster",
"--conf", "spark.yarn.appMasterEnv.PYSPARK_PYTHON={}".format(yarn_python),
"--archives", archives,
]
env_update = {
"PYSPARK_PYTHON": yarn_python
}
env.update(env_update)
spark_submit = os.path.join(env["SPARK_HOME"], "bin", "spark-submit")
log.info("Running spark in YARN-client mode with added arguments")
log.info(" args: %s", pprint.pprint(prepend_args, indent=4))
log.info(" env: %s", pprint.pprint(env_update, indent=4))
# REPLACE our python process with another one
subprocess.check_call([spark_submit] + prepend_args + args, env=env) | 0.00241 |
def build_strings(strings, prefix):
"""Construct string definitions according to
the previously maintained table.
"""
strings = [
(
make_c_str(prefix + str(number), value),
reloc_ptr(
prefix + str(number), 'reloc_delta', 'char *'
)
) for value, number in sort_values(strings)
]
return [i[0] for i in strings], [i[1] for i in strings] | 0.004228 |
def node_sub(self, node_self, node_other):
'''node_sub
Low-level api: Compute the delta of two configs. This method is
recursive. Assume two configs are different.
Parameters
----------
node_self : `Element`
A config node in a config tree that is being processed. node_self
cannot be a leaf node.
node_other : `Element`
A config node in another config tree that is being processed.
Returns
-------
None
There is no return of this method.
'''
def same_leaf_list(tag):
list_self = [c for c in list(node_self) if c.tag == tag]
list_other = [c for c in list(node_other) if c.tag == tag]
s_node = self.device.get_schema_node((list_self + list_other)[0])
if s_node.get('ordered-by') == 'user':
if [i.text for i in list_self] == [i.text for i in list_other]:
return True
else:
return False
else:
if set([i.text for i in list_self]) == \
set([i.text for i in list_other]):
return True
else:
return False
if self.preferred_replace != 'merge':
t_self = [c.tag for c in list(node_self) \
if self.device.get_schema_node(c).get('type') == \
'leaf-list']
t_other = [c.tag for c in list(node_other) \
if self.device.get_schema_node(c).get('type') == \
'leaf-list']
commonalities = set(t_self) & set(t_other)
for commonality in commonalities:
if not same_leaf_list(commonality):
node_self.set(operation_tag, 'replace')
node_other.set(operation_tag, 'replace')
return
in_s_not_in_o, in_o_not_in_s, in_s_and_in_o = \
self._group_kids(node_self, node_other)
ordered_by_user = {}
for child_self in in_s_not_in_o:
child_other = etree.Element(child_self.tag,
{operation_tag: self.preferred_delete},
nsmap=child_self.nsmap)
if self.preferred_create != 'merge':
child_self.set(operation_tag, self.preferred_create)
siblings = list(node_other.iterchildren(tag=child_self.tag))
if siblings:
siblings[-1].addnext(child_other)
else:
node_other.append(child_other)
s_node = self.device.get_schema_node(child_self)
if s_node.get('type') == 'leaf-list':
if s_node.get('ordered-by') == 'user' and \
s_node.tag not in ordered_by_user:
ordered_by_user[s_node.tag] = 'leaf-list'
child_other.text = child_self.text
elif s_node.get('type') == 'list':
keys = self._get_list_keys(s_node)
if s_node.get('ordered-by') == 'user' and \
s_node.tag not in ordered_by_user:
ordered_by_user[s_node.tag] = keys
for key in keys:
key_node = child_self.find(key)
e = etree.SubElement(child_other, key, nsmap=key_node.nsmap)
e.text = key_node.text
for child_other in in_o_not_in_s:
child_self = etree.Element(child_other.tag,
{operation_tag: self.preferred_delete},
nsmap=child_other.nsmap)
if self.preferred_create != 'merge':
child_other.set(operation_tag, self.preferred_create)
siblings = list(node_self.iterchildren(tag=child_other.tag))
if siblings:
siblings[-1].addnext(child_self)
else:
node_self.append(child_self)
s_node = self.device.get_schema_node(child_other)
if s_node.get('type') == 'leaf-list':
if s_node.get('ordered-by') == 'user' and \
s_node.tag not in ordered_by_user:
ordered_by_user[s_node.tag] = 'leaf-list'
child_self.text = child_other.text
elif s_node.get('type') == 'list':
keys = self._get_list_keys(s_node)
if s_node.get('ordered-by') == 'user' and \
s_node.tag not in ordered_by_user:
ordered_by_user[s_node.tag] = keys
for key in keys:
key_node = child_other.find(key)
e = etree.SubElement(child_self, key, nsmap=key_node.nsmap)
e.text = key_node.text
for child_self, child_other in in_s_and_in_o:
s_node = self.device.get_schema_node(child_self)
if s_node.get('type') == 'leaf':
if child_self.text == child_other.text:
if not s_node.get('is_key'):
node_self.remove(child_self)
node_other.remove(child_other)
else:
if self.preferred_replace != 'merge':
child_self.set(operation_tag, self.preferred_replace)
child_other.set(operation_tag, self.preferred_replace)
elif s_node.get('type') == 'leaf-list':
if s_node.get('ordered-by') == 'user':
if s_node.tag not in ordered_by_user:
ordered_by_user[s_node.tag] = 'leaf-list'
else:
node_self.remove(child_self)
node_other.remove(child_other)
elif s_node.get('type') == 'container':
if self._node_le(child_self, child_other) and \
self._node_le(child_other, child_self):
node_self.remove(child_self)
node_other.remove(child_other)
else:
self.node_sub(child_self, child_other)
elif s_node.get('type') == 'list':
if s_node.get('ordered-by') == 'user' and \
s_node.tag not in ordered_by_user:
ordered_by_user[s_node.tag] = self._get_list_keys(s_node)
if self._node_le(child_self, child_other) and \
self._node_le(child_other, child_self):
if s_node.get('ordered-by') == 'user':
for child in child_self.getchildren():
schema_node = self.device.get_schema_node(child)
if not schema_node.get('is_key'):
child_self.remove(child)
for child in child_other.getchildren():
schema_node = self.device.get_schema_node(child)
if not schema_node.get('is_key'):
child_other.remove(child)
else:
node_self.remove(child_self)
node_other.remove(child_other)
else:
self.node_sub(child_self, child_other)
else:
path = self.device.get_xpath(s_node)
                raise ModelError("unknown schema node type: type of node {} " \
                                 "is '{}'".format(path, s_node.get('type')))
for tag in ordered_by_user:
scope_s = in_s_not_in_o + in_s_and_in_o
scope_o = in_o_not_in_s + in_s_and_in_o
for sequence in self._get_sequence(scope_s, tag, node_self), \
self._get_sequence(scope_o, tag, node_other):
for item in sequence:
# modifying the namespace mapping of a node is not possible
# in lxml. See https://bugs.launchpad.net/lxml/+bug/555602
# if 'yang' not in item.nsmap:
# item.nsmap['yang'] = yang_url
i = sequence.index(item)
if i == 0:
item.set(insert_tag, 'first')
else:
item.set(insert_tag, 'after')
precursor = sequence[i - 1]
if ordered_by_user[tag] == 'leaf-list':
item.set(value_tag, precursor.text)
else:
keys = ordered_by_user[tag]
key_nodes = {k: precursor.find(k) for k in keys}
ids = {k: self._url_to_prefix(n, k) \
for k, n in key_nodes.items()}
l = ["[{}='{}']".format(ids[k], key_nodes[k].text) \
for k in keys]
item.set(key_tag, ''.join(l)) | 0.001661 |
def _set_cmap_seq(self, v, load=False):
"""
Setter method for cmap_seq, mapped from YANG variable /overlay_class_map/cmap_seq (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_cmap_seq is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cmap_seq() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("cmap_seq_num",cmap_seq.cmap_seq, yang_name="cmap-seq", rest_name="seq", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='cmap-seq-num', extensions={u'tailf-common': {u'info': u'Sequence number', u'cli-no-key-completion': None, u'alt-name': u'seq', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'OverlayClassMapRuleCallPoint'}}), is_container='list', yang_name="cmap-seq", rest_name="seq", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sequence number', u'cli-no-key-completion': None, u'alt-name': u'seq', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'OverlayClassMapRuleCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-overlay-policy', defining_module='brocade-overlay-policy', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cmap_seq must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("cmap_seq_num",cmap_seq.cmap_seq, yang_name="cmap-seq", rest_name="seq", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='cmap-seq-num', extensions={u'tailf-common': {u'info': u'Sequence number', u'cli-no-key-completion': None, u'alt-name': u'seq', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'OverlayClassMapRuleCallPoint'}}), is_container='list', yang_name="cmap-seq", rest_name="seq", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sequence number', u'cli-no-key-completion': None, u'alt-name': u'seq', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'OverlayClassMapRuleCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-overlay-policy', defining_module='brocade-overlay-policy', yang_type='list', is_config=True)""",
})
self.__cmap_seq = t
if hasattr(self, '_set'):
self._set() | 0.003366 |
def get_dattype_regionmode(regions, scen7=False):
"""
Get the THISFILE_DATTYPE and THISFILE_REGIONMODE flags for a given region set.
In all MAGICC input files, there are two flags: THISFILE_DATTYPE and
THISFILE_REGIONMODE. These tell MAGICC how to read in a given input file. This
function maps the regions which are in a given file to the value of these flags
expected by MAGICC.
Parameters
----------
regions : list_like
The regions to get THISFILE_DATTYPE and THISFILE_REGIONMODE flags for.
scen7 : bool, optional
Whether the file we are getting the flags for is a SCEN7 file or not.
Returns
-------
dict
Dictionary where the flags are the keys and the values are the value they
should be set to for the given inputs.
"""
dattype_flag = "THISFILE_DATTYPE"
regionmode_flag = "THISFILE_REGIONMODE"
region_dattype_row = _get_dattype_regionmode_regions_row(regions, scen7=scen7)
dattype = DATTYPE_REGIONMODE_REGIONS[dattype_flag.lower()][region_dattype_row].iloc[
0
]
regionmode = DATTYPE_REGIONMODE_REGIONS[regionmode_flag.lower()][
region_dattype_row
].iloc[0]
return {dattype_flag: dattype, regionmode_flag: regionmode} | 0.005542 |
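An illustrative call, assuming this function's surrounding pymagicc-style module (with its `DATTYPE_REGIONMODE_REGIONS` lookup table) is importable and that the region list below is one of the region sets that table knows about:

```python
# Hypothetical example: ask which header flags MAGICC expects for a
# world-only, non-SCEN7 file. The exact strings returned depend on the
# DATTYPE_REGIONMODE_REGIONS table shipped with the module.
flags = get_dattype_regionmode(["WORLD"], scen7=False)
print(flags["THISFILE_DATTYPE"], flags["THISFILE_REGIONMODE"])
```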
async def create_room(self, alias: Optional[str] = None, is_public: bool = False,
name: Optional[str] = None, topic: Optional[str] = None,
is_direct: bool = False, invitees: Optional[List[str]] = None,
initial_state: Optional[List[dict]] = None) -> str:
"""
Create a new room. See also: `API reference`_
Args:
alias: The desired room alias **local part**. If this is included, a room alias will be
created and mapped to the newly created room. The alias will belong on the same
homeserver which created the room. For example, if this was set to "foo" and sent to
the homeserver "example.com" the complete room alias would be ``#foo:example.com``.
is_public: This flag sets the state event preset to ``public_chat``, which sets
``join_rules`` to ``public``. Defaults to false, which sets ``join_rules`` to
``invite``.
name: If this is included, an ``m.room.name`` event will be sent into the room to
indicate the name of the room. See `Room Events`_ for more information on
``m.room.name``.
topic: If this is included, an ``m.room.topic`` event will be sent into the room to
indicate the topic for the room. See `Room Events`_ for more information on
``m.room.topic``.
is_direct: This flag makes the server set the ``is_direct`` flag on the
``m.room.member`` events sent to the users in ``invite`` and ``invite_3pid``. See
`Direct Messaging`_ for more information.
invitees: A list of user IDs to invite to the room. This will tell the server to invite
everyone in the list to the newly created room.
initial_state: A list of state events to set in the new room. This allows the user to
override the default state events set in the new room. The expected format of the
state events are an object with type, state_key and content keys set.
                Takes precedence over events set by ``is_public``, but gets overridden by the
                ``name`` and ``topic`` keys.
Returns:
The ID of the newly created room.
Raises:
MatrixResponseError: If the response does not contain a ``room_id`` field.
.. _API reference:
https://matrix.org/docs/spec/client_server/r0.3.0.html#post-matrix-client-r0-createroom
.. _Room Events:
https://matrix.org/docs/spec/client_server/r0.3.0.html#room-events
.. _Direct Messaging:
https://matrix.org/docs/spec/client_server/r0.3.0.html#direct-messaging
"""
await self.ensure_registered()
content = {
"visibility": "private",
"is_direct": is_direct,
"preset": "public_chat" if is_public else "private_chat",
}
if alias:
content["room_alias_name"] = alias
if invitees:
content["invite"] = invitees
if name:
content["name"] = name
if topic:
content["topic"] = topic
if initial_state:
content["initial_state"] = initial_state
resp = await self.client.request("POST", "/createRoom", content)
try:
return resp["room_id"]
except KeyError:
raise MatrixResponseError("Room create response did not contain room_id.") | 0.007578 |
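A minimal usage sketch, assuming an initialized client object exposing the coroutine above and a running event loop; the alias, room name, and invitee ID are placeholders:

```python
async def demo(client):
    # Create an invite-only room, invite one user, and print the new room ID.
    room_id = await client.create_room(
        alias="ops-sync",                    # becomes #ops-sync:<homeserver>
        name="Ops sync",
        topic="Daily operations chatter",
        invitees=["@alice:example.com"],     # placeholder Matrix user ID
    )
    print("created", room_id)

# asyncio.run(demo(client))  # run with a real, initialized client instance
```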
def invoke_obfuscation(scriptString):
# Add letters a-z with random case to $RandomDelimiters.
alphabet = ''.join(choice([i.upper(), i]) for i in ascii_lowercase)
    # Create list of random delimiters called randomDelimiters.
# Avoid using . * ' " [ ] ( ) etc. as delimiters as these will cause problems in the -Split command syntax.
randomDelimiters = ['_','-',',','{','}','~','!','@','%','&','<','>',';',':']
for i in alphabet:
randomDelimiters.append(i)
# Only use a subset of current delimiters to randomize what you see in every iteration of this script's output.
randomDelimiters = [choice(randomDelimiters) for _ in range(int(len(randomDelimiters)/4))]
# Convert $ScriptString to delimited ASCII values in [Char] array separated by random delimiter from defined list $RandomDelimiters.
delimitedEncodedArray = ''
for char in scriptString:
delimitedEncodedArray += str(ord(char)) + choice(randomDelimiters)
# Remove trailing delimiter from $DelimitedEncodedArray.
delimitedEncodedArray = delimitedEncodedArray[:-1]
# Create printable version of $RandomDelimiters in random order to be used by final command.
test = sample(randomDelimiters, len(randomDelimiters))
randomDelimitersToPrint = ''.join(i for i in test)
# Generate random case versions for necessary operations.
forEachObject = choice(['ForEach','ForEach-Object','%'])
strJoin = ''.join(choice([i.upper(), i.lower()]) for i in '[String]::Join')
strStr = ''.join(choice([i.upper(), i.lower()]) for i in '[String]')
join = ''.join(choice([i.upper(), i.lower()]) for i in '-Join')
charStr = ''.join(choice([i.upper(), i.lower()]) for i in 'Char')
integer = ''.join(choice([i.upper(), i.lower()]) for i in 'Int')
forEachObject = ''.join(choice([i.upper(), i.lower()]) for i in forEachObject)
# Create printable version of $RandomDelimiters in random order to be used by final command specifically for -Split syntax.
randomDelimitersToPrintForDashSplit = ''
for delim in randomDelimiters:
# Random case 'split' string.
split = ''.join(choice([i.upper(), i.lower()]) for i in 'Split')
randomDelimitersToPrintForDashSplit += '-' + split + choice(['', ' ']) + '\'' + delim + '\'' + choice(['', ' '])
randomDelimitersToPrintForDashSplit = randomDelimitersToPrintForDashSplit.strip('\t\n\r')
# Randomly select between various conversion syntax options.
randomConversionSyntax = []
randomConversionSyntax.append('[' + charStr + ']' + choice(['', ' ']) + '[' + integer + ']' + choice(['', ' ']) + '$_')
randomConversionSyntax.append('[' + integer + ']' + choice(['', ' ']) + '$_' + choice(['', ' ']) + choice(['-as', '-As', '-aS', '-AS']) + choice(['', ' ']) + '[' + charStr + ']')
randomConversionSyntax = choice(randomConversionSyntax)
# Create array syntax for encoded scriptString as alternative to .Split/-Split syntax.
encodedArray = ''
for char in scriptString:
encodedArray += str(ord(char)) + choice(['', ' ']) + ',' + choice(['', ' '])
# Remove trailing comma from encodedArray
encodedArray = '(' + choice(['', ' ']) + encodedArray.rstrip().rstrip(',') + ')'
# Generate random syntax to create/set OFS variable ($OFS is the Output Field Separator automatic variable).
# Using Set-Item and Set-Variable/SV/SET syntax. Not using New-Item in case OFS variable already exists.
# If the OFS variable did exists then we could use even more syntax: $varname, Set-Variable/SV, Set-Item/SET, Get-Variable/GV/Variable, Get-ChildItem/GCI/ChildItem/Dir/Ls
# For more info: https://msdn.microsoft.com/en-us/powershell/reference/5.1/microsoft.powershell.core/about/about_automatic_variables
setOfsVarSyntax = []
setOfsVarSyntax.append('Set-Item' + choice([' '*1, ' '*2]) + "'Variable:OFS'" + choice([' '*1, ' '*2]) + "''")
setOfsVarSyntax.append(choice(['Set-Variable', 'SV', 'SET']) + choice([' '*1, ' '*2]) + "'OFS'" + choice([' '*1, ' '*2]) + "''")
setOfsVar = choice(setOfsVarSyntax)
setOfsVarBackSyntax = []
setOfsVarBackSyntax.append('Set-Item' + choice([' '*1, ' '*2]) + "'Variable:OFS'" + choice([' '*1, ' '*2]) + "' '")
setOfsVarBackSyntax.append('Set-Item' + choice([' '*1, ' '*2]) + "'Variable:OFS'" + choice([' '*1, ' '*2]) + "' '")
setOfsVarBack = choice(setOfsVarBackSyntax)
# Randomize case of $SetOfsVar and $SetOfsVarBack.
setOfsVar = ''.join(choice([i.upper(), i.lower()]) for i in setOfsVar)
setOfsVarBack = ''.join(choice([i.upper(), i.lower()]) for i in setOfsVarBack)
# Generate the code that will decrypt and execute the payload and randomly select one.
baseScriptArray = []
baseScriptArray.append('[' + charStr + '[]' + ']' + choice(['', ' ']) + encodedArray)
baseScriptArray.append('(' + choice(['', ' ']) + "'" + delimitedEncodedArray + "'." + split + "(" + choice(['', ' ']) + "'" + randomDelimitersToPrint + "'" + choice(['', ' ']) + ')' + choice(['', ' ']) + '|' + choice(['', ' ']) + forEachObject + choice(['', ' ']) + '{' + choice(['', ' ']) + '(' + choice(['', ' ']) + randomConversionSyntax + ')' + choice(['', ' ']) + '}' + choice(['', ' ']) + ')')
baseScriptArray.append('(' + choice(['', ' ']) + "'" + delimitedEncodedArray + "'" + choice(['', ' ']) + randomDelimitersToPrintForDashSplit + choice(['', ' ']) + '|' + choice(['', ' ']) + forEachObject + choice(['', ' ']) + '{' + choice(['', ' ']) + '(' + choice(['', ' ']) + randomConversionSyntax + ')' + choice(['', ' ']) + '}' + choice(['', ' ']) + ')')
baseScriptArray.append('(' + choice(['', ' ']) + encodedArray + choice(['', ' ']) + '|' + choice(['', ' ']) + forEachObject + choice(['', ' ']) + '{' + choice(['', ' ']) + '(' + choice(['', ' ']) + randomConversionSyntax + ')' + choice(['', ' ']) + '}' + choice(['', ' ']) + ')')
# Generate random JOIN syntax for all above options
newScriptArray = []
newScriptArray.append(choice(baseScriptArray) + choice(['', ' ']) + join + choice(['', ' ']) + "''")
newScriptArray.append(join + choice(['', ' ']) + choice(baseScriptArray))
newScriptArray.append(strJoin + '(' + choice(['', ' ']) + "''" + choice(['', ' ']) + ',' + choice(['', ' ']) + choice(baseScriptArray) + choice(['', ' ']) + ')')
newScriptArray.append('"' + choice(['', ' ']) + '$(' + choice(['', ' ']) + setOfsVar + choice(['', ' ']) + ')' + choice(['', ' ']) + '"' + choice(['', ' ']) + '+' + choice(['', ' ']) + strStr + choice(baseScriptArray) + choice(['', ' ']) + '+' + '"' + choice(['', ' ']) + '$(' + choice(['', ' ']) + setOfsVarBack + choice(['', ' ']) + ')' + choice(['', ' ']) + '"')
# Randomly select one of the above commands.
newScript = choice(newScriptArray)
# Generate random invoke operation syntax.
# Below code block is a copy from Out-ObfuscatedStringCommand.ps1. It is copied into this encoding function so that this will remain a standalone script without dependencies.
invokeExpressionSyntax = []
invokeExpressionSyntax.append(choice(['IEX', 'Invoke-Expression']))
# Added below slightly-randomized obfuscated ways to form the string 'iex' and then invoke it with . or &.
# Though far from fully built out, these are included to highlight how IEX/Invoke-Expression is a great indicator but not a silver bullet.
# These methods draw on common environment variable values and PowerShell Automatic Variable values/methods/members/properties/etc.
invocationOperator = choice(['.','&']) + choice(['', ' '])
invokeExpressionSyntax.append(invocationOperator + "( $ShellId[1]+$ShellId[13]+'x')")
invokeExpressionSyntax.append(invocationOperator + "( $PSHome[" + choice(['4', '21']) + "]+$PSHOME[" + choice(['30', '34']) + "]+'x')")
invokeExpressionSyntax.append(invocationOperator + "( $env:Public[13]+$env:Public[5]+'x')")
invokeExpressionSyntax.append(invocationOperator + "( $env:ComSpec[4," + choice(['15', '24', '26']) + ",25]-Join'')")
invokeExpressionSyntax.append(invocationOperator + "((" + choice(['Get-Variable','GV','Variable']) + " '*mdr*').Name[3,11,2]-Join'')")
invokeExpressionSyntax.append(invocationOperator + "( " + choice(['$VerbosePreference.ToString()','([String]$VerbosePreference)']) + "[1,3]+'x'-Join'')")
# Randomly choose from above invoke operation syntaxes.
invokeExpression = choice(invokeExpressionSyntax)
# Randomize the case of selected invoke operation.
invokeExpression = ''.join(choice([i.upper(), i.lower()]) for i in invokeExpression)
# Choose random Invoke-Expression/IEX syntax and ordering: IEX ($ScriptString) or ($ScriptString | IEX)
invokeOptions = []
invokeOptions.append(choice(['', ' ']) + invokeExpression + choice(['', ' ']) + '(' + choice(['', ' ']) + newScript + choice(['', ' ']) + ')' + choice(['', ' ']))
invokeOptions.append(choice(['', ' ']) + newScript + choice(['', ' ']) + '|' + choice(['', ' ']) + invokeExpression)
obfuscatedPayload = choice(invokeOptions)
"""
# Array to store all selected PowerShell execution flags.
powerShellFlags = []
noProfile = '-nop'
nonInteractive = '-noni'
windowStyle = '-w'
# Build the PowerShell execution flags by randomly selecting execution flags substrings and randomizing the order.
# This is to prevent Blue Team from placing false hope in simple signatures for common substrings of these execution flags.
commandlineOptions = []
commandlineOptions.append(noProfile[0:randrange(4, len(noProfile) + 1, 1)])
commandlineOptions.append(nonInteractive[0:randrange(5, len(nonInteractive) + 1, 1)])
# Randomly decide to write WindowStyle value with flag substring or integer value.
commandlineOptions.append(''.join(windowStyle[0:randrange(2, len(windowStyle) + 1, 1)] + choice([' '*1, ' '*2, ' '*3]) + choice(['1','h','hi','hid','hidd','hidde'])))
# Randomize the case of all command-line arguments.
for count, option in enumerate(commandlineOptions):
commandlineOptions[count] = ''.join(choice([i.upper(), i.lower()]) for i in option)
for count, option in enumerate(commandlineOptions):
commandlineOptions[count] = ''.join(option)
commandlineOptions = sample(commandlineOptions, len(commandlineOptions))
commandlineOptions = ''.join(i + choice([' '*1, ' '*2, ' '*3]) for i in commandlineOptions)
obfuscatedPayload = 'powershell.exe ' + commandlineOptions + newScript
"""
return obfuscatedPayload | 0.007336 |
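An illustrative call, assuming the function above is imported along with the `random`/`string` helpers it relies on (`choice`, `sample`, `randrange`, `ascii_lowercase`); every invocation yields a differently randomized one-liner:

```python
payload = invoke_obfuscation("Write-Host 'hello world'")
# The result is a delimiter/ASCII-encoded PowerShell expression that, when run,
# reassembles the original script and pipes it into an IEX-style invocation.
print(payload)
```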
def _identity(self, *args, **kwargs):
'''
Local users and groups.
accounts
Can be either 'local', 'remote' or 'all' (equal to "local,remote").
Remote accounts cannot be resolved on all systems, but only
those, which supports 'passwd -S -a'.
disabled
True (or False, default) to return only disabled accounts.
'''
LOCAL = 'local accounts'
EXT = 'external accounts'
data = dict()
data[LOCAL] = self._get_local_users(disabled=kwargs.get('disabled'))
data[EXT] = self._get_external_accounts(data[LOCAL].keys()) or 'N/A'
data['local groups'] = self._get_local_groups()
return data | 0.002766 |
def on_equalarea_specimen_select(self, event):
"""
        Get the mouse position on double click, find the nearest
        interpretation to that position, then select that interpretation.
        Parameters
        ----------
        event : the wx MouseEvent for that click
Alters
------
current_fit
"""
if not self.specimen_EA_xdata or not self.specimen_EA_ydata:
return
pos = event.GetPosition()
width, height = self.canvas2.get_width_height()
pos[1] = height - pos[1]
xpick_data, ypick_data = pos
xdata_org = self.specimen_EA_xdata
ydata_org = self.specimen_EA_ydata
data_corrected = self.specimen_eqarea.transData.transform(
vstack([xdata_org, ydata_org]).T)
xdata, ydata = data_corrected.T
xdata = list(map(float, xdata))
ydata = list(map(float, ydata))
e = 4e0
index = None
for i, (x, y) in enumerate(zip(xdata, ydata)):
if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
index = i
break
        if index is not None:
self.fit_box.SetSelection(index)
self.draw_figure(self.s, True)
self.on_select_fit(event) | 0.002336 |
def appndd(item, cell):
"""
Append an item to a double precision cell.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/appndd_c.html
:param item: The item to append.
:type item: Union[float,Iterable[float]]
:param cell: The cell to append to.
:type cell: spiceypy.utils.support_types.SpiceCell
"""
assert isinstance(cell, stypes.SpiceCell)
if hasattr(item, "__iter__"):
for d in item:
libspice.appndd_c(ctypes.c_double(d), cell)
else:
item = ctypes.c_double(item)
libspice.appndd_c(item, cell) | 0.001715 |
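A short sketch of typical use, assuming the published spiceypy API, where this wrapper lives alongside the `SPICEDOUBLE_CELL` helper and `card`:

```python
import spiceypy
import spiceypy.utils.support_types as stypes

cell = stypes.SPICEDOUBLE_CELL(10)      # double-precision cell with room for 10 values
spiceypy.appndd(1.5, cell)              # append a single float
spiceypy.appndd([2.5, 3.5], cell)       # append an iterable of floats
print([cell[i] for i in range(spiceypy.card(cell))])   # -> [1.5, 2.5, 3.5]
```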
def require_paragraph(self):
"""Create a new paragraph unless the currently-active container
is already a paragraph."""
if self._containers and _is_paragraph(self._containers[-1]):
return False
else:
self.start_paragraph()
return True | 0.006623 |
def getshapestring(self, startrow=1, nrow=-1, rowincr=1):
"""Get the shapes of all cells in the column in string format.
(see :func:`table.getcolshapestring`)"""
return self._table.getcolshapestring(self._column,
startrow, nrow, rowincr) | 0.006536 |
def output_all_points(self):
"""Return all points in the bank.
Return all points in the bank as lists of m1, m2, spin1z, spin2z.
Returns
-------
mass1 : list
List of mass1 values.
mass2 : list
List of mass2 values.
spin1z : list
List of spin1z values.
spin2z : list
List of spin2z values.
"""
mass1 = []
mass2 = []
spin1z = []
spin2z = []
for i in self.massbank.keys():
for j in self.massbank[i].keys():
for k in xrange(len(self.massbank[i][j]['mass1s'])):
curr_bank = self.massbank[i][j]
mass1.append(curr_bank['mass1s'][k])
mass2.append(curr_bank['mass2s'][k])
spin1z.append(curr_bank['spin1s'][k])
spin2z.append(curr_bank['spin2s'][k])
return mass1, mass2, spin1z, spin2z | 0.002055 |
def delete_vector(self, data, v=None):
"""
Deletes vector v and his id (data) in all matching buckets in the storage.
The data argument must be JSON-serializable.
"""
        # Delete the data id in each hash
for lshash in self.lshashes:
if v is None:
keys = self.storage.get_all_bucket_keys(lshash.hash_name)
else:
keys = lshash.hash_vector(v)
self.storage.delete_vector(lshash.hash_name, keys, data) | 0.005906 |
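A usage sketch assuming this method belongs to a NearPy-style `Engine`; the dimensionality and hash configuration are arbitrary:

```python
import numpy as np
from nearpy import Engine
from nearpy.hashes import RandomBinaryProjections

engine = Engine(10, lshashes=[RandomBinaryProjections('rbp', 8)])
v = np.random.randn(10)
engine.store_vector(v, 'item-42')

engine.delete_vector('item-42')      # scans every bucket of each hash for the id
engine.delete_vector('item-42', v)   # cheaper: only the buckets v hashes into
```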
def randints(s, e, n=1):
"""
    returns n uniform random numbers from the half-open range [s, e)
"""
assert e >= s, "Wrong range: [{}, {})".format(s, e)
n = max(1, n)
arr = [s + a % (e - s) for a in struct.unpack('<%dL' % n, os.urandom(4 * n))]
return arr | 0.010417 |
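An illustrative call; it assumes the function above is importable together with the `os` and `struct` modules it uses:

```python
values = randints(10, 20, n=5)
# Five integers drawn (roughly uniformly) from the half-open range [10, 20),
# e.g. [13, 19, 10, 17, 11] -- the exact values change on every call.
print(values)
```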
def Ctrl(cls, key):
""" 在指定元素上执行ctrl组合键事件
@note: key event -> control + key
@param key: 如'X'
"""
element = cls._element()
element.send_keys(Keys.CONTROL, key) | 0.013393 |
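A hypothetical usage sketch, assuming a Selenium-backed page-helper class that exposes the classmethod above and whose `_element()` resolves to the currently targeted WebElement; `PageHelper` is a placeholder name:

```python
# Select all text in the target element, then copy it.
PageHelper.Ctrl('a')   # Ctrl+A
PageHelper.Ctrl('c')   # Ctrl+C
```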
def load_file(self, filename):
"""Load mask image.
Results are appended to previously loaded masks.
This can be used to load mask per color.
"""
if not os.path.isfile(filename):
return
self.logger.info('Loading mask image from {0}'.format(filename))
try:
# 0=False, everything else True
dat = fits.getdata(filename).astype(np.bool)
except Exception as e:
self.logger.error('{0}: {1}'.format(e.__class__.__name__, str(e)))
return
key = '{0},{1}'.format(self.maskcolor, self.maskalpha)
if key in self.tree_dict:
sub_dict = self.tree_dict[key]
else:
sub_dict = {}
self.tree_dict[key] = sub_dict
# Add to listing
seqstr = '{0:04d}'.format(self._seqno) # Prepend 0s for proper sort
sub_dict[seqstr] = Bunch.Bunch(ID=seqstr,
MASKFILE=os.path.basename(filename))
self._treepaths.append((key, seqstr))
self._seqno += 1
# Create mask layer
obj = self.dc.Image(0, 0, masktorgb(
dat, color=self.maskcolor, alpha=self.maskalpha))
self._maskobjs.append(obj)
self.redo() | 0.001571 |
def action_bootstrap(verbose=False):
"""Bootstrap the local REPO with a few cool ontologies"""
printDebug("The following ontologies will be imported:")
printDebug("--------------")
count = 0
for s in BOOTSTRAP_ONTOLOGIES:
count += 1
print(count, "<%s>" % s)
printDebug("--------------")
printDebug("Note: this operation may take several minutes.")
printDebug("Proceed? [Y/N]")
var = input()
if var == "y" or var == "Y":
for uri in BOOTSTRAP_ONTOLOGIES:
try:
printDebug("--------------")
action_import(uri, verbose)
except:
printDebug(
"OPS... An Unknown Error Occurred - Aborting Installation")
printDebug("\n==========\n" + "Bootstrap command completed.",
"important")
return True
else:
printDebug("--------------")
printDebug("Goodbye")
return False | 0.002004 |
def FlushCache(self):
"""Empties the cache that holds cached decompressed data."""
self._cache = b''
self._cache_start_offset = None
self._cache_end_offset = None
self._ResetDecompressorState() | 0.004695 |
async def start_component_in_thread(executor, workload: CoroutineFunction[T], *args: Any, loop=None, **kwargs: Any) -> Component[T]:
"""\
Starts the passed `workload` with additional `commands` and `events` pipes.
The workload will be executed on an event loop in a new thread; the thread is provided by `executor`.
This function is not compatible with `ProcessPoolExecutor`,
as references between the workload and component are necessary.
Be careful when using an executor with a maximum number of threads,
as long running workloads may starve other tasks.
Consider using a dedicated executor that can spawn at least as many threads
as concurrent long-running tasks are expected.
"""
loop = loop or asyncio.get_event_loop()
commands_a, commands_b = pipe(loop=loop)
events_a, events_b = pipe(loop=loop)
commands_b = ConcurrentPipeEnd(commands_b, loop=loop)
events_b = ConcurrentPipeEnd(events_b, loop=loop)
_workload = workload(*args, commands=commands_b, events=events_b, **kwargs)
future = cast(_Future[T], loop.run_in_executor(executor, asyncio.run, _workload))
component = Component[T](commands_a, events_a, future)
await component.wait_for_start()
return component | 0.003185 |
def append(self, *values):
"""Append values at the end of the list
Allow chaining.
Args:
            values: values to be appended at the end.
Example:
>>> from ww import l
>>> lst = l([])
>>> lst.append(1)
[1]
>>> lst
[1]
>>> lst.append(2, 3).append(4,5)
[1, 2, 3, 4, 5]
>>> lst
[1, 2, 3, 4, 5]
"""
for value in values:
list.append(self, value)
return self | 0.003643 |
def new_scope(self, new_scope={}):
"""Add a new innermost scope for the duration of the with block.
Args:
new_scope (dict-like): The scope to add.
"""
old_scopes, self.scopes = self.scopes, self.scopes.new_child(new_scope)
yield
self.scopes = old_scopes | 0.006369 |
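A short usage sketch, assuming the owning object keeps its `scopes` as a `collections.ChainMap` and that `new_scope` is wrapped with `contextlib.contextmanager` (the bare `yield` implies it); the `interp` object is a placeholder:

```python
with interp.new_scope({'x': 1}):      # push an innermost scope holding x
    value = interp.scopes['x']        # resolves to 1 inside the block
# leaving the block restores the previous scope chain, so 'x' is gone again
```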
def create_case_task(self, case_id, case_task):
"""
:param case_id: Case identifier
:param case_task: TheHive task
:type case_task: CaseTask defined in models.py
:return: TheHive task
:rtype: json
"""
req = self.url + "/api/case/{}/task".format(case_id)
data = case_task.jsonify()
try:
return requests.post(req, headers={'Content-Type': 'application/json'}, data=data, proxies=self.proxies, auth=self.auth, verify=self.cert)
except requests.exceptions.RequestException as e:
raise CaseTaskException("Case task create error: {}".format(e)) | 0.004587 |
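A hypothetical call using the thehive4py-style client this method appears to belong to; the case ID and task fields are placeholders:

```python
from thehive4py.models import CaseTask   # assumed import path

task = CaseTask(title='Collect firewall logs', status='Waiting')
response = api.create_case_task('~4096', task)   # `api` is an initialized client, '~4096' a placeholder case id
print(response.status_code, response.json())
```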
def rebuild( self ):
"""
Rebuilds the information for this scene.
"""
self._buildData.clear()
self._dateGrid.clear()
self._dateTimeGrid.clear()
curr_min = self._minimumDate
curr_max = self._maximumDate
self._maximumDate = QDate()
self._minimumDate = QDate()
self.markForRebuild(False)
# rebuilds the month view
if ( self.currentMode() == XCalendarScene.Mode.Month ):
self.rebuildMonth()
elif ( self.currentMode() in (XCalendarScene.Mode.Week,
XCalendarScene.Mode.Day)):
self.rebuildDays()
# rebuild the items in the scene
items = sorted(self.items())
for item in items:
item.setPos(0, 0)
item.hide()
for item in items:
if ( isinstance(item, XCalendarItem) ):
item.rebuild()
if ( curr_min != self._minimumDate or curr_max != self._maximumDate ):
parent = self.parent()
if ( parent and not parent.signalsBlocked() ):
parent.dateRangeChanged.emit(self._minimumDate,
self._maximumDate) | 0.015754 |
def split_datetime(self, column_name_prefix = "X", limit=None, timezone=False):
"""
Splits an SArray of datetime type to multiple columns, return a
new SFrame that contains expanded columns. A SArray of datetime will be
split by default into an SFrame of 6 columns, one for each
year/month/day/hour/minute/second element.
**Column Naming**
When splitting a SArray of datetime type, new columns are named:
prefix.year, prefix.month, etc. The prefix is set by the parameter
"column_name_prefix" and defaults to 'X'. If column_name_prefix is
None or empty, then no prefix is used.
**Timezone Column**
        If the timezone parameter is True, then timezone information is represented
        as one additional column, a float giving the offset from
        GMT(0.0) or from UTC.
Parameters
----------
column_name_prefix: str, optional
If provided, expanded column names would start with the given prefix.
Defaults to "X".
limit: list[str], optional
Limits the set of datetime elements to expand.
Possible values are 'year','month','day','hour','minute','second',
'weekday', 'isoweekday', 'tmweekday', and 'us'.
If not provided, only ['year','month','day','hour','minute','second']
are expanded.
- 'year': The year number
- 'month': A value between 1 and 12 where 1 is January.
- 'day': Day of the months. Begins at 1.
- 'hour': Hours since midnight.
- 'minute': Minutes after the hour.
- 'second': Seconds after the minute.
- 'us': Microseconds after the second. Between 0 and 999,999.
- 'weekday': A value between 0 and 6 where 0 is Monday.
- 'isoweekday': A value between 1 and 7 where 1 is Monday.
- 'tmweekday': A value between 0 and 7 where 0 is Sunday
timezone: bool, optional
A boolean parameter that determines whether to show timezone column or not.
Defaults to False.
Returns
-------
out : SFrame
A new SFrame that contains all expanded columns
Examples
--------
To expand only day and year elements of a datetime SArray
>>> sa = SArray(
[datetime(2011, 1, 21, 7, 7, 21, tzinfo=GMT(0)),
datetime(2010, 2, 5, 7, 8, 21, tzinfo=GMT(4.5)])
>>> sa.split_datetime(column_name_prefix=None,limit=['day','year'])
Columns:
day int
year int
Rows: 2
Data:
        +-------+--------+
        |  day  |  year  |
        +-------+--------+
        |   21  |  2011  |
        |   5   |  2010  |
        +-------+--------+
[2 rows x 2 columns]
To expand only year and timezone elements of a datetime SArray
        with the timezone column represented as a float. Columns are named with prefix:
'Y.column_name'.
>>> sa.split_datetime(column_name_prefix="Y",limit=['year'],timezone=True)
Columns:
Y.year int
Y.timezone float
Rows: 2
Data:
        +--------+------------+
        | Y.year | Y.timezone |
        +--------+------------+
        |  2011  |    0.0     |
        |  2010  |    4.5     |
        +--------+------------+
[2 rows x 2 columns]
"""
from .sframe import SFrame as _SFrame
if self.dtype != datetime.datetime:
raise TypeError("Only column of datetime type is supported.")
if column_name_prefix is None:
column_name_prefix = ""
if six.PY2 and type(column_name_prefix) == unicode:
column_name_prefix = column_name_prefix.encode('utf-8')
if type(column_name_prefix) != str:
raise TypeError("'column_name_prefix' must be a string")
# convert limit to column_keys
if limit is not None:
if not _is_non_string_iterable(limit):
raise TypeError("'limit' must be a list")
name_types = set([type(i) for i in limit])
if (len(name_types) != 1):
raise TypeError("'limit' contains values that are different types")
if (name_types.pop() != str):
raise TypeError("'limit' must contain string values.")
if len(set(limit)) != len(limit):
raise ValueError("'limit' contains duplicate values")
column_types = []
if(limit is None):
limit = ['year','month','day','hour','minute','second']
column_types = [int] * len(limit)
if(timezone == True):
limit += ['timezone']
column_types += [float]
with cython_context():
return _SFrame(_proxy=self.__proxy__.expand(column_name_prefix, limit, column_types)) | 0.003779 |
def page_load_time(self):
"""
The average total load time for all runs (not weighted).
"""
load_times = self.get_load_times('page')
return round(mean(load_times), self.decimal_precision) | 0.00885 |
def yuv_to_rgb(y, u=None, v=None):
"""Convert the color from YUV coordinates to RGB.
Parameters:
:y:
The Y component value [0...1]
:u:
The U component value [-0.436...0.436]
:v:
The V component value [-0.615...0.615]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % yuv_to_rgb(0.5925, -0.2916, 0.3575)
'(0.999989, 0.500015, -6.3276e-05)'
"""
if type(y) in [list,tuple]:
y, u, v = y
r = y + (v * 1.13983)
g = y - (u * 0.39465) - (v * 0.58060)
b = y + (u * 2.03211)
return (r, g, b) | 0.013072 |
def to_binary(value, encoding='utf-8'):
"""Convert value to binary string, default encoding is utf-8
:param value: Value to be converted
:param encoding: Desired encoding
"""
if not value:
return b''
if isinstance(value, six.binary_type):
return value
if isinstance(value, six.text_type):
return value.encode(encoding)
return to_text(value).encode(encoding) | 0.002415 |
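A few illustrative calls, assuming the helper above (and the `to_text` counterpart it falls back to) is imported from its module:

```python
assert to_binary(u"héllo") == b"h\xc3\xa9llo"   # text is encoded as UTF-8
assert to_binary(b"raw") == b"raw"              # bytes pass through unchanged
assert to_binary("") == b""                     # falsy values yield empty bytes
```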
async def do_connect(self, args):
"""Connect to the PLM device.
Usage:
connect [device [workdir]]
Arguments:
device: PLM device (default /dev/ttyUSB0)
workdir: Working directory to save and load device information
"""
params = args.split()
device = '/dev/ttyUSB0'
workdir = None
try:
device = params[0]
except IndexError:
if self.tools.device:
device = self.tools.device
try:
workdir = params[1]
except IndexError:
if self.tools.workdir:
workdir = self.tools.workdir
if device:
await self.tools.connect(False, device=device, workdir=workdir)
_LOGGING.info('Connection complete.') | 0.002457 |
def do_function(self, prov, func, kwargs):
'''
Perform a function against a cloud provider
'''
matches = self.lookup_providers(prov)
if len(matches) > 1:
raise SaltCloudSystemExit(
'More than one results matched \'{0}\'. Please specify '
'one of: {1}'.format(
prov,
', '.join([
'{0}:{1}'.format(alias, driver) for
(alias, driver) in matches
])
)
)
alias, driver = matches.pop()
fun = '{0}.{1}'.format(driver, func)
if fun not in self.clouds:
raise SaltCloudSystemExit(
'The \'{0}\' cloud provider alias, for the \'{1}\' driver, does '
'not define the function \'{2}\''.format(alias, driver, func)
)
log.debug(
'Trying to execute \'%s\' with the following kwargs: %s',
fun, kwargs
)
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
if kwargs:
return {
alias: {
driver: self.clouds[fun](
call='function', kwargs=kwargs
)
}
}
return {
alias: {
driver: self.clouds[fun](call='function')
}
} | 0.001907 |
def voxel_seg(self, segfile, MRSfile):
"""
add voxel segmentation info
Parameters
----------
segfile : str
Path to nifti file with segmentation info (e.g. XXXX_aseg.nii.gz)
MRSfile : str
Path to MRS nifti file
"""
total, grey, white, csf, nongmwm, pGrey, pWhite, pCSF, pNongmwm =\
fs.MRSvoxelStats(segfile, MRSfile)
self.pGrey = pGrey
self.pWhite = pWhite
self.pCSF = pCSF
self.pNongmwm = pNongmwm | 0.012367 |
def AnularLiquidacion(self, coe):
"Anular liquidación activa"
ret = self.client.liquidacionAnular(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
)
ret = ret['anulacionReturn']
self.__analizar_errores(ret)
self.Resultado = ret['resultado']
return self.COE | 0.004329 |
def _get_ssl_sock(self):
"""Get raw SSL socket."""
assert self.scheme == u"https", self
raw_connection = self.url_connection.raw._connection
if raw_connection.sock is None:
# sometimes the socket is not yet connected
# see https://github.com/kennethreitz/requests/issues/1966
raw_connection.connect()
return raw_connection.sock | 0.004963 |
def enhance(self):
"""Load metadata from a data service to improve naming.
:raises tvrenamer.exceptions.ShowNotFound:
when unable to find show/series name based on parsed name
:raises tvrenamer.exceptions.EpisodeNotFound:
when unable to find episode name(s) based on parsed data
"""
series, error = self.api.get_series_by_name(self.series_name)
if series is None:
self.messages.append(str(error))
LOG.info(self.messages[-1])
raise exc.ShowNotFound(str(error))
self.series_name = self.api.get_series_name(series)
self.episode_names, error = self.api.get_episode_name(
series, self.episode_numbers, self.season_number)
if self.episode_names is None:
self.messages.append(str(error))
LOG.info(self.messages[-1])
raise exc.EpisodeNotFound(str(error)) | 0.002148 |
def repeat(coro, times=1, step=1, limit=1, loop=None):
"""
Executes the coroutine function ``x`` number of times,
and accumulates results in order as you would use with ``map``.
Execution concurrency is configurable using ``limit`` param.
This function is a coroutine.
Arguments:
coro (coroutinefunction): coroutine function to schedule.
times (int): number of times to execute the coroutine.
step (int): increment iteration step, as with ``range()``.
        limit (int): concurrency execution limit. Defaults to 1.
loop (asyncio.BaseEventLoop): optional event loop to use.
Raises:
TypeError: if coro is not a coroutine function.
Returns:
list: accumulated yielded values returned by coroutine.
Usage::
async def mul_2(num):
return num * 2
await paco.repeat(mul_2, times=5)
# => [2, 4, 6, 8, 10]
"""
assert_corofunction(coro=coro)
# Iterate and attach coroutine for defer scheduling
times = max(int(times), 1)
iterable = range(1, times + 1, step)
# Run iterable times
return (yield from map(coro, iterable, limit=limit, loop=loop)) | 0.000838 |
def deserialize(cls, assoc_s):
"""
Parse an association as stored by serialize().
inverse of serialize
@param assoc_s: Association as serialized by serialize()
@type assoc_s: str
@return: instance of this class
"""
pairs = kvform.kvToSeq(assoc_s, strict=True)
keys = []
values = []
for k, v in pairs:
keys.append(k)
values.append(v)
if keys != cls.assoc_keys:
            raise ValueError('Unexpected key values: %r' % (keys,))
version, handle, secret, issued, lifetime, assoc_type = values
if version != '2':
raise ValueError('Unknown version: %r' % version)
issued = int(issued)
lifetime = int(lifetime)
secret = oidutil.fromBase64(secret)
return cls(handle, secret, issued, lifetime, assoc_type) | 0.002268 |
def done(p_queue, host=None):
    '''Construct a path to the done dir for a queue'''
    if host is not None:
        return _path(_c.FSQ_DONE, root=_path(host, root=hosts(p_queue)))
    return _path(p_queue, _c.FSQ_DONE)