content (string, 35 – 762k chars) | sha1 (string, 40 chars) | id (int64, 0 – 3.66M)
---|---|---|
def login(client, username='', password=''):
"""
Log a specific user in.
:param client: Flask client
:param username: The username
:type username: str
:param password: The password
:type password: str
:return: Flask response
"""
user = dict(login=username, password=password)
response = client.post(url_for('blog.login'), data=user,
follow_redirects=True)
return response | c0a9ac806fc0f1b55ebc76f5f50aa4b8e71436c4 | 7,100 |
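A minimal, hedged usage sketch within a pytest-style test; it assumes a `client` fixture built from the Flask application (so `url_for('blog.login')` can resolve) and uses placeholder credentials:
def test_login_succeeds(client):
    # exercise the helper defined above
    response = login(client, username='admin', password='secret')
    assert response.status_code == 200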
import numpy as np
def func(x, params):
"""The GNFW radial profile.
Args:
x (:obj:`np.ndarray`): Radial coordinate.
params (:obj:`dict`): Dictionary with keys `alpha`, `beta`, `gamma`, `c500`, and `P0` that defines
the GNFW profile shape.
Returns:
Profile (1d :obj:`np.ndarray`).
"""
G, A, B, c500, P0 = params['gamma'], params['alpha'], params['beta'], params['c500'], params['P0']
    prof = np.zeros(x.shape)
    mask = np.greater(x, 0)
    prof[mask] = P0 * ((x[mask]*c500)**-G * (1 + (x[mask]*c500)**A)**((G - B) / A))
#prof[x == 0]=np.inf
return prof | a7510bdcc7e5938ece6d888620372f95d013a114 | 7,101 |
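A brief, hedged example evaluating the profile on a small radial grid; the parameter values are illustrative only (roughly Arnaud-like numbers, not taken from the original source):
x = np.linspace(0.0, 2.0, 5)
params = {'P0': 8.4, 'c500': 1.18, 'gamma': 0.31, 'alpha': 1.05, 'beta': 5.49}
print(func(x, params))  # first element stays 0 because x == 0 is masked out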
def get_double_image_blob(roidb):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
scale_inds = np.random.randint(
0, high=len(cfg.TRAIN.SCALES), size=num_images)
processed_ims = []
im_scales = []
pad = cfg.TRAIN.PADDING
for i in range(num_images):
        # Process image A
        im = get_a_img(roidb[i])
        # Process image B
        other_im = cv2.imread(roidb[i]['b_image'])
h, w, _ = im.shape
b_y1, b_x1, b_y2, b_x2 = roidb[i]['b_bbox']
if cfg.TRAIN.AUG_LRV_BBOX:
b_x1, b_y1, b_x2, b_y2 = boxes_utils.aug_align_box((b_x1,b_y1,b_x2,b_y2), (other_im.shape[1], other_im.shape[0]), pad=0)
tmp = other_im[b_y1:b_y2, b_x1:b_x2, 0]
other_im = cv2.resize(other_im[b_y1:b_y2, b_x1:b_x2, 0], dsize=(w-2*pad, h-2*pad), interpolation=cv2.INTER_CUBIC)
if cfg.HIST_EQ:
if cfg.A_HIST_EQ:
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
other_im = clahe.apply(other_im)
else:
other_im = cv2.equalizeHist(other_im)
other_im = pad_image(other_im, pad=pad)[:, ::-1]
other_im = np.tile(other_im, (3, 1, 1))
other_im = np.transpose(other_im, (1, 2, 0)).astype(np.uint8)
assert im is not None, \
'Failed to read image \'{}\''.format(roidb[i]['image'])
# If NOT using opencv to read in images, uncomment following lines
# if len(im.shape) == 2:
# im = im[:, :, np.newaxis]
# im = np.concatenate((im, im, im), axis=2)
# # flip the channel, since the original one using cv2
# # rgb -> bgr
# im = im[:, :, ::-1]
if roidb[i]['flipped']:
im = im[:, ::-1, :]
other_im = other_im[:, ::-1, :]
# generate the cropped image
if cfg.TRAIN.ONLINE_RANDOM_CROPPING:
x1, y1, x2, y2 = roidb[i]['cropped_bbox']
im = im[y1:y2+1, x1:x2+1, :]
other_im = other_im[y1:y2+1, x1:x2+1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
if cfg.TRAIN.AUGMENTATION:
transform_cv = cv_transforms.Compose([
cv_transforms.ColorJitter(brightness=0.5,
contrast=0.25, gamma=0.5)])
else:
transform_cv = None
# TODO: add augmentation
im, im_scale = blob_utils.prep_im_for_blob(
im, cfg.PIXEL_MEANS, [target_size], cfg.TRAIN.MAX_SIZE, transform_cv)
other_im, other_im_scale = blob_utils.prep_im_for_blob(
other_im, cfg.PIXEL_MEANS, [target_size], cfg.TRAIN.MAX_SIZE, transform_cv)
im_scales.append(im_scale[0])
processed_ims.append(im[0])
processed_ims.append(other_im[0])
# Create a blob to hold the input images [n, c, h, w]
blob = blob_utils.im_list_to_blob(processed_ims)
return blob, im_scales | 85c3ff54df587f5e801edc3e8a68dd804106da68 | 7,102 |
def _readFromSettings(self, key):
"""Loads the settings object associated with the program and
returns the value at the key."""
COMPANY, APPNAME, _ = SELMAGUISettings.getInfo()
COMPANY = COMPANY.split()[0]
APPNAME = APPNAME.split()[0]
settings = QtCore.QSettings(COMPANY, APPNAME)
val = None
try:
val = settings.value(key)
    except Exception:
self._signalObject.errorMessageSignal.emit(
"Wrong setting accessed.")
return val
#Return the right type
if val == "true":
return True
if val == "false":
return False
return float(val) | a96b7b14789bb848fe288e419b1b9ff8c9b35db8 | 7,103 |
import os
from multiprocessing import Pool  # needed for the parallel branch below
def run_overlay_resources_score_motifs(normal_expression_per_tissue_origin_per_TF,
matching_cell_name_representative_dict, motifTFName_TFNames_matches_dict,
cells_assays_dict, cell_tfs, tf_cells, assay_cells_datatypes, header):
"""pairs matching chromosomes in motif_sites_input_dir and all_chromatin_makrs_all_cells_input_dir and calls overlay_resources_score_motifs
Input: moitf instances input dir (one file per chr)
chromatin data collection dir (one file per chr, bed4 format; track pos, track cell#assaytype#value or cell#TFname in case of chip-seq)
Return: a list of motif_overlapping_track files
Precondition: files in motif_sites_input_dir and chromatin_tracks_input_dir should have the same names
Recommended: name files in both dirs as chrNumber, chrX or chrY (where number is between 1-22)
"""
motif_files = []
if not os.path.isdir(params['motif_sites_dir']) and os.path.isfile(params['motif_sites_dir']):
motif_files = [params['motif_sites_dir']]
params['motif_sites_dir'] = "."
else:
motif_files = os.listdir(params['motif_sites_dir'])
chromatin_tracks_files = os.listdir(params['all_chromatin_makrs_all_cells_combined_dir_path'])
if not os.path.exists(params['motifs_overlapping_tracks_output_dir']):
os.mkdir(params['motifs_overlapping_tracks_output_dir'])
motifs_overlapping_tracks_files = []
scored_motifs_overlapping_tracks_files = []
if get_value(params['run_in_parallel_param']) and len(motif_files)>1:
p = Pool(int(params['number_processes_to_run_in_parallel']))
for motif_file in motif_files:
if motif_file.split('/')[-1] in chromatin_tracks_files:#it is assumed for every motif file name there exists a matching file name in the chromatin_tracks_input_dir
motifs_overlapping_tracks_file = params['motifs_overlapping_tracks_output_dir']+'/' + '.'.join(motif_file.split('/')[-1].split('.')[0:-1])+'_overlapping_tracks' + '.bed7'
scored_motifs_chromatin_tracks_output_file = '.'.join(motifs_overlapping_tracks_file.split('.')[0:-1]) + '_scored.bed10'
if not (os.path.exists(motifs_overlapping_tracks_file) and os.path.exists(scored_motifs_chromatin_tracks_output_file)):
if get_value(params['run_in_parallel_param']) and len(motif_files)>1:
p.apply_async(overlay_resources_score_motifs, args=(params['motif_sites_dir']+'/'+motif_file,
params['all_chromatin_makrs_all_cells_combined_dir_path']+'/'+motif_file.split('/')[-1],
scored_motifs_chromatin_tracks_output_file,
motifs_overlapping_tracks_file,
normal_expression_per_tissue_origin_per_TF,
matching_cell_name_representative_dict, motifTFName_TFNames_matches_dict,
cells_assays_dict, cell_tfs, tf_cells, assay_cells_datatypes, header))
else:
overlay_resources_score_motifs(params['motif_sites_dir']+'/'+motif_file,
params['all_chromatin_makrs_all_cells_combined_dir_path']+'/'+motif_file.split('/')[-1],
scored_motifs_chromatin_tracks_output_file,
motifs_overlapping_tracks_file,
normal_expression_per_tissue_origin_per_TF,
matching_cell_name_representative_dict, motifTFName_TFNames_matches_dict,
cells_assays_dict, cell_tfs, tf_cells, assay_cells_datatypes, header)
motifs_overlapping_tracks_files.append(motifs_overlapping_tracks_file)
scored_motifs_overlapping_tracks_files.append(scored_motifs_chromatin_tracks_output_file)
if get_value(params['run_in_parallel_param']) and len(motif_files)>1:
p.close()
p.join()
return motifs_overlapping_tracks_files, scored_motifs_overlapping_tracks_files | aba77b326b0846a8d32b0c215c7722f278af4aaf | 7,104 |
import os
def history_directory(repo_loc: str) -> str:
"""Retrieve the directory containing job logs for the specified repository
Parameters
----------
repo_loc : str
FAIR-CLI repository path
Returns
-------
str
location of the job logs directory
"""
return os.path.join(
fdp_com.find_fair_root(repo_loc), fdp_com.FAIR_FOLDER, "logs"
) | b70806c6f6446489b31e3fb1eb12393c03c2ddef | 7,105 |
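A quick, hedged illustration; the repository path is hypothetical and the exact result depends on fdp_com.FAIR_FOLDER:
log_dir = history_directory(os.path.expanduser("~/my-fair-project"))
print(log_dir)  # e.g. ~/my-fair-project/.fair/logs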
import logging
def is_statu(search_data):
"""
判断是否有参数,且为正常还是停用
:param search_data:
:return:
"""
logging.info('is_statu')
if search_data:
if search_data == '正常':
return '1'
elif search_data == '停用':
return '0'
else:
return search_data
else:
return '' | b9bcc643f2bb73fd692017cf5ff1dee23d528a8f | 7,106 |
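A few hedged input/output examples, following the mapping implemented above:
print(is_statu('正常'))   # '1'
print(is_statu('停用'))   # '0'
print(is_statu('other'))  # 'other' (passed through unchanged)
print(is_statu(''))       # ''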
def get_mysql_exception(errno, msg, sqlstate=None):
"""Get the exception matching the MySQL error
This function will return an exception based on the SQLState. The given
message will be passed on in the returned exception.
The exception returned can be customized using the
mysql.connector.custom_error_exception() function.
Returns an Exception
"""
try:
return _CUSTOM_ERROR_EXCEPTIONS[errno](
msg=msg, errno=errno, sqlstate=sqlstate)
except KeyError:
# Error was not mapped to particular exception
pass
try:
return _ERROR_EXCEPTIONS[errno](
msg=msg, errno=errno, sqlstate=sqlstate)
except KeyError:
# Error was not mapped to particular exception
pass
if not sqlstate:
return DatabaseError(msg=msg, errno=errno)
try:
return _SQLSTATE_CLASS_EXCEPTION[sqlstate[0:2]](
msg=msg, errno=errno, sqlstate=sqlstate)
except KeyError:
        # Fall back to the generic DatabaseError
return DatabaseError(msg=msg, errno=errno, sqlstate=sqlstate) | 4ce4ae51a9a87b2a303aca4de5ac238fc6adf115 | 7,107 |
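A hedged usage sketch; the error number and SQLSTATE below are illustrative values for a "table does not exist" error, and the concrete exception class depends on the module-level mapping tables:
exc = get_mysql_exception(1146, "Table 'test.t1' doesn't exist", sqlstate="42S02")
raise exc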
import os
def clear_pkt_loss():
"""
:return:
"""
pkt_loss_file_path = os.path.join(os.getcwd(), 'pkt_loss.yaml')
if os.path.isfile(pkt_loss_file_path):
os.remove(pkt_loss_file_path)
return pkt_loss_file_path | 0437b0aa910e8135409b54b890e1807208ef153e | 7,108 |
from typing import List
from torchvision import transforms  # assumed source of `transforms`; the original import is not shown
def get_image_resize_transform_steps(config, dataset) -> List:
"""
Resizes the image to a slightly larger square.
"""
assert dataset.original_resolution is not None
assert config.resize_scale is not None
scaled_resolution = tuple(
int(res * config.resize_scale) for res in dataset.original_resolution
)
return [
transforms.Resize(scaled_resolution)
] | d3c1ddd5a072efc853cc7967b70f2e98011d31a4 | 7,109 |
def get_page_title(page_src, meta_data):
"""Returns the title of the page. The title in the meta data section
will take precedence over the H1 markdown title if both are provided."""
return (
meta_data['title']
if 'title' in meta_data and isinstance(meta_data['title'], str)
else get_markdown_title(page_src)
) | e9fc19f9bc1d615c2ba8b4210f9be5212c282e53 | 7,110 |
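A short, hedged example; get_markdown_title is assumed to extract the leading H1 from the page source, as the docstring implies:
page_src = "# Welcome\nSome intro text."
print(get_page_title(page_src, {'title': 'Home'}))  # 'Home' (meta data wins)
print(get_page_title(page_src, {}))                 # falls back to the H1, e.g. 'Welcome'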
import argparse
def get_args():
"""引数解析
Returns:
argparse.Namespace: 引数情報
"""
parser = argparse.ArgumentParser(
prog="app.py",
usage="realtime or audio file",
description="detect music change point.",
add_help=True
)
parser.add_argument(
"--cfg", type=str,
default="./settings.yaml",
help="setting file path"
)
parser.add_argument(
"--file", type=str,
default=None,
help="audio file path"
)
return parser.parse_args() | ee63f4043524bbe393d17f0a25a687540d209faa | 7,111 |
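A hedged usage sketch showing how the parsed options would typically be consumed:
if __name__ == "__main__":
    args = get_args()
    print(args.cfg)   # e.g. ./settings.yaml
    print(args.file)  # None when running in realtime mode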
def make_3dplot(fname_inp, fname_fig, clim=[None,None], vnames=[], data_processor=None, verbose='debug', **kws):
"""
make 3D plot with a radial and longitudinal cuts
"""
logger.setLevel(getattr(logging, verbose.upper()))
assert len(vnames)>0, ' [-] We need names in vnames!\n'
# we'll obtain:
# - same data but in structured way; friendly for plot_surface().
# - fill 'vdict'' with original ASCII data
d = get_array_vars(fname_inp, checks=False, complete_domain_walk=True, vnames=vnames, data_processor=data_processor)
# NOTE: d['data'] is processed sutff built from the original (from
# the ASCII file) simulation data. Such processing was made
# by 'data_processor()'.
# NOTE: a this point, 'vdict' has the original data from the ASCII file.
r, ph, th = d['coords']
    Bmod = d['data']
    logger.debug(' [+] global extremes: %g %g', np.nanmin(Bmod), np.nanmax(Bmod))
    cbmin, cbmax = clim if clim != [None, None] else (np.nanmin(Bmod), np.nanmax(Bmod))
figsize = kws.get('figsize', None) # [inches] 2-tuple
if figsize is None:
# Deduce the 'figsize' as a function of:
# * the dpi of the monitor
# * the desired size in pixels of the figure
# Grab the dpi value of this monitor. Source:
# * https://stackoverflow.com/questions/13714454/specifying-and-saving-a-figure-with-exact-size-in-pixels#13714720
# * https://stackoverflow.com/questions/3129322/how-do-i-get-monitor-resolution-in-python/45467999#45467999
tk = Tkinter.Tk()
dpi_w = tk.winfo_screenwidth()/(tk.winfo_screenmmwidth()/25.4)
dpi_h = tk.winfo_screenheight()/(tk.winfo_screenmmheight()/25.4)
# size in pixels
pixels = kws.get('pixels', [128.,100.])
figsize = (pixels[0]/dpi_w, pixels[1]/dpi_h) # [inches]
#--- figure
fig_stuff = {
'fig' : figure(1, figsize=figsize),
}
fig_stuff.update({
'ax' : fig_stuff['fig'].add_subplot(111, projection='3d'),
'norm' : LogNorm(cbmin,cbmax) if kws.get('cscale','log')=='log' else Normalize(cbmin,cbmax),
})
fig = fig_stuff['fig']
ax = fig_stuff['ax']
norm = fig_stuff['norm']
#--- plot for fixed "r"
o__fixed_r = PlotCut_fixed_r(fig_stuff, d,
ro = kws.get('ro', 5.0),
pazim = kws.get('pazim',-60.),
verbose = verbose,
)
fig, ax = o__fixed_r['FigAx']
r_plot = o__fixed_r['r_plot']
surf_r = o__fixed_r['surf']
#--- plot for fixed "ph"
r_range = kws.get('r_range', [1.0,7.0])
pho = kws.get('pho', 10.0)
    o__fixed_ph = PlotCut_fixed_ph(fig_stuff, d,
        pho = pho,
        r_range=r_range,
        pazim = kws.get('pazim',-60.),
        verbose = verbose,
        )
    fig, ax = o__fixed_ph['FigAx']
    ph_plot = o__fixed_ph['ph_plot']
    surf_ph = o__fixed_ph['surf']
# uniform axis limits
axmin = np.min([getattr(ax,'get_%slim'%dim)() for dim in ('x','y','z')])
axmax = np.max([getattr(ax,'get_%slim'%dim)() for dim in ('x','y','z')])
ax.set_xlim(axmin,axmax)
ax.set_ylim(axmin,axmax)
ax.set_zlim(axmin,axmax)
# perspective azimuth
ax.azim = kws.get('pazim', -60.)
sm = cm.ScalarMappable(cmap=surf_r.cmap, norm=fig_stuff['norm'])
sm.set_array(d['data']); #surf.set_array(var)
# labels && title
ax.set_xlabel('X [Ro]')
ax.set_ylabel('Y [Ro]')
ax.set_zlabel('Z [Ro]')
TITLE = '$r_o$ = %.2g $R_o$' % r_plot +\
'\n($\phi_o$,r1,r2) : ($%g^o,%g\,Ro,%g\,Ro$)' % (pho,r_range[0],r_range[1])
# extract the step number from the input filename
if kws.get('wtimelabel',False):
tlabel = fname_inp.split('/')[-1].split('.h5')[0].split('_')[-1].replace('n','')
TITLE += '\n step: '+tlabel
ax.set_title(TITLE)
#--- colorbar
cb_label = '|B| [G]'
cb_fontsize = 13
axcb = fig.colorbar(sm, ax=ax)
axcb.set_label(cb_label, fontsize=cb_fontsize)
sm.set_clim(vmin=cbmin, vmax=cbmax)
# save figure
#show()
fig.savefig(fname_fig, dpi=kws.get('dpi',100), bbox_inches='tight')
close(fig)
del fig
return None | 29a97a4cbb191f2fb617652701add4dd90d7ba29 | 7,112 |
def saveReplayBuffer():
"""
Flush and save the contents of the Replay Buffer to disk. This is
basically the same as triggering the "Save Replay Buffer" hotkey.
Will return an `error` if the Replay Buffer is not active.
"""
return __createJSON("SaveReplayBuffer", {}) | 4be684acb7751ee6c78825a3e8c702db1b5d18f2 | 7,113 |
import base64
import json
def view_or_basicauth(view, request, test_func, realm = "", *args, **kwargs):
"""
This is a helper function used by both 'logged_in_or_basicauth' and
    'has_perm_or_basicauth' that does the nitty-gritty of determining if they
are already logged in or if they have provided proper http-authorization
and returning the view if all goes well, otherwise responding with a 401.
"""
if request.user is None or not request.user.is_authenticated() or not user_has_student(request.user) or ALWAYS_LOGIN:
key = 'HTTP_AUTHORIZATION'
if key not in request.META:
key = 'REDIRECT_HTTP_AUTHORIZATION'
if key not in request.META:
key = 'HTTP_X_AUTHORIZATION'
if key in request.META:
auth = request.META[key].split()
if len(auth) == 2:
if auth[0].lower() == "basic":
# Basic authentication - this is not an API client
uname, passwd = base64.b64decode(auth[1]).split(':')
user = authenticate(username=uname, password=passwd)
permissions = APIClient.universal_permission_flag()
elif auth[0].lower() == "bearer":
# The client bears a FireRoad-issued token
user, permissions, error = extract_token_info(request, auth[1])
if error is not None:
return HttpResponse(json.dumps(error), status=401, content_type="application/json")
user.backend = 'django.contrib.auth.backends.ModelBackend'
else:
raise PermissionDenied
request.session['permissions'] = permissions
if user is not None:
if user.is_active:
login(request, user)
request.user = user
return view(request, *args, **kwargs)
raise PermissionDenied
#return redirect('login')
else:
if 'permissions' not in request.session:
print("Setting universal permission flag - this should only occur in dev or from FireRoad-internal login.")
request.session['permissions'] = APIClient.universal_permission_flag()
return view(request, *args, **kwargs) | e3bca2ba1f0bf2a82105e7a530bb0ce05f324898 | 7,114 |
import numpy as np
def _subtract_background_one_line(data_line, e_off, e_lin, e_quad, width):
"""
Subtract background from spectra in a single line of the image
Parameters
----------
data_line : ndarray
spectra for one line of an image, size NxM, N-the number of
pixels in the line, M - the number of points in one spectrum (typically 4096)
e_off : float
offset - coefficient for polynomial approximation of energy axis
e_lin : float
linear coefficient of polynomial approximation of energy axis
e_quad : float
quadratic coefficient of polynomial approximation of energy axis
    width : float
        parameter of the snip algorithm used for background estimation
Returns
-------
ndarray of the same shape as data_line. Contains spectra with subtracted background.
"""
data_line = np.copy(data_line)
xx, _ = data_line.shape
for n in range(xx):
bg = snip_method(data_line[n, :],
e_off=e_off,
e_lin=e_lin,
e_quad=e_quad,
width=width)
data_line[n, :] -= bg
return data_line | 10c9928b1e9d576e404ee82a028394381963472f | 7,115 |
def clean_principals_output(sql_result, username, shell=False):
"""
    Transform the SQL principals string into a readable form, falling back to the username when no principals are stored.
"""
if not sql_result:
if shell:
return username
return [username]
if shell:
return sql_result
return sql_result.split(',') | 313d04aef55c7fd689605a19d22c801123624a51 | 7,116 |
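A few hedged examples of the expected behaviour:
print(clean_principals_output("alice,bob", "carol"))              # ['alice', 'bob']
print(clean_principals_output("alice,bob", "carol", shell=True))  # 'alice,bob'
print(clean_principals_output("", "carol"))                       # ['carol'] (fallback)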
def matchesType(value, expected):
"""
Returns boolean for whether the given value matches the given type.
Supports all basic JSON supported value types:
primitive, integer/int, float, number/num, string/str, boolean/bool, dict/map, array/list, ...
"""
result = type(value)
expected = expected.lower()
if result is int:
return expected in ("integer", "number", "int", "num", "primitive")
elif result is float:
return expected in ("float", "number", "num", "primitive")
elif result is str:
return expected in ("string", "str", "primitive")
elif result is bool:
return expected in ("boolean", "bool", "primitive")
elif result is dict:
return expected in ("dict", "map")
elif result is list:
return expected in ("array", "list")
return False | 24949f01a1bc3ae63a120d91549ae06ba52298a8 | 7,117 |
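A few hedged examples illustrating the supported aliases:
print(matchesType(3, "number"))       # True (int counts as a number)
print(matchesType(3.5, "int"))        # False
print(matchesType({"a": 1}, "map"))   # True
print(matchesType([1, 2], "array"))   # True
print(matchesType(None, "primitive")) # False (None is not handled)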
def csv_logging(record):
"""generate output in csv format"""
csv_record = ('{ts},{si},{di},{sp},{dp},{t},"{p}",{h},{v},"{ha}",'
'"{k}","{e}","{m}","{c}"')
if 'hassh' in record:
hasshType = 'client'
kexAlgs = record['ckex']
encAlgs = record['ceacts']
macAlgs = record['cmacts']
cmpAlgs = record['ccacts']
hassh = record['hassh']
hasshAlgorithms = record['hasshAlgorithms']
identificationString = record['client']
elif 'hasshServer' in record:
hasshType = 'server'
kexAlgs = record['skex']
encAlgs = record['seastc']
macAlgs = record['smastc']
cmpAlgs = record['scastc']
hassh = record['hasshServer']
hasshAlgorithms = record['hasshServerAlgorithms']
identificationString = record['server']
csv_record = csv_record.format(
ts=record['timestamp'], si=record['sourceIp'],
di=record['destinationIp'], sp=record['sourcePort'],
dp=record['destinationPort'], t=hasshType, p=identificationString,
h=hassh, v=HASSH_VERSION, ha=hasshAlgorithms, k=kexAlgs, e=encAlgs,
m=macAlgs, c=cmpAlgs)
return csv_record | 53fdbc8e634162199cec94d7cb1a7b737f08310f | 7,118 |
import regex
import re
def harvest_outfile_pass(outtext):
"""Function to read NWChem output file *outtext* and parse important
quantum chemical information from it in
"""
psivar = PreservingDict()
psivar_coord = None
psivar_grad = None
version = ""
module = None
error = "" # TODO (wardlt): The error string is never used.
NUMBER = r"(?x:" + regex.NUMBER + ")"
# Process version
mobj = re.search(
r"^\s+" + r"Northwest Computational Chemistry Package (NWChem)" + r"\s+" + r"(?:<version>\d+.\d+)" + r"\s*$",
outtext,
re.MULTILINE,
)
if mobj:
logger.debug("matched version")
version = mobj.group("version")
# Process SCF
# 1)Fail to converge
mobj = re.search(r"^\s+" + r"(?:Calculation failed to converge)" + r"\s*$", outtext, re.MULTILINE)
if mobj:
logger.debug("failed to converge")
# 2)Calculation converged
else:
mobj = re.search(r"^\s+" + r"(?:Total SCF energy)" + r"\s+=\s*" + NUMBER + r"s*$", outtext, re.MULTILINE)
if mobj:
logger.debug("matched HF")
psivar["HF TOTAL ENERGY"] = mobj.group(1)
# Process Effective nuclear repulsion energy (a.u.)
mobj = re.search(
r"^\s+" + r"Effective nuclear repulsion energy \(a\.u\.\)" + r"\s+" + NUMBER + r"\s*$",
outtext,
re.MULTILINE,
)
if mobj:
logger.debug("matched NRE")
# logger.debug (mobj.group(1))
psivar["NUCLEAR REPULSION ENERGY"] = mobj.group(1)
# Process DFT dispersion energy (a.u.)
mobj = re.search(r"^\s+" + r"(?:Dispersion correction)" + r"\s+=\s*" + NUMBER + r"\s*$", outtext, re.MULTILINE)
if mobj:
logger.debug("matched Dispersion")
logger.debug(mobj.group(1))
psivar["DISPERSION CORRECTION ENERGY"] = mobj.group(1)
# Process DFT (RDFT, RODFT,UDFT, SODFT [SODFT for nwchem versions before nwchem 6.8])
mobj = re.search(r"^\s+" + r"(?:Total DFT energy)" + r"\s+=\s*" + NUMBER + r"\s*$", outtext, re.MULTILINE)
if mobj:
logger.debug("matched DFT")
logger.debug(mobj.group(1))
psivar["DFT TOTAL ENERGY"] = mobj.group(1)
# SODFT [for nwchem 6.8+]
mobj = re.search(
# fmt: off
r'^\s+' + r'Total SO-DFT energy' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Nuclear repulsion energy' + r'\s+' + NUMBER + r'\s*$',
# fmt: on
outtext,
re.MULTILINE,
)
if mobj:
logger.debug("matched DFT")
# print (mobj.group(1))
psivar["DFT TOTAL ENERGY"] = mobj.group(1)
psivar["NUCLEAR REPULSION ENERGY"] = mobj.group(2)
# MCSCF
mobj = re.search(
# fmt: off
r'^\s+' + r'Total MCSCF energy' + r'\s+=\s+' + NUMBER + r'\s*$',
# fmt: off
outtext,
re.MULTILINE | re.DOTALL,
)
if mobj:
logger.debug("matched mcscf 2") # MCSCF energy calculation
psivar["MCSCF TOTAL ENERGY"] = mobj.group(1)
mobj = re.findall(
# fmt: off
r'^\s+' + r'Total SCF energy' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'One-electron energy' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Two-electron energy' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Total MCSCF energy' + r'\s+' + NUMBER + r'\s*$',
# fmt: on
outtext,
re.MULTILINE | re.DOTALL,
)
# for mobj_list in mobj:
if mobj: # Need to change to accommodate find all instances
logger.debug("matched mcscf") # MCSCF energy calculation
psivar["HF TOTAL ENERGY"] = mobj.group(1)
psivar["ONE-ELECTRON ENERGY"] = mobj.group(2)
psivar["TWO-ELECTRON ENERGY"] = mobj.group(3)
psivar["MCSCF TOTAL ENERGY"] = mobj.group(4)
# for mobj_list in mobj:
# for i in mobj_list:
# count += 0
# logger.debug('matched mcscf iteration %i', count)
# psivar['HF TOTAL ENERGY'] = mobj.group(1)
# psivar['ONE-ELECTRON ENERGY'] = mobj.group(2)
# psivar['TWO-ELECTRON ENERGY'] = mobj.group(3)
# psivar['MCSCF TOTAL ENERGY'] = mobj.group(4)
# Process MP2 (Restricted, Unrestricted(RO n/a))
# 1)SCF-MP2
mobj = re.search(
# fmt: off
r'^\s+' + r'SCF energy' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Correlation energy' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Singlet pairs' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Triplet pairs' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Total MP2 energy' + r'\s+' + NUMBER + r'\s*$',
# fmt: on
outtext,
re.MULTILINE,
) # MP2
if mobj:
logger.debug("matched scf-mp2")
module = "mp2grad"
psivar["HF TOTAL ENERGY"] = mobj.group(1)
psivar["MP2 CORRELATION ENERGY"] = mobj.group(2)
psivar["MP2 TOTAL ENERGY"] = mobj.group(5)
# SCS-MP2
mobj = re.search(
# fmt: off
r'^\s+' + r'Same spin pairs' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Same spin scaling factor' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Opposite spin pairs' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Opposite spin scaling fact.' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'SCS-MP2 correlation energy' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Total SCS-MP2 energy' + r'\s+' + NUMBER + r'\s*$',
# fmt: on
outtext,
re.MULTILINE,
)
if mobj:
logger.debug("matched scs-mp2", mobj.groups())
psivar["MP2 SAME-SPIN CORRELATION ENERGY"] = mobj.group(1)
psivar["MP2 OPPOSITE-SPIN CORRELATION ENERGY"] = mobj.group(3)
logger.debug(mobj.group(1)) # ess
logger.debug(mobj.group(2)) # fss
logger.debug(mobj.group(3)) # eos
logger.debug(mobj.group(4)) # fos
logger.debug(mobj.group(5)) # scs corl
logger.debug(mobj.group(6)) # scs-mp2
# 2) DFT-MP2
mobj = re.search(
# fmt: off
r'^\s+' + r'DFT energy' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Unscaled MP2 energy' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Total DFT+MP2 energy' + r'\s+' + NUMBER + r'\s*$',
# fmt: on
outtext,
re.MULTILINE,
)
if mobj:
logger.debug("matched dft-mp2")
psivar["DFT TOTAL ENERGY"] = mobj.group(1)
psivar["MP2 CORRELATION ENERGY"] = mobj.group(2)
psivar["MP2 TOTAL ENERGY"] = mobj.group(3)
# 3) MP2 with CCSD or CCSD(T) calculation (through CCSD(T) directive)
mobj = re.search(
# fmt: off
r'^\s+' + r'MP2 Energy \(coupled cluster initial guess\)' + r'\s*' +
r'^\s+' + r'------------------------------------------' + r'\s*' +
r'^\s+' + r'Reference energy:' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'MP2 Corr\. energy:' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Total MP2 energy:' + r'\s+' + NUMBER + r'\s*$',
# fmt: on
outtext,
re.MULTILINE,
)
if mobj:
logger.debug("matched coupled cluster-mp2")
psivar["MP2 CORRELATION ENERGY"] = mobj.group(2)
psivar["MP2 TOTAL ENERGY"] = mobj.group(3)
mobj3 = re.search(r"Final RHF results", outtext)
if mobj3:
psivar["MP2 DOUBLES ENERGY"] = mobj.group(2)
# 4) Direct MP2
mobj = re.search(
# fmt: off
r'^\s+' + r'SCF energy' + r'\s+' + r"(?P<hf>" + NUMBER + r")" + r'\s*' +
r'^\s+' + r'Correlation energy' + r'\s+' + r"(?P<mp2corl>" + NUMBER + r")" + r'\s*' +
r'^\s+' + r'Total MP2 energy' + r'\s+' + r"(?P<mp2>" + NUMBER + r")" + r'\s*$',
# fmt: on
outtext,
re.MULTILINE,
)
mobj2 = re.search(r"Direct MP2", outtext)
if mobj and mobj2:
logger.debug("matched direct-mp2")
module = "directmp2"
psivar["HF TOTAL ENERGY"] = mobj.group("hf")
psivar["MP2 CORRELATION ENERGY"] = mobj.group("mp2corl")
psivar["MP2 TOTAL ENERGY"] = mobj.group("mp2")
# direct-mp2 is RHF only
psivar["MP2 DOUBLES ENERGY"] = mobj.group("mp2corl")
# 5) RI-MP2
# Process calculation through tce [dertype] command
tce_cumm_corl = 0.0
for cc_name in [r"MBPT\(2\)", r"MBPT\(3\)", r"MBPT\(4\)"]:
mobj = re.search(
# fmt: off
r'^\s+' + cc_name + r'\s+' + r'correlation energy / hartree' + r'\s+=\s*' + NUMBER + r'\s*' +
r'^\s+' + cc_name + r'\s+' + r'total energy / hartree' + r'\s+=\s*' + NUMBER + r'\s*$',
# fmt: on
outtext,
re.MULTILINE,
)
mobj3 = re.search(r"Wavefunction type : Restricted open-shell Hartree-Fock", outtext, re.MULTILINE)
if mobj:
mbpt_plain = cc_name.replace("\\", "").replace("MBPT", "MP").replace("(", "").replace(")", "")
logger.debug(f"matched tce mbpt {mbpt_plain}", mobj.groups())
tce_cumm_corl += float(mobj.group(1))
if mbpt_plain == "MP2":
mobj3 = re.search(r"Wavefunction type : Restricted open-shell Hartree-Fock", outtext, re.MULTILINE)
if mobj3:
psivar[f"{mbpt_plain} DOUBLES ENERGY"] = mobj.group(1)
psivar[f"CURRENT CORRELATION ENERGY"] = mobj.group(1)
psivar[f"CURRENT ENERGY"] = Decimal(mobj.group(1)) + psivar[f"HF TOTAL ENERGY"]
else:
psivar[f"{mbpt_plain} DOUBLES ENERGY"] = mobj.group(1)
psivar[f"{mbpt_plain} CORRELATION ENERGY"] = mobj.group(1)
else:
psivar[f"{mbpt_plain} CORRECTION ENERGY"] = mobj.group(1)
if not mobj3 and mbpt_plain not in ["MP4"]:
psivar[f"{mbpt_plain} DOUBLES ENERGY"] = tce_cumm_corl
psivar[f"{mbpt_plain} TOTAL ENERGY"] = mobj.group(2)
module = "tce"
# TCE dipole- MBPT(n)
mobj2 = re.search(
# fmt: off
r'^\s+' + r'dipole moments / hartree & Debye' + r'\s*' +
r'^\s+' + r'X' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Y' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Z' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Total' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
# fmt: on
outtext,
re.MULTILINE,
)
if mobj2:
mbpt_plain = cc_name.replace("\\", "").replace("MBPT", "MP").replace("(", "").replace(")", "")
logger.debug(f"matched tce {mbpt_plain} dipole moment")
# only pulling Debye
psivar[f"{mbpt_plain} DIPOLE"] = np.array([mobj2.group(1), mobj2.group(3), mobj2.group(5)])
# TCE with () or []
for cc_name in [
r"CCSD\(T\)",
r"CCSD\[T\]",
r"CCSD\(2\)_T",
r"CCSD\(2\)",
r"CCSDT\(2\)_Q",
r"CR-CCSD\[T\]",
r"CR-CCSD\(T\)",
r"LR-CCSD\(T\)",
r"LR-CCSD\(TQ\)-1",
r"CREOMSD\(T\)",
]:
mobj = re.search(
# fmt: off
r'^\s+' + cc_name + r'\s+' + r'correction energy / hartree' + r'\s+=\s*' + NUMBER + r'\s*' +
r'^\s+' + cc_name + r'\s+' + r'correlation energy / hartree' + r'\s+=\s*' + NUMBER + r'\s*' +
r'^\s+' + cc_name + r'\s+' + r'total energy / hartree' + r'\s+=\s*' + NUMBER + r'\s*$',
# fmt: on
outtext,
re.MULTILINE,
)
if mobj:
cc_plain = cc_name.replace("\\", "")
cc_corr = cc_plain.replace("CCSD", "")
logger.debug(f"matched tce cc {cc_plain}")
if cc_plain == "CCSD[T]":
psivar[f"CCSD+T(CCSD) CORRELATION ENERGY"] = mobj.group(2)
psivar[f"CCSD+T(CCSD) TOTAL ENERGY"] = mobj.group(3)
else:
# psivar[f"{cc_corr} CORRECTION ENERGY"] = mobj.group(1)
psivar[f"{cc_plain} CORRELATION ENERGY"] = mobj.group(2)
psivar[f"{cc_plain} TOTAL ENERGY"] = mobj.group(3)
module = "tce"
# TCE dipole with () or []
mobj2 = re.search(
# fmt: off
r'^\s+' + cc_name + r'dipole moments / hartree & Debye' + r'\s*' +
r'^\s+' + r'X' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Y' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Z' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Total' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
# fmt: on
outtext,
re.MULTILINE,
)
if mobj2:
cc_plain = cc_name.replace("\\", "")
cc_corr = cc_plain.replace("CCSD", "")
logger.debug(f"matched tce {cc_plain} dipole moment")
# only pulling Debye
psivar[f"{cc_plain} DIPOLE"] = np.array([mobj2.group(1), mobj2.group(3), mobj2.group(5)])
# Process other TCE cases
for cc_name in [
r"CISD",
r"QCISD",
r"CISDT",
r"CISDTQ",
r"CCD",
r"CC2",
r"CCSD",
r"CCSDT",
r"CCSDTQ",
r"LCCSD",
r"LCCD",
r"CCSDTA",
]:
mobj = re.search(
# fmt: off
r'^\s+' + r'Iterations converged' + r'\s*' +
r'^\s+' + cc_name + r'\s+' + r'correlation energy / hartree' + r'\s+=\s*' + NUMBER + r'\s*' +
r'^\s+' + cc_name + r'\s+' + r'total energy / hartree' + r'\s+=\s*' + NUMBER + r'\s*$',
# fmt: on
outtext,
re.MULTILINE,
)
if mobj:
mobj3 = re.search(r"Wavefunction type : Restricted open-shell Hartree-Fock", outtext, re.MULTILINE)
logger.debug(f"matched {cc_name}", mobj.groups())
if mobj3:
pass
else:
psivar[f"{cc_name} DOUBLES ENERGY"] = mobj.group(1)
psivar[f"{cc_name} CORRELATION ENERGY"] = mobj.group(1)
psivar[f"{cc_name} TOTAL ENERGY"] = mobj.group(2)
module = "tce"
# TCE dipole
mobj2 = re.search(
# fmt: off
r'^\s+' + r'dipole moments / hartree & Debye' + r'\s*' +
r'^\s+' + r'X' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Y' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Z' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Total' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
# fmt: on
outtext,
re.MULTILINE,
)
if mobj2:
logger.debug(f"matched tce dipole moment")
# only pulling Debye
psivar[f"CURRENT DIPOLE"] = np.array([mobj2.group(1), mobj2.group(3), mobj2.group(5)])
# Process CCSD/CCSD(T) using nwchem CCSD/CCSD(T) [dertype] command
mobj = re.search(
# fmt: off
r'^\s+' + r'-----------' + r'\s*' +
r'^\s+' + r'CCSD Energy' + r'\s*' +
r'^\s+' + r'-----------' + r'\s*' +
r'^\s+' + r'Reference energy:' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'CCSD corr\. energy:' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Total CCSD energy:' + r'\s+' + NUMBER + r'\s*$',
# fmt: on
outtext,
re.MULTILINE | re.DOTALL,
)
if mobj:
logger.debug("matched ccsd")
psivar["CCSD CORRELATION ENERGY"] = mobj.group(2)
psivar["CCSD TOTAL ENERGY"] = mobj.group(3)
module = "cc"
mobj = re.search(
# fmt: off
r'^\s+' + r'T\(CCSD\) corr\. energy:' + r'\s+' + r"(?P<tccsdcorr>" + NUMBER + r")" + r'\s*' +
r'^\s+' + r'Total CCSD\+T\(CCSD\) energy:' + r'\s+' + r"(?P<tccsdtot>" + NUMBER + r")" + r'\s*$',
# fmt: on
outtext,
re.MULTILINE | re.DOTALL,
)
if mobj:
logger.debug("matched ccsd+t(ccsd)")
psivar["T(CCSD) CORRECTION ENERGY"] = mobj.group("tccsdcorr")
psivar["CCSD+T(CCSD) CORRELATION ENERGY"] = Decimal(mobj.group("tccsdtot")) - psivar["HF TOTAL ENERGY"]
psivar["CCSD+T(CCSD) TOTAL ENERGY"] = mobj.group("tccsdtot")
module = "cc"
mobj = re.search(
# fmt: off
r'^\s+' + r'--------------' + r'\s*' +
r'^\s+' + r'CCSD\(T\) Energy' + r'\s*' +
r'^\s+' + r'--------------' + r'\s*' + r'(?:.*?)' +
r'^\s+' + r'\(T\) corr\. energy:' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Total CCSD\(T\) energy:' + r'\s+' + NUMBER + r'\s*$',
# fmt: on
outtext,
re.MULTILINE | re.DOTALL,
)
if mobj:
logger.debug("matched ccsd(t)")
psivar["(T) CORRECTION ENERGY"] = mobj.group(1)
psivar["CCSD(T) CORRELATION ENERGY"] = Decimal(mobj.group(2)) - psivar["HF TOTAL ENERGY"]
psivar["CCSD(T) TOTAL ENERGY"] = mobj.group(2)
module = "cc"
mobj = re.search(
# fmt: off
r'^\s+' + r'Spin Component Scaled \(SCS\) CCSD' + r'\s*' +
r'^\s+' + r'-*' + r'\s*' +
r'^\s+' + r'Same spin contribution:' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Same spin scaling factor:' + r'\s+' + NUMBER + r'\s*'
r'^\s+' + r'Opposite spin contribution:' + r'\s+' + NUMBER + r'\s*' +
#r'^\s+' + r'Opposite spin scaling factor' + r'\s+' + NUMBER + r'\s*'
r'^\s+' + r'Opposite spin scaling fact.:' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'SCS-CCSD correlation energy:' + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'Total SCS-CCSD energy:' + r'\s+' + NUMBER + r'\s*$',
# fmt: on
outtext,
re.MULTILINE | re.DOTALL,
)
# SCS-CCSD included
if mobj:
logger.debug("matched scs-ccsd", mobj.groups())
psivar["CCSD SAME-SPIN CORRELATION ENERGY"] = mobj.group(1)
psivar["CCSD OPPOSITE-SPIN CORRELATION ENERGY"] = mobj.group(3)
# psivar['CCSD SAME-SPIN CORRELATION ENERGY'] = psivar['SCS-CCSD SAME-SPIN CORRELATION ENERGY'] = (
# Decimal(mobj.group(1)) * Decimal(mobj.group(2)))
# psivar['CCSD OPPOSITE-SPIN CORRELATION ENERGY'] = psivar['SCS-CCSD OPPOSITE-SPIN CORRELATION ENERGY'] = (
# Decimal(mobj.group(4)) * Decimal(mobj.group(3)))
# psivar['SCS-CCSD CORRELATION ENERGY'] = mobj.group(5)
# psivar['SCS-CCSD TOTAL ENERGY'] = mobj.group(6)
# psivar['CUSTOM SCS-CCSD CORRELATION ENERGY'] = 0.5 * (float(
# psivar['CCSD SAME-SPIN CORRELATION ENERGY']) + float(psivar['CCSD OPPOSITE-SPIN CORRELATION ENERGY']))
# psivar['CUSTOM SCS-CCSD TOTAL ENERGY'] = float(mobj.group(6)) + float(
# psivar['CUSTOM SCS-CCSD CORRERLATION ENERGY'])
# Process EOM-[cc_name] #nwchem_tce_dipole = false
# Parsed information: each symmetry, root excitation energy in eV and total energy in hartree
# psivar name might need to be fixed
# each root excitation energy is extracted from the last iteration of right hand side
mobj = re.findall(
# fmt: off
r'^\s+(?:Excited-state calculation \( )(.*)\s+(?:symmetry\))\s+(?:.*\n)*^\s+EOM-' + cc_name +
# (..) captures symmetry
r'right-hand side iterations\s+(?:.*\n)*(?:Excited state root)\s+' + NUMBER + #root
r'\s*(?:Excitation energy / hartree)\s+.\s+' + NUMBER + #excitation energy hartree
r'\s*(?:/ eV)\s+.\s+' + NUMBER + r'\s*$',
# excitation energy eV
# fmt: on
outtext,
re.MULTILINE | re.DOTALL,
)
# regex should be more dynamic in finding values, need to revisit
# mobj.group(0) = symmetry value
# mobj.group(1) = cc_name
# mobj.group(2) = root number
# mobj.group(3) = excitation energy (hartree)
# mobj.group(4) = excitation energy (eV)
if mobj:
logger.debug(mobj)
ext_energy = {} # dic
ext_energy_list = []
logger.debug(f"matched eom-{cc_name}")
for mobj_list in mobj:
logger.debug("matched EOM-%s - %s symmetry" % (cc_name, mobj_list[0])) # cc_name, symmetry
logger.debug(mobj_list)
count = 0
for line in mobj_list[1].splitlines():
lline = line.split()
logger.debug(lline[1]) # in hartree
logger.debug(lline[2]) # in eV
count += 1
logger.debug("matched excitation energy #%d - %s symmetry" % (count, mobj_list[0]))
ext_energy_list.append(lline[1]) # Collect all excitation energies
sym = str(mobj_list[0])
ext_energy.setdefault(sym, [])
ext_energy[sym].append(lline[1]) # Dictionary: symmetries(key), energies(value)
ext_energy_list.sort(key=float)
for nroot in range(len(ext_energy_list)):
for k, e_val in ext_energy.items():
if ext_energy_list[nroot] in e_val:
symm = k
# in hartree
psivar[
f"EOM-{cc_name} ROOT 0 -> ROOT {nroot + 1} EXCITATION ENERGY - {symm} SYMMETRY"
] = ext_energy_list[nroot]
psivar[f"EOM-{cc_name} ROOT 0 -> ROOT {nroot + 1} TOTAL ENERGY - {symm} SYMMETRY"] = psivar[
f"{cc_name} TOTAL ENERGY"
] + Decimal(ext_energy_list[nroot])
gssym = ""
gs = re.search(r"^\s+" + r"Ground-state symmetry is" + gssym + r"\s*$", outtext, re.MULTILINE)
if gs:
logger.debug("matched ground-state symmetry")
psivar["GROUND-STATE SYMMETRY"] = gssym.group(1)
# Process TDDFT
# 1) Spin allowed
mobj = re.findall(
# fmt: off
r'^\s+(?:Root)\s+(\d+)\s+(.*?)\s+' + NUMBER + r'\s+(?:a\.u\.)\s+' + NUMBER + r"\s+eV\s*" +
r"^\s+" + r"<S2>\s+=\s+" + NUMBER + r"\s*"
#Root | symmetry | a.u. | eV
# unkn units for dip/quad
+ r'\s+(?:.*\n)\s+Transition Moments\s+X\s+'+ NUMBER + r'\s+Y\s+'+ NUMBER+ r'\s+Z\s+'+ NUMBER #dipole
+ r'\s+Transition Moments\s+XX\s+'+ NUMBER + r'\s+XY\s+'+ NUMBER+ r'\s+XZ\s+'+ NUMBER #quadrople
+ r'\s+Transition Moments\s+YY\s+'+ NUMBER + r'\s+YZ\s+'+ NUMBER+ r'\s+ZZ\s+'+ NUMBER #quadrople
+ r"\s+" + r"Dipole Oscillator Strength" + r"\s+" + NUMBER + r"\s*$",
# fmt: on
outtext,
re.MULTILINE,
)
if mobj:
logger.debug("matched TDDFT with transition moments")
for mobj_list in mobj:
logger.debug(mobj_list)
iroot = mobj_list[0]
sym = mobj_list[1]
# in eV
psivar[f"TDDFT ROOT {iroot} EXCITATION ENERGY - {sym} SYMMETRY"] = mobj_list[2]
psivar[f"TDDFT ROOT {iroot} EXCITED STATE ENERGY - {sym} SYMMETRY"] = psivar[
"DFT TOTAL ENERGY"
] + Decimal(mobj_list[2])
psivar[f"TDDFT ROOT 0 -> ROOT {iroot} DIPOLE"] = [
float(mobj_list[5]),
float(mobj_list[6]),
float(mobj_list[7]),
]
psivar[f"TDDFT ROOT 0 -> ROOT {iroot} QUADRUPOLE"] = [
float(mobj_list[8]),
float(mobj_list[9]),
float(mobj_list[10]),
float(mobj_list[9]),
float(mobj_list[11]),
float(mobj_list[12]),
float(mobj_list[10]),
float(mobj_list[12]),
float(mobj_list[13]),
]
psivar[f"TDDFT ROOT 0 -> ROOT {iroot} OSCILLATOR STRENGTH (LEN)"] = mobj_list[14]
# 2) Spin forbidden
mobj = re.findall(
# fmt: off
r'^\s+(?:Root)\s+(\d+)\s+(.*?)\s+' + NUMBER + r'\s(?:a\.u\.)\s+' + NUMBER + r'\s+(?:\w+)' # Root | symmetry | a.u. | eV
+ r'\s+(?:.\w+.\s+.\s+\d+.\d+)' # s2 value
+ r'\s+Transition Moments\s+(?:Spin forbidden)' + r'\s*$',
# fmt: on
outtext,
re.MULTILINE,
)
# mobj.group(0) = Root
# mobj.group(1) = symmetry
# mobj.group(2) a.u.
# mobj.group(3) e.V
# mobj.group(4) Excitation energy
# mobj.group(5) Excited state energy
if mobj:
logger.debug("matched TDDFT - spin forbidden")
for mobj_list in mobj:
#### temporary psivars ####
# in eV
psivar[f"TDDFT ROOT {mobj_list[0]} EXCITATION ENERGY - {mobj_list[2]} SYMMETRY"] = mobj_list[4]
psivar[f"TDDFT ROOT {mobj_list[0]} EXCITED STATE ENERGY - {mobj_list[2]} SYMMETRY"] = psivar[
"DFT TOTAL ENERGY"
            ] + Decimal(qcel.constants.conversion_factor("eV", "hartree")) * Decimal(mobj_list[4])
# psivar['TDDFT ROOT %s %s %s EXCITATION ENERGY' %
# (mobj_list[0], mobj_list[1], mobj_list[2])] = mobj_list[3] # in a.u.
# psivar['TDDFT ROOT %s %s %s EXCITED STATE ENERGY' %(mobj_list[0], mobj_list[1], mobj_list[2])] = \
# psivar['DFT TOTAL ENERGY'] + Decimal(mobj_list[3])
if mobj:
logger.debug("Non-variation initial energy") # prints out energy, 5 counts
# Process geometry
# 1) CHARGE
# Read charge from SCF module
mobj = re.search(
r"^\s+" + r"charge =" + r"\s+" + NUMBER + r"\s*$", outtext, re.MULTILINE | re.IGNORECASE
)
if mobj:
logger.debug("matched charge")
out_charge = int(float(mobj.group(1)))
# Read charge from General information (not scf module)
mobj = re.search(
r"^\s+" + r"Charge :" + r"\s+" + r"(-?\d+)" + r"\s*$", outtext, re.MULTILINE | re.IGNORECASE
)
if mobj:
logger.debug("matched charge")
out_charge = int(float(mobj.group(1)))
# 2) MULTIPLICITY
# Read multiplicity from SCF module
mobj = re.search(
r"^\s+" + r"open shells =" + r"\s+" + r"(\d+)" + r"\s*$", outtext, re.MULTILINE | re.IGNORECASE
)
calcinfo = False
if mobj:
logger.debug("matched multiplicity")
out_mult = int(mobj.group(1)) + 1
# Read multiplicity from SCF module through alpha, beta electrons
mobj = re.search(
# fmt: off
r'^\s+' + r'alpha electrons =' + r'\s+' + r'(\d+)' + r'\s*' +
r'^\s+' + r'beta electrons =' + r'\s+' + r'(\d+)' + r'\s*$',
# fmt: on
outtext,
re.MULTILINE | re.IGNORECASE,
)
if mobj:
logger.debug("matched multiplicity via alpha and beta electrons 0")
out_mult = int(mobj.group(1)) - int(mobj.group(2)) + 1 # nopen + 1
psivar["N ALPHA ELECTRONS"] = mobj.group(1)
psivar["N BETA ELECTRONS"] = mobj.group(2)
mobj = re.search(
# fmt: off
r"^\s+" + r"Basis functions" + r"\s+=\s+" + r"(?P<nbf>\d+)" + r"\s*" +
r"^\s+" + r"Molecular orbitals" + r"\s+=\s+" + r"(?P<nmo>\d+)" + r"\s*" +
r"^\s+" + r"Frozen core" + r"\s+=\s+" + r"(?P<nfc>\d+)" + r"\s*" +
r"^\s+" + r"Frozen virtuals" + r"\s+=\s+" + r"(?P<nfv>\d+)" + r"\s*" +
r"^\s+" + r"Active alpha occupied" + r"\s+=\s+" + r"(?P<nao>\d+)" + r"\s*" +
r"^\s+" + r"Active beta occupied" + r"\s+=\s+" + r"(?P<nbo>\d+)" + r"\s*" +
r"^\s+" + r"Active alpha virtual" + r"\s+=\s+" + r"(?P<nav>\d+)" + r"\s*" +
r"^\s+" + r"Active beta virtual" + r"\s+=\s+" + r"(?P<nbv>\d+)" + r"\s*",
# fmt: on
outtext,
re.MULTILINE | re.IGNORECASE,
)
if mobj:
logger.debug("matched alpha and beta electrons 1", mobj.groups())
calcinfo = True
psivar["N BASIS FUNCTIONS"] = mobj.group("nbf")
psivar["N MOLECULAR ORBITALS"] = mobj.group("nmo")
psivar["N ALPHA ELECTRONS"] = int(mobj.group("nao")) + int(mobj.group("nfc"))
psivar["N BETA ELECTRONS"] = int(mobj.group("nbo")) + int(mobj.group("nfc"))
mobj = re.search(
# fmt: off
r"^\s+" + "No. of electrons" + r"\s+:\s+" + r"(?P<ne>\d+)" + r"\s*" +
r"^\s+" + "Alpha electrons" + r"\s+:\s+" + r"(?P<nae>\d+)" + r"\s*" +
r"^\s+" + "Beta electrons" + r"\s+:\s+" + r"(?P<nbe>\d+)" + r"\s*" +
r"^\s+" + "No. of orbitals" + r"\s+:\s+" + r"(?P<no>\d+)" + r"\s*" +
r"^\s+" + "Alpha orbitals" + r"\s+:\s+" + r"(?P<namo>\d+)" + r"\s*" +
r"^\s+" + "Beta orbitals" + r"\s+:\s+" + r"(?P<nbmo>\d+)" + r"\s*" +
r"^\s+" + "Alpha frozen cores" + r"\s+:\s+" + r"(?P<nafc>\d+)" + r"\s*" +
r"^\s+" + "Beta frozen cores" + r"\s+:\s+" + r"(?P<nbfc>\d+)" + r"\s*" +
r"^\s+" + "Alpha frozen virtuals" + r"\s+:\s+" + r"(?P<nafv>\d+)" + r"\s*" +
r"^\s+" + "Beta frozen virtuals" + r"\s+:\s+" + r"(?P<nbfv>\d+)" + r"\s*" +
r"^\s+" + "Spin multiplicity" + r"\s+:\s+\w+" + r"\s*" +
r"^\s+" + "Number of AO functions" + r"\s+:\s+" + r"(?P<nbf>\d+)" + r"\s*",
# fmt: on
outtext,
re.MULTILINE | re.IGNORECASE,
)
if mobj and not calcinfo:
logger.debug("matched alpha and beta electrons 2", mobj.groups())
calcinfo = True
psivar["N BASIS FUNCTIONS"] = mobj.group("nbf")
psivar["N MOLECULAR ORBITALS"] = (int(mobj.group("namo")) + int(mobj.group("nbmo"))) / 2
psivar["N ALPHA ELECTRONS"] = mobj.group("nae")
psivar["N BETA ELECTRONS"] = mobj.group("nbe")
mobj = re.search(
# fmt: off
r"^\s+" + "functions" + r"\s+=\s+" + r"(?P<nbf>\d+)" + r"\s*" +
r"^\s+" + "atoms" + r"\s+=\s+" + r"(?P<nat>\d+)" + r"\s*" +
r"^\s+" + "alpha electrons" + r"\s+=\s+" + r"(?P<nae>\d+)" + r"\s*" +
r"^\s+" + "beta electrons" + r"\s+=\s+" + r"(?P<nbe>\d+)" + r"\s*",
# fmt: on
outtext,
re.MULTILINE | re.IGNORECASE,
)
if mobj and not calcinfo:
logger.debug("matched alpha and beta electrons 3", mobj.groups())
calcinfo = True
psivar["N BASIS FUNCTIONS"] = mobj.group("nbf")
psivar["N MOLECULAR ORBITALS"] = mobj.group("nbf")
psivar["N ALPHA ELECTRONS"] = mobj.group("nae")
psivar["N BETA ELECTRONS"] = mobj.group("nbe")
mobj = re.search(
# fmt: off
r"^\s+" + "functions" + r"\s+=\s+" + r"(?P<nbf>\d+)" + r"\s*" +
r"^\s+" + "atoms" + r"\s+=\s+" + r"(?P<nat>\d+)" + r"\s*" +
r"^\s+" + "closed shells" + r"\s+=\s+" + r"(?P<ncl>\d+)" + r"\s*" +
r"^\s+" + "open shells" + r"\s+=\s+" + r"(?P<nop>\d+)" + r"\s*",
# fmt: on
outtext,
re.MULTILINE | re.IGNORECASE,
)
if mobj and not calcinfo:
logger.debug("matched alpha and beta electrons 4", mobj.groups())
calcinfo = True
psivar["N BASIS FUNCTIONS"] = mobj.group("nbf")
psivar["N MOLECULAR ORBITALS"] = mobj.group("nbf") # BAD! TODO
psivar["N ALPHA ELECTRONS"] = int(mobj.group("ncl")) + int(mobj.group("nop"))
psivar["N BETA ELECTRONS"] = mobj.group("ncl")
# Read multiplicity from General information (not scf module)
mobj = re.search(
r"^\s+" + r"Spin multiplicity:" + r"\s+" + r"(\d+)" + r"\s*$", outtext, re.MULTILINE | re.IGNORECASE
)
if mobj:
logger.debug("matched multiplicity")
out_mult = int(mobj.group(1))
# 3) Initial geometry
mobj = re.search(
# fmt: off
r'^\s+' + r'Geometry' + r'.*' + r'\s*' +
r'^\s+' + r'(?:-+)\s*' + r'\s+' + r'\n' +
r'^\s' + r'Output coordinates in ' + r'(.*?)' + r'\s' + r'\(scale by' + r'.*' + r'\s' + r'to convert to a\.u\.\)' + r'\s+' + r'\n' +
r'^\s+' + r'No\.\ Tag Charge X Y Z' + r'\s*' +
r'^\s+' + r'---- ---------------- ---------- -------------- -------------- --------------' + r'\s*' +
r'((?:\s+([1-9][0-9]*)+\s+([A-Z][a-z]*)+\s+\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s*\n)+)' + r'\s*$',
# fmt: on
outtext,
re.MULTILINE | re.IGNORECASE,
)
if mobj:
logger.debug("matched geom")
# dinky molecule w/ charge and multiplicity
if mobj.group(1) == "angstroms":
molxyz = "%d \n%d %d tag\n" % (len(mobj.group(2).splitlines()), out_charge, out_mult) # unit = angstrom
for line in mobj.group(2).splitlines():
lline = line.split()
molxyz += "%s %16s %16s %16s\n" % (lline[-5], lline[-3], lline[-2], lline[-1])
# Jiyoung was collecting charge (-4)? see if this is ok for ghosts
# Tag , X, Y, Z
psivar_coord = Molecule(
validate=False,
**qcel.molparse.to_schema(
qcel.molparse.from_string(molxyz, dtype="xyz+", fix_com=True, fix_orientation=True)["qm"],
dtype=2,
),
)
else: # unit = a.u.
molxyz = "%d au\n%d %d tag\n" % (len(mobj.group(2).splitlines()), out_charge, out_mult)
for line in mobj.group(2).splitlines():
lline = line.split()
molxyz += "%s %16s %16s %16s\n" % (int(float(lline[-4])), lline[-3], lline[-2], lline[-1])
# Tag , X, Y, Z
psivar_coord = Molecule(
validate=False,
**qcel.molparse.to_schema(
qcel.molparse.from_string(molxyz, dtype="xyz+", fix_com=True, fix_orientation=True)["qm"],
dtype=2,
),
)
# Process gradient
mobj = re.search(
# fmt: off
r'^\s+' + r'.*' + r'ENERGY GRADIENTS' + r'\s*' + r'\s+' + r'\n' +
r'^\s+' + r'atom coordinates gradient' + r'\s*' +
r'^\s+' + r'x y z x y z' + r'\s*' +
        r'((?:\s+([1-9][0-9]*)+\s+([A-Z][a-z]*)+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s*\n)+)' + r'\s*$',
# fmt: on
outtext,
re.MULTILINE,
)
if mobj:
logger.debug("matched molgrad")
atoms = []
psivar_grad = []
for line in mobj.group(1).splitlines():
lline = line.split() # Num, Tag, coord x, coord y, coord z, grad x, grad y, grad z
# print (lline)
if lline == []:
pass
else:
atoms.append(lline[1]) # Tag
psivar_grad.append([float(lline[-3]), float(lline[-2]), float(lline[-1])])
psivar_grad = np.array(psivar_grad).reshape((-1, 3))
# Process dipole (Properties)
mobj = re.search(
# fmt: off
r'^\s+' + r'Dipole moment' + r'\s+' + NUMBER + r'\s+' + r'A\.U\.' + r'\s*' +
r'^\s+' + r'DMX' + r'\s+' + NUMBER + r'.*' +
r'^\s+' + r'DMY' + r'\s+' + NUMBER + r'.*' +
r'^\s+' + r'DMZ' + r'\s+' + NUMBER + r'.*' +
r'^\s+' + r'.*' +
r'^\s+' + r'Total dipole' + r'\s+' + NUMBER + r'\s+' + r'A\.U\.' + r'\s*' +
r'^\s+' + r'Dipole moment' + r'\s+' + NUMBER + r'\s' + r'Debye\(s\)' + r'\s*' +
r'^\s+' + r'DMX' + r'\s+' + NUMBER + r'.*' +
r'^\s+' + r'DMY' + r'\s+' + NUMBER + r'.*' +
r'^\s+' + r'DMZ' + r'\s+' + NUMBER + r'.*' +
r'^\s+' + r'.*' +
r'^\s+' + r'Total dipole' + r'\s+' + NUMBER + r'\s' + r'DEBYE\(S\)' + r'\s*$',
# fmt: on
outtext,
re.MULTILINE,
)
if mobj:
logger.debug("matched total dipole")
# UNIT = DEBYE(S)
psivar[f"CURRENT DIPOLE"] = d2au * np.array([mobj.group(7), mobj.group(8), mobj.group(9)])
# total?
# Process error code
mobj = re.search(
# fmt: off
r'^\s+' + r'current input line \:' + r'\s*' +
r'^\s+' + r'([1-9][0-9]*)' + r'\:' + r'\s+' + r'(.*)' + r'\s*' +
r'^\s+' r'------------------------------------------------------------------------' + r'\s*' +
r'^\s+' r'------------------------------------------------------------------------' + r'\s*' +
r'^\s+' + r'There is an error in the input file' + r'\s*$',
# fmt: on
outtext,
re.MULTILINE,
)
if mobj:
logger.debug("matched error")
# print (mobj.group(1)) #error line number
# print (mobj.group(2)) #error reason
psivar["NWCHEM ERROR CODE"] = mobj.group(1)
# TODO process errors into error var
# Get the size of the basis sets, etc
mobj = re.search(r"No. of atoms\s+:\s+(\d+)", outtext, re.MULTILINE)
if mobj:
psivar["N ATOMS"] = mobj.group(1)
mobj = re.search(
r"No. of electrons\s+:\s+(\d+)\s+Alpha electrons\s+:\s+(\d+)\s+Beta electrons\s+:\s+(\d+)",
outtext,
re.MULTILINE,
)
if mobj:
psivar["N ALPHA ELECTRONS"] = mobj.group(2)
psivar["N BETA ELECTRONS"] = mobj.group(3)
if psivar["N ALPHA ELECTRONS"] == psivar["N BETA ELECTRONS"]:
# get HOMO and LUMO energy
mobj = re.search(
r"Vector"
+ r"\s+"
+ r"%d" % (psivar["N ALPHA ELECTRONS"])
+ r"\s+"
+ r"Occ="
+ r".*"
+ r"\s+"
+ r"E="
+ r"([+-]?\s?\d+[.]\d+)"
+ r"[D]"
+ r"([+-]0\d)",
outtext,
re.MULTILINE,
)
if mobj:
homo = float(mobj.group(1)) * (10 ** (int(mobj.group(2))))
psivar["HOMO"] = np.array([round(homo, 10)])
mobj = re.search(
r"Vector"
+ r"\s+"
+ r"%d" % (psivar["N ALPHA ELECTRONS"] + 1)
+ r"\s+"
+ r"Occ="
+ r".*"
+ r"\s+"
+ r"E="
+ r"([+-]?\s?\d+[.]\d+)"
+ r"[D]"
+ r"([+-]0\d)",
outtext,
re.MULTILINE,
)
if mobj:
lumo = float(mobj.group(1)) * (10 ** (int(mobj.group(2))))
psivar["LUMO"] = np.array([round(lumo, 10)])
mobj = re.search(r"AO basis - number of functions:\s+(\d+)\s+number of shells:\s+(\d+)", outtext, re.MULTILINE)
if mobj:
psivar["N MOLECULAR ORBITALS"] = mobj.group(1)
psivar["N BASIS FUNCTIONS"] = mobj.group(1)
# Search for Center of charge
mobj = re.search(
r"Center of charge \(in au\) is the expansion point"
+ r"\n"
+ r"\s+"
+ r"X\s+=\s+([+-]?\d+[.]\d+)"
+ r"\s+"
+ r"Y\s+=\s+([+-]?\d+[.]\d+)"
+ r"\s+"
+ r"Z\s+=\s+([+-]?\d+[.]\d+)",
outtext,
re.MULTILINE,
)
if mobj:
psivar["CENTER OF CHARGE"] = np.array([mobj.group(1), mobj.group(2), mobj.group(3)])
mobj = re.search(
r"Dipole moment"
+ r".*?"
+ r"A\.U\."
+ r"\s+"
+ r"DMX\s+([+-]?\d+[.]\d+)\s+"
+ r"DMXEFC\s+[+-]?\d+[.]\d+\s+"
+ r"DMY\s+([+-]?\d+[.]\d+)\s+"
+ r"DMYEFC\s+[+-]?\d+[.]\d+\s+"
+ r"DMZ\s+([+-]?\d+[.]\d+)\s+"
+ r"DMZEFC\s+[+-]?\d+[.]\d+\s+"
+ r"\-EFC\-"
+ r".*?"
+ r"A\.U\.\s+"
+ r"Total dipole\s+([+-]?\d+[.]\d+\s+)",
outtext,
re.MULTILINE,
)
# + r"DMY\s+" + r"([+-]?\d+[.]\d+)", outtext, re.MULTILINE)
if mobj:
psivar["DIPOLE MOMENT"] = np.array([mobj.group(1), mobj.group(2), mobj.group(3)])
psivar["TOTAL DIPOLE MOMENT"] = mobj.group(4)
# Process CURRENT energies (TODO: needs better way)
if "HF TOTAL ENERGY" in psivar:
psivar["SCF TOTAL ENERGY"] = psivar["HF TOTAL ENERGY"]
psivar["CURRENT REFERENCE ENERGY"] = psivar["HF TOTAL ENERGY"]
psivar["CURRENT ENERGY"] = psivar["HF TOTAL ENERGY"]
if "MCSCF TOTAL ENERGY" in psivar:
psivar["CURRENT REFERENCE ENERGY"] = psivar["MCSCF TOTAL ENERGY"]
psivar["CURRENT CORRELATION ENERGY"] = 0.0
psivar["CURRENT ENERGY"] = psivar["MCSCF TOTAL ENERGY"]
if "MP2 TOTAL ENERGY" in psivar and "MP2 CORRELATION ENERGY" in psivar:
psivar["CURRENT CORRELATION ENERGY"] = psivar["MP2 CORRELATION ENERGY"]
psivar["CURRENT ENERGY"] = psivar["MP2 TOTAL ENERGY"]
if "MP3 TOTAL ENERGY" in psivar and "MP3 CORRECTION ENERGY" in psivar:
psivar["CURRENT CORRELATION ENERGY"] = psivar["MP3 TOTAL ENERGY"] - psivar["HF TOTAL ENERGY"]
psivar["CURRENT ENERGY"] = psivar["MP3 TOTAL ENERGY"]
if "MP4 TOTAL ENERGY" in psivar and "MP4 CORRECTION ENERGY" in psivar:
psivar["CURRENT CORRELATION ENERGY"] = psivar["MP4 TOTAL ENERGY"] - psivar["HF TOTAL ENERGY"]
psivar["CURRENT ENERGY"] = psivar["MP4 TOTAL ENERGY"]
if "CISD TOTAL ENERGY" in psivar and "CISD CORRELATION ENERGY" in psivar:
psivar["CURRENT CORRELATION ENERGY"] = psivar["CISD CORRELATION ENERGY"]
psivar["CURRENT ENERGY"] = psivar["CISD TOTAL ENERGY"]
if "QCISD TOTAL ENERGY" in psivar and "QCISD CORRELATION ENERGY" in psivar:
psivar["CURRENT CORRELATION ENERGY"] = psivar["QCISD CORRELATION ENERGY"]
psivar["CURRENT ENERGY"] = psivar["QCISD TOTAL ENERGY"]
if "LCCD TOTAL ENERGY" in psivar and "LCCD CORRELATION ENERGY" in psivar:
psivar["CURRENT CORRELATION ENERGY"] = psivar["LCCD CORRELATION ENERGY"]
psivar["CURRENT ENERGY"] = psivar["LCCD TOTAL ENERGY"]
if "LCCSD TOTAL ENERGY" in psivar and "LCCSD CORRELATION ENERGY" in psivar:
psivar["CURRENT CORRELATION ENERGY"] = psivar["LCCSD CORRELATION ENERGY"]
psivar["CURRENT ENERGY"] = psivar["LCCSD TOTAL ENERGY"]
if "DFT TOTAL ENERGY" in psivar:
psivar["CURRENT REFERENCE ENERGY"] = psivar["DFT TOTAL ENERGY"]
psivar["CURRENT ENERGY"] = psivar["DFT TOTAL ENERGY"]
# Process TCE CURRENT energies
# Need to be fixed
# HOW TO KNOW options['NWCHEM']['NWCHEM_TCE']['value']?
# TODO: CURRENT ENERGY = TCE ENERGY
if "%s TOTAL ENERGY" % (cc_name) in psivar and ("%s CORRELATION ENERGY" % (cc_name) in psivar):
psivar["CURRENT CORRELATION ENERGY"] = psivar["%s CORRELATION ENERGY" % (cc_name)]
psivar["CURRENT ENERGY"] = psivar["%s TOTAL ENERGY" % (cc_name)]
if "CCD TOTAL ENERGY" in psivar and "CCD CORRELATION ENERGY" in psivar:
psivar["CURRENT CORRELATION ENERGY"] = psivar["CCD CORRELATION ENERGY"]
psivar["CURRENT ENERGY"] = psivar["CCD TOTAL ENERGY"]
if "CCSD TOTAL ENERGY" in psivar and "CCSD CORRELATION ENERGY" in psivar:
psivar["CURRENT CORRELATION ENERGY"] = psivar["CCSD CORRELATION ENERGY"]
psivar["CURRENT ENERGY"] = psivar["CCSD TOTAL ENERGY"]
if "CCSD+T(CCSD) TOTAL ENERGY" in psivar and "CCSD+T(CCSD) CORRELATION ENERGY" in psivar:
psivar["CURRENT CORRELATION ENERGY"] = psivar["CCSD+T(CCSD) CORRELATION ENERGY"]
psivar["CURRENT ENERGY"] = psivar["CCSD+T(CCSD) TOTAL ENERGY"]
if "CCSD(T) TOTAL ENERGY" in psivar and "CCSD(T) CORRELATION ENERGY" in psivar:
psivar["CURRENT CORRELATION ENERGY"] = psivar["CCSD(T) CORRELATION ENERGY"]
psivar["CURRENT ENERGY"] = psivar["CCSD(T) TOTAL ENERGY"]
if "CCSDT TOTAL ENERGY" in psivar and "CCSDT CORRELATION ENERGY" in psivar:
psivar["CURRENT CORRELATION ENERGY"] = psivar["CCSDT CORRELATION ENERGY"]
psivar["CURRENT ENERGY"] = psivar["CCSDT TOTAL ENERGY"]
if ("EOM-%s TOTAL ENERGY" % (cc_name) in psivar) and ("%s EXCITATION ENERGY" % (cc_name) in psivar):
psivar["CURRENT ENERGY"] = psivar["EOM-%s TOTAL ENERGY" % (cc_name)]
psivar["CURRENT EXCITATION ENERGY"] = psivar["%s EXCITATION ENERGY" % (cc_name)]
psivar[f"N ATOMS"] = len(psivar_coord.symbols)
return psivar, psivar_coord, psivar_grad, version, module, error | 5caeb239f7ed51f5284c5c2b7813bdafb34da20c | 7,119 |
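A hedged usage sketch; the output file name is hypothetical, and the parser simply scans whatever NWChem output text it is given:
with open("h2o_scf.out") as fh:
    outtext = fh.read()
psivar, mol, grad, version, module, error = harvest_outfile_pass(outtext)
print(version, module)
print(psivar.get("HF TOTAL ENERGY"), psivar.get("CURRENT ENERGY"))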
import os
import nibabel as nib  # assumed standard aliases for the unshown imports
import numpy as np
import pandas as pd
def create_denoising_dataset(epi_path,log_path,acqtimes_path,rot_dir=-1, interactive=True, img_dir=None, slice_indices=None, inner_mask_level=.004):
"""Generates masks and timeseries for analysis.
Parameters
----------
epi_path : str
Path to the phantom data.
log_path : str
Path to the BrainDancer movement log file.
acqtimes_path : str
Path to the slice timing file.
interactive : bool, optional
If True (default), prompt user for decision inputs
img_dir : str, optional
If specified, displays will be saved to file. Otherwise, display onscreen.
slice_indices : tuple, optional
start and end indices of slices to include for processing.
    inner_mask_level : float, optional
        Threshold for segmenting the phantom.
    rot_dir : int, optional
        Rotation direction flag passed through to simulate_inner (default -1).
    """
data_read = nib.load(epi_path)
if interactive:
display_all_slices(data_read,0)
start = int(input('Enter the first good slice: '))
end = int(input('Enter the last good slice: '))
# non-interactive. select specified slices
elif slice_indices is not None:
start = slice_indices[0]
end = slice_indices[1]
slice_img_path = os.path.join(img_dir, 'slices.png')
display_all_slices(data_read,0, file=slice_img_path, subset=np.arange(start, end+1))
# non-interactive, but empty slice list
else:
raise TypeError('slice_indices cannot be None in non-interactive mode')
with open(log_path, 'r') as fp:
line = fp.readline()
if line.startswith('Sequence'):
# skip lines
log = pd.read_csv(log_path, header=2)
else:
log = pd.read_csv(log_path)
motion_time = np.max(log['Tmot'].values)
acq_times = pd.read_csv(acqtimes_path)
motionfree = acq_times[acq_times['Time']>motion_time]['Slice'].values
total_slices = []
for i in list(motionfree):
if start<= i <= end:
total_slices.append(i)
print('Selected Slices for Analysis are: ', total_slices)
imask = []
cen = []
imask_metrics = []
center_rotation_all = []
omask = []
detect_remove = []
updated_total_slices = []
good_slices = []
for i in range(len(total_slices)):
if interactive:
level_img_path = None
center_img_path = None
else:
level_img_path = os.path.join(img_dir, f'contours_{i:03d}.png')
center_img_path = os.path.join(img_dir, f'centers_{i:03d}.png')
img_complete,cy_complete,cx_complete, radii_complete = inner_mask(epi_path,total_slices[i],volume_num=0,lvl=inner_mask_level,rad1=7,rad2=50,step=1, img_path=level_img_path)
center_rotation = cen_rotation(epi_path,total_slices[i],img_complete,cy_complete,cx_complete,radii_complete, canny_sgm=1, img_path=center_img_path)
if interactive:
detect = int(input('Enter 1 if this slice is good'))
good_slices.append(detect)
center_rotation_all.append(center_rotation)
imask.append(img_complete)
updated_total_slices.append(total_slices[i])
# TO DO - Include the option to generate outer mask and corresponding time_series, with something like below:
#out_mask = outer_mask(data_read,findcartridge(data_read,total_slices[i],0),total_slices[i],0)
#omask.append(out_mask)
# update good slices
if not interactive:
row_med = np.median([x[0] for x in center_rotation_all])
col_med = np.median([x[1] for x in center_rotation_all])
for row_cor,col_cor in center_rotation_all:
if np.all([row_cor <= row_med+1, row_cor >= row_med-1, col_cor <= col_med+1, col_cor >= col_med-1]):
good_slices.append(1)
else:
good_slices.append(0)
print(good_slices)
print(center_rotation_all)
center_rotation_all = [x for good,x in zip(good_slices, center_rotation_all) if good==1 ]
imask = [x for good,x in zip(good_slices, imask) if good==1 ]
updated_total_slices = [x for good,x in zip(good_slices, updated_total_slices) if good==1 ]
print(good_slices)
print(center_rotation_all)
if img_dir is not None:
motion_img = os.path.join(img_dir, 'motion.png')
else:
motion_img = None
positions = phantom_motion(log_path, img_path=motion_img)
synth = create_mean_slices(data_read,updated_total_slices,imask,200)
simulated_data = simulate_inner(synth,positions,updated_total_slices,imask,center_rotation_all,rot_dir)
scanner_inner = scanner_output(data_read,positions,updated_total_slices,imask,200) # add omask in future for outer cylinder
return simulated_data, scanner_inner, imask, center_rotation_all, updated_total_slices | fca174693832f36d9e2587ddb1b53d69ab753371 | 7,120 |
import itertools
from collections import Counter
def get_top_words(keywords):
"""
Orders the topics from most common to least common for displaying.
"""
keywords = itertools.chain.from_iterable(map(str.split, keywords))
    top_words = [word for word, _ in Counter(keywords).most_common()]
return top_words | 307a5a0e0e900e411097a84d19daf0ca7187c9bc | 7,121 |
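# Usage sketch (illustrative, not part of the original snippet): phrases are split on
# whitespace and the resulting words are returned most common first.
example_keywords = ["deep learning", "graph learning", "deep graph networks"]
print(get_top_words(example_keywords))  # -> ['deep', 'learning', 'graph', 'networks']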
import sys
def is_mac():
"""
Check if mac os
>>> print(is_mac())
True or False
Returns
-------
bool: bool
True or False.
"""
if sys.platform == 'darwin':
return True
return False | d6f6dabaafe19fd2c92945a1e132b9f10852ec9a | 7,122 |
import os
def login(i):
"""
Input: {
(sudo) - if 'yes', add sudo
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
s='docker login'
if i.get('sudo','')=='yes':
s='sudo '+s
os.system(s)
return {'return':0} | 2272134b81ee330ef56a36158886369ae7496ade | 7,123 |
def obj_prop(*args, **kwargs):
"""
Build an object property wrapper.
    If no arguments (or a single ``None`` argument) are supplied, return a dummy property.
If one argument is supplied, return :class:`AttrObjectProperty` for a property with a given name.
Otherwise, return :class:`MethodObjectProperty` property.
"""
if len(args)==0:
return empty_object_property()
if len(args)==1:
if args[0] is None: # empty property
return empty_object_property()
return AttrObjectProperty(args[0],**kwargs)
elif len(args)<=3:
return MethodObjectProperty(*args,**kwargs)
else:
raise ValueError("invalid number of arguments") | 9b2e7e28c7b68cafdcd39a447a5dcb15c493e399 | 7,124 |
def CreateDataObject(**kwargs):
"""
Creates a new Data Object by issuing an identifier if it is not
provided.
:param kwargs:
:return:
"""
# TODO Safely create
body = kwargs['body']['data_object']
doc = create(body, 'data_objects')
return({"data_object_id": doc['id']}, 200) | a0c6845457f097769295b0d3d3694a6df544f2a9 | 7,125 |
from typing import Iterable
def _check_name(name: str, invars: Iterable[str]) -> str:
"""Check if count is valid"""
if name is None:
name = _n_name(invars)
if name != "n":
        logger.warning(
            "Storing counts in `%s`, as `n` already present in input. "
            'Use `name="new_name"` to pick a new name.',
            name,
        )
elif not isinstance(name, str):
raise ValueError("`name` must be a single string.")
return name | 1dd4fce937e9a48a64147b9c4a03f713e7f7c433 | 7,126 |
def get_documents(corpus_tag):
"""
Returns a list of documents with a particular corpus tag
"""
values = db.select("""
SELECT doc_id
FROM document_tag
WHERE tag=%(tag)s
ORDER BY doc_id
""", tag=corpus_tag)
return [x.doc_id for x in values] | 933dd00e76475fbd14e4cd8b3dff9e918d98ff46 | 7,127 |
from rdkit import Chem
from rdkit.Chem import Draw
def draw_with_indeces(settings):
    """
    Drawing function that displays the input SMILES string with all atom indices
"""
m = Chem.MolFromSmiles(settings['SMILESSTRING'])
dm = Draw.PrepareMolForDrawing(m)
d2d = Draw.MolDraw2DSVG(350,350)
opts = d2d.drawOptions()
for i in range(m.GetNumAtoms()):
opts.atomLabels[i] = m.GetAtomWithIdx(i).GetSymbol()+str(i)
d2d.DrawMolecule(dm)
d2d.FinishDrawing()
return d2d.GetDrawingText() | b32b7031e97c264630e0cf6024c60b7eb87c6ff9 | 7,128 |
from app.extensions.celerybackend import models
from app.extensions.logger.models import Log
from app.modules.auth.models import User
from app.utils import local
def get_main_page_info():
"""获取首页统计信息
:return info: Dict 统计信息
"""
task_cnt = models.Tasks.objects(time_start__gte=local.localdate()).count()
user_cnt = User.query.count()
new_user_cnt = User.query.filter(User.created > local.localdate()).count()
log_cnt = Log.objects(
created__gte=local.localdate(), module__nin=["static", "admin", "unknown"]
).count()
task_success_cnt = models.Tasks.objects(
time_start__gte=local.localdate(), state="success"
).count()
task_run_cnt = models.Tasks.objects(
time_start__gte=local.localdate(), state="run"
).count()
task_fail_cnt = models.Tasks.objects(
time_start__gte=local.localdate(), state="fail"
).count()
if task_success_cnt == 0:
task_success = 0
else:
task_success = int(task_success_cnt / task_cnt * 100)
if task_run_cnt == 0:
task_run = 0
else:
task_run = int(task_run_cnt / task_cnt * 100)
if task_fail_cnt == 0:
task_fail = 0
else:
task_fail = int(task_fail_cnt / task_cnt * 100)
info = {
"task": task_cnt,
"user": user_cnt,
"new_user": new_user_cnt,
"log": log_cnt,
}
return info, task_success, task_run, task_fail | 5a8c67dcafd0f822102f89195726cb7648b136fb | 7,129 |
def get_tablenames(cur):
""" Conveinience: """
cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
tablename_list_ = cur.fetchall()
tablename_list = [str(tablename[0]) for tablename in tablename_list_ ]
return tablename_list | 311335c38d9ea19396da3292513e3e1d7bd5caf0 | 7,130 |
import urllib
def reverse_geocode(userCoords):
"""
Returns the city, state (or equivalent administrative region), and country
that the specified point is in
userCoords is a tuple: (latitude, longitude)
"""
lat, lng = userCoords
latlng = "{0},{1}".format(lat, lng)
data = urllib.parse.urlencode({"latlng" : latlng,
"result_type" : "locality",
"key" : API_KEY})
result = make_google_api_request(API_URL + data)
if result["status"] == "OK":
return result["results"][0]["formatted_address"]
else:
return "Status: " + result["status"] | b38d9585033c012ea6a90a14f2f321a538b42e86 | 7,131 |
def match_red_baselines(model, model_antpos, data, data_antpos, tol=1.0, verbose=True):
"""
Match unique model baseline keys to unique data baseline keys based on positional redundancy.
Ideally, both model and data contain only unique baselines, in which case there is a
one-to-one mapping. If model contains extra redundant baselines, these are not propagated
to new_model. If data contains extra redundant baselines, the lowest ant1-ant2 pair is chosen
as the baseline key to insert into model.
Parameters:
-----------
model : type=DataContainer, model dictionary holding complex visibilities
must conform to DataContainer dictionary format.
model_antpos : type=dictionary, dictionary holding antennas positions for model dictionary
keys are antenna integers, values are ndarrays of position vectors in meters
data : type=DataContainer, data dictionary holding complex visibilities.
must conform to DataContainer dictionary format.
data_antpos : type=dictionary, dictionary holding antennas positions for data dictionary
same format as model_antpos
tol : type=float, baseline match tolerance in units of baseline vectors (e.g. meters)
Output: (data)
-------
new_model : type=DataContainer, dictionary holding complex visibilities from model that
had matching baselines to data
"""
# create baseline keys for model
model_keys = list(model.keys())
model_bls = np.array(list(map(lambda k: Baseline(model_antpos[k[1]] - model_antpos[k[0]], tol=tol), model_keys)))
# create baseline keys for data
data_keys = list(data.keys())
data_bls = np.array(list(map(lambda k: Baseline(data_antpos[k[1]] - data_antpos[k[0]], tol=tol), data_keys)))
# iterate over data baselines
new_model = odict()
for i, bl in enumerate(model_bls):
        # compare bl to all data_bls
        comparison = np.array(list(map(lambda mbl: bl == mbl, data_bls)), str)
# get matches
matches = np.where((comparison == 'True') | (comparison == 'conjugated'))[0]
# check for matches
if len(matches) == 0:
echo("found zero matches in data for model {}".format(model_keys[i]), verbose=verbose)
continue
else:
if len(matches) > 1:
echo("found more than 1 match in data to model {}: {}".format(model_keys[i], list(map(lambda j: data_keys[j], matches))), verbose=verbose)
# assign to new_data
if comparison[matches[0]] == 'True':
new_model[data_keys[matches[0]]] = model[model_keys[i]]
elif comparison[matches[0]] == 'conjugated':
new_model[data_keys[matches[0]]] = np.conj(model[model_keys[i]])
return DataContainer(new_model) | 83c7d5cc371593ad694fa81e56be6e1034bd693f | 7,132 |
import numpy as np
def _choose_random_genes(individual):
"""
Selects two separate genes from individual.
Args:
individual (np.array): Genotype of individual.
Returns:
gene1, gene2 (tuple): Genes separated by at least another gene.
"""
gene1, gene2 = np.sort(np.random.choice(len(individual), size=(2, 1), replace=False).flatten())
while gene2 - gene1 < 2:
gene1, gene2 = np.sort(np.random.choice(len(individual), size=(2, 1), replace=False).flatten())
return (gene1, gene2) | 08555dd3b3f1a04bbd93290fb9c60c37acc3583b | 7,133 |
import types
def incomplete_sample_detection(device_name):
"""Introspect whether a device has 'incomplete sample detection', described here:
www.ni.com/documentation/en/ni-daqmx/latest/devconsid/incompletesampledetection/
The result is determined empirically by outputting a pulse on one counter and
measuring it on another, and seeing whether the first sample was discarded or not.
This requires a non-simulated device with at least two counters. No external signal
is actually generated by the device whilst this test is performed. Credit for this
method goes to Kevin Price, who provided it here:
forums.ni.com/t5/Multifunction-DAQ/_/td-p/3849429
This workaround will hopefully be deprecated if and when NI provides functionality
to either inspect this feature's presence directly, or to disable it regardless of
its presence.
"""
if is_simulated(device_name):
msg = "Can only detect incomplete sample detection on non-simulated devices"
raise ValueError(msg)
if not supports_period_measurement(device_name):
msg = "Device doesn't support period measurement"
raise ValueError(msg)
CI_chans = get_CI_chans(device_name)
if len(CI_chans) < 2:
msg = "Need at least two counters to detect incomplete sample detection"
raise ValueError(msg)
# The counter we will produce a test signal on:
out_chan = CI_chans[0]
# The counter we will measure it on:
meas_chan = CI_chans[1]
# Set up the output task:
out_task = daqmx.Task()
out_task.CreateCOPulseChanTime(
out_chan, "", c.DAQmx_Val_Seconds, c.DAQmx_Val_Low, 0, 1e-3, 1e-3
)
# Prevent the signal being output on the physical terminal:
out_task.SetCOPulseTerm("", "")
# Force CO into idle state to prevent spurious edges when the task is started:
out_task.TaskControl(c.DAQmx_Val_Task_Commit)
# Set up the measurement task
meas_task = daqmx.Task()
meas_task.CreateCIPeriodChan(
meas_chan,
"",
1e-3,
1.0,
c.DAQmx_Val_Seconds,
c.DAQmx_Val_Rising,
c.DAQmx_Val_LowFreq1Ctr,
10.0,
0,
"",
)
meas_task.CfgImplicitTiming(c.DAQmx_Val_ContSamps, 1)
# Specify that we are measuring the internal output of the other counter:
meas_task.SetCIPeriodTerm("", '/' + out_chan + 'InternalOutput')
try:
meas_task.StartTask()
out_task.StartTask()
out_task.WaitUntilTaskDone(10.0)
# How many samples are in the read buffer of the measurement task?
samps_avail = types.uInt32()
meas_task.GetReadAvailSampPerChan(samps_avail)
if samps_avail.value == 0:
# The device discarded the first edge
return True
elif samps_avail.value == 1:
# The device did not discard the first edge
return False
else:
# Unexpected result
msg = "Unexpected number of samples: %d" % samps_avail.value
raise ValueError(msg)
finally:
out_task.ClearTask()
meas_task.ClearTask() | 52fac104ba408273c7876de0c37a62bc6548b7b6 | 7,134 |
def diag_numba(A, b):
""" Fill matrix A with a diagonal represented by vector b.
Parameters
----------
A : array
Base matrix.
b : array
Diagonal vector to fill with.
Returns
-------
array
Matrix A with diagonal filled.
"""
for i in range(b.shape[0]):
A[i, i] = b[i]
return A | 7eb722eaea9e932c7e7d0f3c52b40d224c7152cc | 7,135 |
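# Minimal usage sketch (assumed, not from the original source): fill the diagonal of a
# zero matrix in place and reuse the returned reference.
import numpy as np
A = np.zeros((3, 3))
b = np.array([1.0, 2.0, 3.0])
print(diag_numba(A, b))  # diagonal now holds 1.0, 2.0, 3.0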
def get_symminfo(newsymms: dict) -> str:
"""
Adds text about the symmetry generators used in order to add symmetry generated atoms.
"""
line = 'Symmetry transformations used to generate equivalent atoms:\n'
nitems = len(newsymms)
n = 0
    for key, value in newsymms.items():
        n += 1
        sep = ';' if n < nitems else ''
        line += "#{}: {}{} ".format(key, value, sep)
if newsymms:
return line
else:
return '' | 2b3fdeebac85ea3329839406e611ba051f45ddce | 7,136 |
from random import random
def get_random_sequences(
self, n=10, length=200, chroms=None, max_n=0.1, outtype="list" # noqa
):
"""
Return random genomic sequences.
Parameters
----------
n : int , optional
Number of sequences to return.
length : int , optional
Length of sequences to return.
chroms : list , optional
Return sequences only from these chromosomes.
max_n : float , optional
Maximum fraction of Ns.
outtype : string , optional
return the output as list or string.
Options: "list" or "string", default: "list".
Returns
-------
list
coordinates as lists or strings:
List with [chrom, start, end] genomic coordinates.
String with "chrom:start-end" genomic coordinates
(can be used as input for track2fasta).
"""
if not chroms:
chroms = self.keys()
# dict of chromosome sizes after subtracting the number of Ns
sizes = dict(
[(chrom, len(self[chrom]) - self.gaps.get(chrom, 0)) for chrom in chroms]
)
# list of (tuples with) chromosomes and their size
# (if that size is long enough for random sequence selection)
lengths = [
(sizes[x], x)
for x in chroms
if sizes[x] / len(self[x]) > 0.1 and sizes[x] > 10 * length
]
if len(lengths) == 0:
raise Exception("No contigs of sufficient size were found.")
# random list of chromosomes from lengths (can have duplicates)
chroms = weighted_selection(lengths, n)
coords = []
retries = 100
cutoff = length * max_n
for chrom in chroms:
for _ in range(retries):
start = int(random() * (sizes[chrom] - length))
end = start + length
count_n = self[chrom][start:end].seq.upper().count("N")
if count_n <= cutoff:
break
else:
raise Exception(
f"Random subset ran {retries} times, "
f"but could not find a sequence with less than {cutoff} N's in {chrom}.\n"
"You can specify contigs using the CHROMS argument."
)
# list output example ["chr1", 123, 456]
coords.append([chrom, start, end])
if outtype != "list":
# bed output example: "chr1:123-456"
for i, region in enumerate(coords):
coords[i] = [f"{region[0]}:{region[1]}-{region[2]}"]
return coords | ac6c33de8d333b3998d5e1b4d20dd2780745f9db | 7,137 |
import numpy as np
import antropy as ant
def get_sampleentropy(data):
"""Sample entropy, using antropy.sample_entropy, in the ML and AP directions. """
x, y = np.array(data[4]), np.array(data[5])
sample_entropy_ML = ant.sample_entropy(x)
sample_entropy_AP = ant.sample_entropy(y)
return sample_entropy_ML, sample_entropy_AP | d69d86de426bf4c7c9110d71a1a3c386a1d042d8 | 7,138 |
import pandas as pd
def from_json(filename, columns=None, process_func=None):
"""Read data from a json file
Args:
filename: path to a json file
columns (list, optional): list of columns to keep. All columns are kept by default
process_func (function, optional): A callable object that you can pass to process you data in a specific way
Returns:
pandas.DataFrame: return a dataframe object
"""
df = pd.read_json(filename)
return __process_data(df, columns, process_func) | 489603ff61c0a5aaa5012770d7c7649141002027 | 7,139 |
def render_content(tab):
"""
This function displays tabs based on user selection of tab
"""
if tab == 'tab-2':
return filter_based_recommendation.TAB2_LAYOUT
return choice_based_recommendation.CHOICE_BASED_RECOMMENDATION_LAYOUT | 085e17b401b52de4b2ff8b11765a798586832cf8 | 7,140 |
import requests
from urllib.parse import urlencode
def course(name, reviews = False):
"""
Get a course.
Parameters
----------
name: string
The name of the course.
reviews: bool, optional
Whether to also return reviews for the course, specifically reviews for
professors that taught the course and have the course listed as the one
being reviewed. Defaults to False.
"""
params = {"name" : name, "reviews": "true" if reviews else "false"}
url = BASE_URL + "course?" + urlencode(params)
return requests.get(url).json() | 969c2a94ecf1bfad227279ba3475772a45939848 | 7,141 |
def task(weight=1):
"""
Used as a convenience decorator to be able to declare tasks for a TaskSet
inline in the class. Example::
class ForumPage(TaskSet):
@task(100)
def read_thread(self):
pass
@task(7)
def create_thread(self):
pass
"""
def decorator_func(func):
func.locust_task_weight = weight
return func
"""
Check if task was used without parentheses (not called), like this::
@task
def my_task()
pass
"""
if callable(weight):
func = weight
weight = 1
return decorator_func(func)
else:
return decorator_func | 9a5af6cb9dabe73a5c08c8938b438b74881f9f26 | 7,142 |
import datetime
def generate_age(issue_time):
"""Generate a age parameter for MAC authentication draft 00."""
td = datetime.datetime.now() - issue_time
age = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
return unicode_type(age) | 3164ee1422b5eafd56dee0b0e73183fc64d14597 | 7,143 |
import dimod
def _bqm_from_1sat(constraint):
"""create a bqm for a constraint with only one variable
bqm will have exactly classical gap 2.
"""
configurations = constraint.configurations
num_configurations = len(configurations)
bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
if num_configurations == 1:
val, = next(iter(configurations))
v, = constraint.variables
bqm.add_variable(v, -1 if val > 0 else +1)
else:
bqm.add_variables_from((v, 0.0) for v in constraint.variables)
return bqm.change_vartype(constraint.vartype) | 149967077070e71eae66dc5521dfbae479645eda | 7,144 |
import os
import sys
import traceback
def _ssh(server):
"""
SSH into a Server
"""
remote_user = server.remote_user
private_key = server.private_key
if not private_key or not remote_user:
if remote_user:
return {"result": "Critical. Missing Private Key",
"status": 3,
}
elif private_key:
return {"result": "Critical. Missing Remote User",
"status": 3,
}
else:
return {"result": "Critical. Missing Remote User & Private Key",
"status": 3,
}
# SSH in & run check
    try:
        import paramiko
    except ImportError:
return {"result": "Critical. Paramiko required.",
"status": 3,
}
keyfile = open(os.path.join(current.request.folder, "uploads", private_key), "r")
mykey = paramiko.RSAKey.from_private_key(keyfile)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(hostname = server.host_ip,
username = remote_user,
pkey = mykey)
except paramiko.ssh_exception.AuthenticationException:
tb_parts = sys.exc_info()
tb_text = "".join(traceback.format_exception(tb_parts[0],
tb_parts[1],
tb_parts[2]))
return {"result": "Critical. Authentication Error\n\n%s" % tb_text,
"status": 3,
}
except paramiko.ssh_exception.SSHException:
tb_parts = sys.exc_info()
tb_text = "".join(traceback.format_exception(tb_parts[0],
tb_parts[1],
tb_parts[2]))
return {"result": "Critical. SSH Error\n\n%s" % tb_text,
"status": 3,
}
return ssh | e54e72c8c4bff8deeac0e29a57393860954b299c | 7,145 |
from typing import Dict
from typing import List
from typing import Any
from typing import Tuple
import numpy as np
import pandas as pd
def _create_group_codes_and_info(
states: pd.DataFrame,
assort_bys: Dict[str, List[str]],
contact_models: Dict[str, Dict[str, Any]],
) -> Tuple[pd.DataFrame, Dict[str, Dict[str, Any]]]:
"""Create group codes and additional information.
Args:
states (pd.DataFrame): The states.
assort_bys (Dict[str, List[str]]): The assortative variables for each contact
model.
contact_models (Dict[str, Dict[str, Any]]): The contact models.
Returns:
A tuple containing:
- states (pandas.DataFrame): The states.
- group_codes_info (Dict[str, Dict[str, Any]]): A dictionary where keys are
names of contact models and values are dictionaries containing the name and
the original codes of the assortative variables.
"""
group_codes_names = _create_group_codes_names(contact_models, assort_bys)
group_codes_info = {}
for model_name, assort_by in assort_bys.items():
is_factorized = contact_models[model_name].get("is_factorized", False)
group_code_name = group_codes_names[model_name]
# Create the group code column if it is not available or if it exists - meaning
# we are resuming a simulation - to recover the groups.
if (group_code_name not in states.columns) or (
group_code_name in states.columns and not is_factorized
):
states[group_code_name], groups = factorize_assortative_variables(
states, assort_by
)
elif group_code_name in states.columns and is_factorized:
states[group_code_name] = states[group_code_name].astype(DTYPE_GROUP_CODE)
unsorted_groups = states[group_code_name].unique()
groups = np.sort(unsorted_groups[unsorted_groups != -1])
else:
groups = states[group_code_name].cat.categories
if is_factorized:
groups = groups[groups != -1]
group_codes_info[model_name] = {"name": group_code_name, "groups": groups}
return states, group_codes_info | 6acac718aa639e9584dbc5d7cb7d601731aa674e | 7,146 |
import numpy as np
import matplotlib.pyplot as plt
def quiver_plotter(X, Y, Z, plot_range=None, mes_unit='', title='', x_label=r'$x$', y_label=r'$y$', show_plot=True, dark=False):
"""
Generates a plot of some vector fields.
Parameters
----------
X : numpy.ndarray
Matrix with values for the first axis on all the rows.
Y : numpy.ndarray
Matrix with values for the second axis on all the columns.
Z : numpy.ndarray or list of numpy.ndarray
Either a matrix with 3 dimension and the last two dimensions like the
dimensions of X and Y or a list of two matricies with the same size as
X and Y.
plot_range : list of floats, optional
List with the range for the plot. The defualt is None.
mes_unit : str, optional
Units of measure of the vectors shown. The default is ''.
title : str, optional
Title of the plot. The default is ''.
x_label : str, optional
The name on the first axis. The default is r'$x$'.
y_label : str, optional
Name on the second axis. The default is r'$y$'.
show_plot : bool, optional
Flag for printing the figure with plt.show(). The default is True.
dark : bool, optional
Flag for changing the graph color to a dark theme. The default is False.
Raises
------
ValueError
If the size of either X, Y or Z don't match.
TypeError
If the Z parameter is neither a list of numpy.ndarray or a numpy.ndarray
Returns
-------
fig : matplotlib.figure.Figure
Figure with the plot.
"""
if isinstance(Z, list):
if len(Z) != 2:
raise ValueError("The argument z should be a list of two elements.")
else:
q_x = Z[0]
q_y = Z[1]
elif isinstance(Z, np.ndarray):
if len(Z.shape) != 3 or Z.shape[0] < 2:
raise ValueError(
"The argument z should be a numpy array of dimension 3 with at least 2 values on the first axis.")
else:
q_x = Z[0, :]
q_y = Z[1, :]
else:
raise TypeError(
"The argument z should be a list of numpy.ndarray or an instance of numpy.ndarray.")
range_reduction = True
if plot_range == None:
range_reduction = False
elif not isinstance(plot_range, list):
raise TypeError('The argument should be a list of floats.')
elif len(plot_range) != 4:
raise ValueError(
'The number of elements in plot_range should be 4, here it is {}'.format(len(plot_range)))
if q_x.shape != X.shape or q_x.shape != Y.shape or q_y.shape != X.shape or q_y.shape != Y.shape:
raise ValueError("The shape of X, Y and the two elements in Z must coincide.")
if range_reduction:
x_max = plot_range[1]
x_min = plot_range[0]
y_max = plot_range[3]
y_min = plot_range[2]
idx_x_min, idx_x_max = _crop_array_idxs(X[:, 0], x_min, x_max)
idx_y_min, idx_y_max = _crop_array_idxs(Y[0, :], y_min, y_max)
X = X[idx_x_min: idx_x_max + 1, idx_y_min: idx_y_max + 1]
Y = Y[idx_x_min: idx_x_max + 1, idx_y_min: idx_y_max + 1]
q_x = q_x[idx_x_min: idx_x_max + 1, idx_y_min: idx_y_max + 1]
q_y = q_y[idx_x_min: idx_x_max + 1, idx_y_min: idx_y_max + 1]
# plotting of the function
fig = plt.figure(figsize=fig_size)
ax = fig.gca()
Q = ax.quiver(X, Y, q_x, q_y, pivot='tail')
ax.quiverkey(Q, 0.9, 0.9, 1, '1' + mes_unit, labelpos='E', coordinates='figure')
if range_reduction:
ax.axis(plot_range)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
title = ax.set_title(title)
if dark:
_darkizer(fig, ax, title)
if show_plot:
plt.show()
return fig | 55a0c5768ce2827c851abf468030824ee9a1411e | 7,147 |
def get_attr(item, name, default=None):
"""
similar to getattr and get but will test for class or dict
:param item:
:param name:
:param default:
:return:
"""
try:
val = item[name]
except (KeyError, TypeError):
try:
val = getattr(item, name)
except AttributeError:
val = default
return val | 0c68c7e54ef901e18a49d327188f29f72f54da01 | 7,148 |
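# Usage sketch (illustrative): the same accessor works for dicts, objects and a fallback default.
class _Example:
    x = 1
assert get_attr({"x": 1}, "x") == 1
assert get_attr(_Example(), "x") == 1
assert get_attr({}, "missing", default="fallback") == "fallback"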
def float2(val, min_repeat=6):
"""Increase number of decimal places of a repeating decimal.
e.g. 34.111111 -> 34.1111111111111111"""
repeat = 0
lc = ""
for i in range(len(val)):
c = val[i]
if c == lc:
repeat += 1
if repeat == min_repeat:
return float(val[:i+1] + c * 10)
else:
lc = c
repeat = 1
return float(val) | 07fc521e877387242a1e6cf951a6d5cbdc925aaf | 7,149 |
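# Usage sketch (illustrative): a digit repeated at least `min_repeat` times is padded out,
# approximating the repeating decimal; other strings are parsed unchanged.
print(float2("34.111111"))  # -> 34.11111111111111
print(float2("34.12"))      # -> 34.12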
def load_array_meta(loader, filename, index):
"""
Load the meta-data data associated with an array from the specified index
within a file.
"""
return loader(filename, index) | e53ed1d795edf2285b3eca333a7650a378c26b9a | 7,150 |
def viterbi_value(theta: np.ndarray, operator: str = 'hardmax') \
-> float:
"""
Viterbi operator.
:param theta: _numpy.ndarray, shape = (T, S, S),
Holds the potentials of the linear chain CRF
:param operator: str in {'hardmax', 'softmax', 'sparsemax'},
Smoothed max-operator
:return: float,
DTW value $Vit(\theta)$
"""
return viterbi_grad(theta, operator)[0] | 7b1c37143c05f400cc910e07c97e51d4d3788ca9 | 7,151 |
from struct import pack
def pack32(n):
"""Convert a Python int to a packed signed long (4 bytes)."""
return pack('<i', n) | 0caebee4af80c4defb75ed8512cb2d5d13cd7ede | 7,152 |
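# Usage sketch (illustrative): little-endian, signed, 4-byte packing.
assert pack32(1) == b'\x01\x00\x00\x00'
assert pack32(-1) == b'\xff\xff\xff\xff'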
def run_rollout(
policy,
env,
horizon,
use_goals=False,
render=False,
video_writer=None,
video_skip=5,
terminate_on_success=False,
):
"""
Runs a rollout in an environment with the current network parameters.
Args:
policy (RolloutPolicy instance): policy to use for rollouts.
env (EnvBase instance): environment to use for rollouts.
horizon (int): maximum number of steps to roll the agent out for
use_goals (bool): if True, agent is goal-conditioned, so provide goal observations from env
render (bool): if True, render the rollout to the screen
video_writer (imageio Writer instance): if not None, use video writer object to append frames at
rate given by @video_skip
video_skip (int): how often to write video frame
terminate_on_success (bool): if True, terminate episode early as soon as a success is encountered
Returns:
results (dict): dictionary containing return, success rate, etc.
"""
assert isinstance(policy, RolloutPolicy)
assert isinstance(env, EnvBase)
policy.start_episode()
ob_dict = env.reset()
goal_dict = None
if use_goals:
# retrieve goal from the environment
goal_dict = env.get_goal()
results = {}
video_count = 0 # video frame counter
total_reward = 0.
success = { k: False for k in env.is_success() } # success metrics
try:
for step_i in range(horizon):
# get action from policy
ac = policy(ob=ob_dict, goal=goal_dict)
# play action
ob_dict, r, done, _ = env.step(ac)
# render to screen
if render:
env.render(mode="human")
# compute reward
total_reward += r
cur_success_metrics = env.is_success()
for k in success:
success[k] = success[k] or cur_success_metrics[k]
# visualization
if video_writer is not None:
if video_count % video_skip == 0:
video_img = env.render(mode="rgb_array", height=512, width=512)
video_writer.append_data(video_img)
video_count += 1
# break if done
if done or (terminate_on_success and success["task"]):
break
except env.rollout_exceptions as e:
print("WARNING: got rollout exception {}".format(e))
results["Return"] = total_reward
results["Horizon"] = step_i + 1
results["Success_Rate"] = float(success["task"])
# log additional success metrics
for k in success:
if k != "task":
results["{}_Success_Rate".format(k)] = float(success[k])
return results | 52324d72a80aea83b667faa08e6e95c561311ee5 | 7,153 |
from typing import Union
from typing import Type
from typing import List
from typing import Optional
from typing import Dict
from typing import Any
from typing import cast
def create_test_client(
route_handlers: Union[
Union[Type[Controller], BaseRouteHandler, Router, AnyCallable],
List[Union[Type[Controller], BaseRouteHandler, Router, AnyCallable]],
],
after_request: Optional[AfterRequestHandler] = None,
allowed_hosts: Optional[List[str]] = None,
backend: str = "asyncio",
backend_options: Optional[Dict[str, Any]] = None,
base_url: str = "http://testserver",
before_request: Optional[BeforeRequestHandler] = None,
cors_config: Optional[CORSConfig] = None,
dependencies: Optional[Dict[str, Provide]] = None,
exception_handlers: Optional[Dict[Union[int, Type[Exception]], ExceptionHandler]] = None,
guards: Optional[List[Guard]] = None,
middleware: Optional[List[Union[Middleware, Type[BaseHTTPMiddleware], Type[MiddlewareProtocol]]]] = None,
on_shutdown: Optional[List[LifeCycleHandler]] = None,
on_startup: Optional[List[LifeCycleHandler]] = None,
openapi_config: Optional[OpenAPIConfig] = None,
template_config: Optional[TemplateConfig] = None,
plugins: Optional[List[PluginProtocol]] = None,
raise_server_exceptions: bool = True,
root_path: str = "",
static_files_config: Optional[Union[StaticFilesConfig, List[StaticFilesConfig]]] = None,
cache_config: CacheConfig = DEFAULT_CACHE_CONFIG,
) -> TestClient:
"""Create a TestClient"""
return TestClient(
app=Starlite(
after_request=after_request,
allowed_hosts=allowed_hosts,
before_request=before_request,
cors_config=cors_config,
dependencies=dependencies,
exception_handlers=exception_handlers,
guards=guards,
middleware=middleware,
on_shutdown=on_shutdown,
on_startup=on_startup,
openapi_config=openapi_config,
template_config=template_config,
plugins=plugins,
route_handlers=cast(Any, route_handlers if isinstance(route_handlers, list) else [route_handlers]),
static_files_config=static_files_config,
cache_config=cache_config,
),
backend=backend,
backend_options=backend_options,
base_url=base_url,
raise_server_exceptions=raise_server_exceptions,
root_path=root_path,
) | f110972c735e9ad81eca1f267651e97732d6e37c | 7,154 |
def queue_tabnav(context):
"""Returns tuple of tab navigation for the queue pages.
Each tuple contains three elements: (tab_code, page_url, tab_text)
"""
counts = context['queue_counts']
request = context['request']
listed = not context.get('unlisted')
if listed:
tabnav = [('nominated', 'queue_nominated',
(ungettext('New Add-on ({0})',
'New Add-ons ({0})',
counts['nominated'])
.format(counts['nominated']))),
('pending', 'queue_pending',
(ungettext('Update ({0})',
'Updates ({0})',
counts['pending'])
.format(counts['pending']))),
('moderated', 'queue_moderated',
(ungettext('Moderated Review ({0})',
'Moderated Reviews ({0})',
counts['moderated'])
.format(counts['moderated'])))]
if acl.action_allowed(request, amo.permissions.ADDONS_POST_REVIEW):
tabnav.append(
('auto_approved', 'queue_auto_approved',
(ungettext('Auto Approved Add-on ({0})',
'Auto Approved Add-ons ({0})',
counts['auto_approved'])
.format(counts['auto_approved']))),
)
else:
tabnav = [
('all', 'unlisted_queue_all', ugettext('All Unlisted Add-ons'))
]
return tabnav | 6f3777ce46f09a6946ba66755a3ae27eda126da5 | 7,155 |
import seaborn as sns
def _plot_feature_correlations(ax, correlation_matrix, cmap="coolwarm", annot=True, fmt=".2f", linewidths=.05):
"""
Creates a heatmap plot of the feature correlations
Args:
:ax: the axes object to add the plot to
:correlation_matrix: the feature correlations
:cmap: the color map
:annot: whether to annotate the heatmap
:fmt: how to format the annotations
:linewidths: line width in the plot
Returns:
The heatmap
"""
hm = sns.heatmap(correlation_matrix, ax=ax, cmap=cmap, annot=annot, fmt=fmt,
linewidths=linewidths)
return hm | c7835c743552eec6beb3441bc324c2192d4db9d7 | 7,156 |
import tempfile
from graphviz import Digraph
from copy import copy
def graphviz_visualization(activities_count, dfg, image_format="png", measure="frequency",
max_no_of_edges_in_diagram=100000, start_activities=None,
end_activities=None, soj_time=None, font_size="12",
bgcolor="transparent", stat_locale: dict = None):
"""
Do GraphViz visualization of a DFG graph
Parameters
-----------
activities_count
Count of attributes in the log (may include attributes that are not in the DFG graph)
dfg
DFG graph
image_format
GraphViz should be represented in this format
measure
Describes which measure is assigned to edges in direcly follows graph (frequency/performance)
max_no_of_edges_in_diagram
Maximum number of edges in the diagram allowed for visualization
start_activities
Start activities of the log
end_activities
End activities of the log
soj_time
For each activity, the sojourn time in the log
stat_locale
Dict to locale the stat strings
Returns
-----------
viz
Digraph object
"""
if start_activities is None:
start_activities = {}
if end_activities is None:
end_activities = {}
if stat_locale is None:
stat_locale = {}
filename = tempfile.NamedTemporaryFile(suffix='.gv')
viz = Digraph("", filename=filename.name, engine='dot', graph_attr={'bgcolor': bgcolor})
# first, remove edges in diagram that exceeds the maximum number of edges in the diagram
dfg_key_value_list = []
for edge in dfg:
dfg_key_value_list.append([edge, dfg[edge]])
# more fine grained sorting to avoid that edges that are below the threshold are
# undeterministically removed
dfg_key_value_list = sorted(dfg_key_value_list, key=lambda x: (x[1], x[0][0], x[0][1]), reverse=True)
dfg_key_value_list = dfg_key_value_list[0:min(len(dfg_key_value_list), max_no_of_edges_in_diagram)]
dfg_allowed_keys = [x[0] for x in dfg_key_value_list]
dfg_keys = list(dfg.keys())
for edge in dfg_keys:
if edge not in dfg_allowed_keys:
del dfg[edge]
# calculate edges penwidth
penwidth = assign_penwidth_edges(dfg)
activities_in_dfg = set()
activities_count_int = copy(activities_count)
for edge in dfg:
activities_in_dfg.add(edge[0])
activities_in_dfg.add(edge[1])
# assign attributes color
activities_color = get_activities_color(activities_count_int)
# represent nodes
viz.attr('node', shape='box')
if len(activities_in_dfg) == 0:
activities_to_include = sorted(list(set(activities_count_int)))
else:
# take unique elements as a list not as a set (in this way, nodes are added in the same order to the graph)
activities_to_include = sorted(list(set(activities_in_dfg)))
activities_map = {}
for act in activities_to_include:
if "frequency" in measure and act in activities_count_int:
viz.node(str(hash(act)), act + " (" + str(activities_count_int[act]) + ")", style='filled',
fillcolor=activities_color[act], fontsize=font_size)
activities_map[act] = str(hash(act))
else:
stat_string = human_readable_stat(soj_time[act], stat_locale)
viz.node(str(hash(act)), act + f" ({stat_string})", fontsize=font_size)
activities_map[act] = str(hash(act))
# make edges addition always in the same order
dfg_edges = sorted(list(dfg.keys()))
# represent edges
for edge in dfg_edges:
if "frequency" in measure:
label = str(dfg[edge])
else:
label = human_readable_stat(dfg[edge], stat_locale)
viz.edge(str(hash(edge[0])), str(hash(edge[1])), label=label, penwidth=str(penwidth[edge]), fontsize=font_size)
start_activities_to_include = [act for act in start_activities if act in activities_map]
end_activities_to_include = [act for act in end_activities if act in activities_map]
if start_activities_to_include:
viz.node("@@startnode", "<●>", shape='circle', fontsize="34")
for act in start_activities_to_include:
label = str(start_activities[act]) if isinstance(start_activities, dict) else ""
viz.edge("@@startnode", activities_map[act], label=label, fontsize=font_size)
if end_activities_to_include:
# <■>
viz.node("@@endnode", "<■>", shape='doublecircle', fontsize="32")
for act in end_activities_to_include:
label = str(end_activities[act]) if isinstance(end_activities, dict) else ""
viz.edge(activities_map[act], "@@endnode", label=label, fontsize=font_size)
viz.attr(overlap='false')
viz.format = image_format
return viz | f04bd9e0f076887072805f57d260310f309547fd | 7,157 |
import numpy as np
from scipy.interpolate import RegularGridInterpolator
def sig_io_func(p, ca, sv):
# The method input gives control over how the Nafion conductivity is
# calculated. Options are 'lam' for laminar in which an interpolation is
# done using data from [1], 'bulk' for treating the thin Nafion shells the
# as a bulk-like material using NR results from [5], and 'mix' which uses a
# weighted parallel mixutre of 'lam' and 'bulk' based on how much Pt vs C
# exists at current conditions. This is because it is speculated that Pt
# may have lamellae although C may not. 'sun' was also added to the
# agglomerate model options which takes constant values used in [2].
# Inputs: Temperature [K], Nafion shell thickness [m], rel. humiditiy [%],
# Pt coverage [%], p['eps/tau2_n'] [-] and p['p_eff_SAnaf'] [-],
# and calculation method [-]
""" Lamellae Method """
# Data below is taken from "Proton Transport in Supported Nafion Nanothin
# Films by Electrochemical Impedence Spectroscopy" by Paul, MacCreery, and
# Karan in their Supporting Information Document [1]. The data was given in
# mS/cm and converted to S/m for the model calling this function.
# indecies: temperature [C], Nafion shell thickness [nm], RH [%]
sig_data = np.zeros([5,5,5])
temp_vals = np.array([25,30,40,50,60])
thick_vals = np.array([4,10,55,160,300])
RH_vals = np.array([20,40,60,80,95])
# v_w = np.zeros([p['Ny'],p['Nr']])
# for i in range(p['Ny']):
# ih_n = ca.naf_b[i].species_index('H(Naf)')
# ih2o_n = ca.naf_b[i].species_index('H2O(Naf)')
# for j in range(p['Nr']):
# ca.naf_b[i].Y = sv[ca.ptr['rho_naf_k'] +i*p['nxt_y'] +j*p['nxt_r']]
# v_k = ca.naf_b[i].X*ca.naf_b[i].partial_molar_volumes
# v_w[i,j] = v_k[ih2o_n] / sum(v_k)
# v_w_a = np.sum(p['Vf_shl']*v_w,axis=1)
# lamb_n = np.clip((v_w_a / (1 - v_w_a) *983/1980 *1100/18.02), 0., 22.)
rho_naf_w = np.zeros([p['Ny'],p['Nr']])
for i in range(p['Ny']):
ih2o_n = ca.naf_b[i].species_index('H2O(Naf)')
for j in range(p['Nr']):
ind = ca.ptr['rho_naf_k'] +i*p['nxt_y'] +j*p['nxt_r']
rho_naf_w[i,j] = sv[ind][ih2o_n]
rho_naf_av = np.sum(p['Vf_shl']*rho_naf_w,axis=1)
RH, RH_C = np.zeros(p['Ny']), np.zeros(p['Ny'])
for i in range(p['Ny']):
av = rho_naf_av[i]
if av > 0:
RH[i] = RH_eq_func(av,p,i)*100
RH_C[i] = RH_eq_func(av/2,p,i)*100
else:
RH[i] = min(RH_vals)
RH_C[i] = min(RH_vals)
"Data for 25C as thickness[nm] for rows and RH[%] for columns"
sig_data[0,:,:] = np.array([[0.0002,0.0206,0.4138,4.9101,21.888], # t 4nm
[0.0002,0.0199,0.4073,5.1758,23.9213], # t 10nm
[0.0002,0.0269,0.5448,5.3493,22.753], # t 55nm
[0.3362,3.2505,8.3065,27.0725,54.0428], # t 160nm
                                [1.5591,8.8389,19.6728,np.nan,np.nan]]) # t 300nm (no data at RH 80/95)
"Data for 30C as thickness[nm] for rows and RH[%] for columns"
sig_data[1,:,:] = np.array([[0.0001,0.012,0.278,3.432,21.481], # t 4nm
[0.0003,0.018,0.339,3.895,22.062], # t 10nm
[0.0004,0.028,0.550,4.296,20.185], # t 55nm
[0.0016,0.081,1.120,9.244,34.810], # t 160nm
[0.0071,0.359,2.797,10.978,43.913]]) # t 300nm
"Data for 40C as thickness[nm] for rows and RH[%] for columns"
sig_data[2,:,:] = np.array([[0.0003,0.029,0.585,6.164,30.321], # t 4nm
[0.0009,0.034,0.625,5.374,48.799], # t 10nm
[0.0011,0.065,0.931,6.909,40.439], # t 55nm
[0.0032,0.152,1.770,14.162,68.326], # t 160nm
[0.0140,0.605,4.939,17.083,68.334]]) # t 300nm
"Data for 50C as thickness[nm] for rows and RH[%] for columns"
sig_data[3,:,:] = np.array([[0.001,0.062,1.087,8.335,37.686], # t 4nm
[0.002,0.077,1.031,8.127,57.339], # t 10nm
[0.002,0.121,1.603,9.149,48.934], # t 55nm
[0.007,0.247,2.704,19.221,72.006], # t 160nm
[0.031,1.076,7.185,20.981,83.923]]) # t 300nm
"Data for 60C as thickness[nm] for rows and RH[%] for columns"
sig_data[4,:,:] = np.array([[0.003,0.14,1.51,11.16,55.18], # t 4nm
[0.003,0.17,1.72,13.67,62.39], # t 10nm
[0.007,0.24,2.29,16.60,63.20], # t 55nm
[0.015,0.45,4.31,26.63,93.33], # t 160nm
[0.009,0.44,3.43,26.73,100.60]]) # t 300nm
"Create interpolation function for relavent ranges"
sig_io_int = RegularGridInterpolator((temp_vals,thick_vals,RH_vals),sig_data)
"Call interpolation function for model specified paramaters"
# Multiplication by 0.1 is unit conversion from mS/cm to S/m. Runner file
# stores T and t_naf in [K] and [m] so are also converted inside the
# interpolation function to the same units as original data [C] and [nm].
RH = np.clip(RH, min(RH_vals), max(RH_vals))
RH_C = np.clip(RH_C, min(RH_vals), max(RH_vals))
pts = np.zeros([p['Ny'],3])
for i in range(p['Ny']):
pts[i,:] = [p['T']-273, p['t_naf'][i]*1e9, RH[i]]
sig_io_lam = sig_io_int(pts) *0.1
""" Bulk Method """
# This method assumes that the thin shell of Nafion is treated the same as
# the bulk material. Lambda is calculated using an empirical relationship.
# Then the sig_io formula from [5] for a bulk membrane is used and scaled
# by the scaling factor, also from [5].
# The loop below assumes RH is not RH_eq and instead is the actual local
# gas-phase RH.
if p['sig_method'] == 'lit':
for i in range(p['Ny']):
ih2o_g = ca.gas.species_index('H2O')
rho_gas_k = sv[ca.ptr['rho_gas_k'] +i*p['nxt_y']]
ca.gas.TDY = p['T'], sum(rho_gas_k), rho_gas_k
RH[i] = ca.gas.X[ih2o_g]*ca.gas.P / 19946 *100
lamb_n = 0.3 + 10.8*(RH/100) - 16*(RH/100)**2 + 14.1*(RH/100)**3
sig_io_lit = (0.5139*lamb_n - 0.326)*np.exp(1268*(1/303 - 1/p['T']))
sig_io_bulk = sig_io_lit *0.672
""" Mix Method """
# Using a parallel resistor network to weight the conductivity through
# lamellae and that through bulk-like material is performed with respect to
# the amount of Pt and C areas respectively.
sig_io_mix = 1 / (p['p_Pt']/100 /sig_io_lam +(1-p['p_Pt']/100) /sig_io_bulk)
" Set conductivity depending on method "
# Based on the method, return the appropriate conductivity.
if p['sig_method'] == 'lam': sig_io = sig_io_lam
elif p['sig_method'] == 'bulk': sig_io = sig_io_bulk
elif p['sig_method'] == 'mix': sig_io = sig_io_mix
elif p['sig_method'] == 'lit': sig_io = sig_io_lit
# Output returns ionic conductivity [S/m]
return sig_io | 3a51f6899d9d8792378d0870fa56f15172e1d6cc | 7,158 |
from math import sqrt, exp
def srwl_opt_setup_cyl_fiber(_foc_plane, _delta_ext, _delta_core, _atten_len_ext, _atten_len_core, _diam_ext, _diam_core, _xc, _yc):
"""
Setup Transmission type Optical Element which simulates Cylindrical Fiber
:param _foc_plane: plane of focusing: 1- horizontal (i.e. fiber is parallel to vertical axis), 2- vertical (i.e. fiber is parallel to horizontal axis)
:param _delta_ext: refractive index decrement of extenal layer
:param _delta_core: refractive index decrement of core
:param _atten_len_ext: attenuation length [m] of external layer
:param _atten_len_core: attenuation length [m] of core
:param _diam_ext: diameter [m] of external layer
:param _diam_core: diameter [m] of core
:param _xc: horizontal coordinate of center [m]
:param _yc: vertical coordinate of center [m]
:return: transmission (SRWLOptT) type optical element which simulates Cylindrical Fiber
"""
def ray_path_in_cyl(_dx, _diam):
r = 0.5*_diam
pathInCyl = 0
if((_dx > -r) and (_dx < r)):
pathInCyl = 2*sqrt(r*r - _dx*_dx)
return pathInCyl
ne = 1
nx = 101
ny = 1001
rx = 10e-03
ry = _diam_ext*1.2
if(_foc_plane == 1): #focusing plane is horizontal
nx = 1001
ny = 101
rx = _diam_ext*1.2
ry = 10e-03
opT = SRWLOptT(nx, ny, rx, ry, None, 1, 1e+23, 1e+23, _xc, _yc)
hx = rx/(nx - 1)
hy = ry/(ny - 1)
ofst = 0
pathInExt = 0
pathInCore = 0
if(_foc_plane == 2): #focusing plane is vertical
y = -0.5*ry #cylinder is always centered on the grid, however grid can be shifted
for iy in range(ny):
pathInExt = 0; pathInCore = 0
if(_diam_core > 0):
pathInCore = ray_path_in_cyl(y, _diam_core)
pathInExt = ray_path_in_cyl(y, _diam_ext) - pathInCore
argAtten = -0.5*pathInExt/_atten_len_ext
if(_atten_len_core > 0):
argAtten -= 0.5*pathInCore/_atten_len_core
ampTr = exp(argAtten) #amplitude transmission
optPathDif = -_delta_ext*pathInExt - _delta_core*pathInCore #optical path difference
for ix in range(nx):
opT.arTr[ofst] = ampTr #amplitude transmission
opT.arTr[ofst + 1] = optPathDif #optical path difference
ofst += 2
y += hy
else: #focusing plane is horizontal
perY = 2*nx
x = -0.5*rx #cylinder is always centered on the grid, however grid can be shifted
for ix in range(nx):
pathInExt = 0; pathInCore = 0
if(_diam_core > 0):
pathInCore = ray_path_in_cyl(x, _diam_core)
pathInExt = ray_path_in_cyl(x, _diam_ext) - pathInCore
argAtten = -0.5*pathInExt/_atten_len_ext
if(_atten_len_core > 0):
argAtten -= 0.5*pathInCore/_atten_len_core
ampTr = exp(argAtten) #amplitude transmission
optPathDif = -_delta_ext*pathInExt - _delta_core*pathInCore #optical path difference
ix2 = ix*2
for iy in range(ny):
ofst = iy*perY + ix2
opT.arTr[ofst] = ampTr #amplitude transmission
opT.arTr[ofst + 1] = optPathDif #optical path difference
x += hx
return opT | 45cd80cc3dee7e9ab61311e7bbc574722feadb49 | 7,159 |
from enum import Enum
def __create_menu_elements() -> Enum:
"""Create Menu Elements.
:return: Menu elements as an enum in the format KEY_WORD -> Vales(char, KeyWord)
"""
menu_keys = ["MAIN_MENU", "PROFILE", "CLEAN_TIME", "READINGS", "PRAYERS", "DAILY_REFLECTION", "JUST_FOR_TODAY",
"LORDS_PRAYER", "SERENITY_PRAYER", "ST_JOSEPHS_PRAYER", "TENDER_AND_COMPASSIONATE_GOD",
"THIRD_STEP_PRAYER", "SEVENTH_STEP_PRAYER", "ELEVENTH_STEP_PRAYER"]
menu_values_chr = [chr(ch) for ch in range(len(menu_keys))]
menu_values_str = ["MainMenu", "Profile", "CleanTime", "Readings", "Prayers", "DailyReflection", "JustForToday",
"LordsPrayer", "SerenityPrayer", "StJosephsPrayer", "TenderAndCompassionateGod",
"ThirdStepPrayer", "SeventhStepPrayer", "EleventhStepPrayer"]
return Enum('MenuElements', {k: MenuElementValues(data=v1, name=v2)
for k, v1, v2 in zip(menu_keys, menu_values_chr, menu_values_str)}) | 4407da506b681b124975827d94471e58089452a5 | 7,160 |
import math
def solve(coordinates):
"""
    Algorithm solver function: finds the shortest distance between two points
    :param coordinates: the coordinates
    :return: the shortest distance between two points
"""
n = len(coordinates)
x_coordinates = [coordinate[0] for coordinate in coordinates]
y_coordinates = [coordinate[1] for coordinate in coordinates]
middle_point = (sum_of_list(x_coordinates) / n,
sum_of_list(y_coordinates) / n)
# print(middle_point) # test
distances = [distance(middle_point, point) for point in coordinates]
# print(distances) # test
distance_difference = list()
for i in range(n - 1):
coordinate_info = {
'indices': (i, i + 1),
'difference': math.fabs(distances[i] - distances[i + 1])
}
distance_difference.append(coordinate_info)
# print(distance_difference) # test
indices = get_indices(distance_difference)
return distance(coordinates[indices[0]], coordinates[indices[1]]) | fc6594e45537cf07bac870b6f932b00dd59d57bd | 7,161 |
def get_cache_key(account, container=None, obj=None):
"""
Get the keys for both memcache and env['swift.infocache'] (cache_key)
where info about accounts, containers, and objects is cached
:param account: The name of the account
:param container: The name of the container (or None if account)
:param obj: The name of the object (or None if account or container)
:returns: a string cache_key
"""
if obj:
if not (account and container):
raise ValueError('Object cache key requires account and container')
cache_key = 'object/%s/%s/%s' % (account, container, obj)
elif container:
if not account:
raise ValueError('Container cache key requires account')
cache_key = 'container/%s/%s' % (account, container)
else:
cache_key = 'account/%s' % account
# Use a unique environment cache key per account and one container.
# This allows caching both account and container and ensures that when we
# copy this env to form a new request, it won't accidentally reuse the
# old container or account info
return cache_key | d46270d33fcbaecc0bf1886965ac1b1771a3fc8d | 7,162 |
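# Usage sketch (illustrative): the three key shapes produced for account/container/object info.
assert get_cache_key("AUTH_test") == "account/AUTH_test"
assert get_cache_key("AUTH_test", "cont") == "container/AUTH_test/cont"
assert get_cache_key("AUTH_test", "cont", "obj") == "object/AUTH_test/cont/obj"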
import os
def dir_plus_file(fname):
"""Splits pathnames into the dirname plus the filename."""
return os.path.split(fname) | b83777aa6897e7ed1af73b187b28626fa174ac14 | 7,163 |
def arctan(x):
"""Returns arctan(x)"""
if type(x) in (float,_numpy._numpy.float64): x = _numpy._numpy.array([x])
a = abs(x)
r = arctan_1px( a - 1. )
f = arctan_series( a )
eps = _numpy._numpy.finfo(1.).eps
g = arctan_series( 1. / maximum( 0.125, a ) )
g = 0.5 * _numpy._numpy.pi - g
j = ( a < 0.5 )
r[j] = f[j]
j = ( a > 2. )
r[j] = g[j]
j = ( x<0 )
r[j] = -r[j]
if r.size==1: return r[0]
return r | 6bb5f45115abd34bc7ba7892fac28eb397a131f6 | 7,164 |
def uniform_dist(low, high):
"""Return a random variable uniformly distributed between `low` and `high`.
"""
return sp_uniform(low, high - low) | e4520ee4a5a44c33fe565788b4d576a35f4c3430 | 7,165 |
import re
from typing import MutableMapping
crumbs = False  # module-level debug flag for verbose tracing (assumed False here)
def flatten(dictionary, parent_key=False, separator='_'):
"""
Turn a nested dictionary into a flattened dictionary
:param dictionary: The dictionary to flatten
:param parent_key: The string to prepend to dictionary's keys
:param separator: The string used to separate flattened keys
:return: A flattened dictionary
"""
items = []
for key, value in list(dictionary.items()):
if crumbs: print(('checking:',key))
new_key = (re.sub('[^A-Za-z0-9]+', '', str(parent_key)) + separator + re.sub('[^A-Za-z0-9]+', '', key) if parent_key else key).lower()
if isinstance(value, MutableMapping):
if crumbs: print((new_key,': dict found'))
if not list(value.items()):
if crumbs: print(('Adding key-value pair:',new_key,None))
items.append((new_key,None))
else:
items.extend(list(flatten(value, new_key, separator).items()))
elif isinstance(value, list):
if crumbs: print((new_key,': list found'))
if len(value):
for k, v in enumerate(value):
items.extend(list(flatten({str(k): v}, new_key).items()))
else:
if crumbs: print(('Adding key-value pair:',new_key,None))
items.append((new_key,None))
else:
if crumbs: print(('Adding key-value pair:',new_key,value))
items.append((new_key, value))
return dict(items) | 45131797a602c4fcb9f40f275d755c068b1baa83 | 7,166 |
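# Usage sketch (illustrative, with the `crumbs` debug flag left False): nested keys are
# joined with the separator and lower-cased.
nested = {"a": {"b": 1, "c": 2}, "d": 3}
print(flatten(nested))  # -> {'a_b': 1, 'a_c': 2, 'd': 3}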
def format_sec_to_hms(sec):
"""Format seconds to hours, minutes, seconds.
Args:
sec: float or int
Number of seconds in a period of time
Returns: str
        Period of time represented as a string of the form ``0h 00m 00s``.
"""
rem_int, s_int = divmod(int(sec), 60)
h_int, m_int, = divmod(rem_int, 60)
return "{}h {:02d}m {:02d}s".format(h_int, m_int, s_int) | aa2cc5d6584cdebf4d37292435ecd46bb6adc4a4 | 7,167 |
import numpy as np
def one_hot_encode(data):
"""turns data into onehot encoding
Args:
data (np.array): (n_samples,)
Returns:
np.array: shape (n_samples, n_classes)
"""
n_classes = np.unique(data).shape[0]
onehot = np.zeros((data.shape[0], n_classes))
for i, val in enumerate(data.astype(int)):
onehot[i, val] = 1.
return onehot | 58602ffa7d5964bfbb4b8457f698aad800cb3298 | 7,168 |
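# Usage sketch (illustrative): integer class labels become one-hot rows.
labels = np.array([0, 2, 1, 2])
print(one_hot_encode(labels))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]]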
def is_number(input_str):
"""Check if input_str is a string number
Args:
input_str (str): input string
Returns:
bool: True if input_str can be parse to a number (float)
"""
try:
float(input_str)
return True
except ValueError:
return False | d22fe852a15e3d926cffb36ea3d8a235592ea62a | 7,169 |
import datetime as dt
import numpy as np
import pandas as pd
def impute_between(coordinate_a, coordinate_b, freq):
"""
Args:
coordinate_a:
coordinate_b:
freq:
Returns:
"""
metrics = discrete_velocity(coordinate_a, coordinate_b)
b, d, sec = metrics['binning'], metrics['displacement'], metrics['time_delta']
if b != 'stationary' or d > 75 or sec > 60**2*12:
return None
a_lat, a_lon, a_ts = coordinate_a
b_lat, b_lon, b_ts = coordinate_b
if not (isinstance(a_ts, dt.datetime) and isinstance(b_ts, dt.datetime)):
raise TypeError('third element of each coordinate tuple must be dt')
fill_range = list(pd.date_range(a_ts, b_ts, freq=freq))
# ensure the returned dataframe range is exclusive
if fill_range[0] == a_ts:
fill_range.remove(fill_range[0])
if len(fill_range) == 0:
return None
if fill_range[-1] == b_ts:
fill_range.remove(fill_range[-1])
fill_lat = np.linspace(a_lat, b_lat, len(fill_range))
fill_lon = np.linspace(a_lon, b_lon, len(fill_range))
t = dict(lat=fill_lat, lon=fill_lon, ts=fill_range)
return pd.DataFrame(t) | 208729df0bd701302103a30e01e0cbdc5208f118 | 7,170 |
def seq(fr,to,by):
"""An analogous function to 'seq' in R
Parameters:
1. fr: from
2. to: to
3. by: by (interval)
"""
if fr<to:
        aseq = range(fr,to+abs(by),abs(by))
elif fr>to:
if by>0:
aseq = range(fr,to-by,-1*by)
else:
aseq = range(fr,to+by,by)
else:
aseq = [fr]
if aseq[-1]>to: return aseq[:-1]
else: return aseq | 39b7878f81e93c137eed1e435e438b1645b09f9f | 7,171 |
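# Usage sketch (illustrative): inclusive, R-like sequences in either direction.
print(list(seq(1, 10, 3)))  # -> [1, 4, 7, 10]
print(list(seq(10, 1, 3)))  # -> [10, 7, 4, 1]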
def _get_config_from_context(ctx):
"""
:param ctx:
:return:
:rtype: semi.config.configuration.Configuration
"""
return ctx.obj["config"] | c085f69fd87ad5f72c8453e6f01771d943b2c481 | 7,172 |
def _invert_options(matrix=None, sparse=None):
"""Returns |invert_options| (with default values) for a given |NumPy| matrix.
See :func:`sparse_options` for documentation of all possible options for
sparse matrices.
Parameters
----------
matrix
The matrix for which to return the options.
sparse
Instead of providing a matrix via the `matrix` argument,
`sparse` can be set to `True` or `False` to requset the
invert options for sparse or dense matrices.
Returns
-------
A tuple of all possible |invert_options|.
"""
global _dense_options, _dense_options_sid, _sparse_options, _sparse_options_sid
assert (matrix is None) != (sparse is None)
sparse = sparse if sparse is not None else issparse(matrix)
if sparse:
if not _sparse_options or _sparse_options_sid != defaults_sid():
_sparse_options = sparse_options()
_sparse_options_sid = defaults_sid()
return _sparse_options
else:
return _sparse_options
else:
if not _dense_options or _dense_options_sid != defaults_sid():
_dense_options = dense_options()
_dense_options_sid = defaults_sid()
return _dense_options
else:
return _dense_options | 0625b86038c29dbf0e6db4e87b9e76de05bce426 | 7,173 |
from typing import AnyStr
import os
def directory_is_empty(path: AnyStr) -> bool:
"""
:param path: a directory path
:return: True if directory is empty, False otherwise
"""
return not any(os.scandir(path)) | 906011bcffe994f34382d54171190c864e72ee6b | 7,174 |
def get_Carrot_scramble(n=70):
""" Gets a Carrot-notation scramble of length `n` for a Megaminx. Defaults to csTimer's default length of 70. """
return _UTIL_SCRAMBLER.call("util_scramble.getMegaminxCarrotScramble", n).replace('\n','').replace(' ',' ').replace(' ',' ') | 8155294b86f9d5cbe756f7476afb952446603d8c | 7,175 |
def convex_env_train(Xs, Ys):
"""
Identify the convex envelope on the set of models
from the train set.
"""
# Sort the list in either ascending or descending order of the
# items values in Xs
key_X_pairs = sorted(Xs.items(), key=lambda x: x[1],
reverse=False) # this is a list of (key, val) pairs
# Start the Pareto frontier with the first key value in the sorted list
p_front = [key_X_pairs[0][0]]
# Loop through the sorted list
count = 0
for (key, X) in key_X_pairs:
if Ys[key] <= Ys[p_front[-1]]: # Look for lower values of Y
if count > 0:
p_front.append(key)
count = count + 1
return remove_interior(p_front, Xs, Ys) | e9a9dd4a56bddd01ae1e071003ea8412b075b9de | 7,176 |
import numpy as np
import scipy.stats as ST
def randthresh(Y,K,p=np.inf,stop=False,verbose=False,varwind=False,knownull=True):
"""
Wrapper for random threshold functions (without connexity constraints)
In: Y (n,) Observations
K <int> Some positive integer (lower bound on the number of null hypotheses)
p <float> lp norm
stop <bool> Stop when minimum is attained (save computation time)
verbose <bool> 'Chatty' mode
varwind <bool> Varying window variant (vs. fixed window, with width K)
knownull <bool> Known null distribution (observations assumed Exp(1) under H0)
versus unknown (observations assumed Gaussian under H0)
Out: A dictionary D containing the following fields:
"C" (n-K) Lp norm of partial sums fluctuation about their conditional expectation
"thresh" <float> Detection threshold
"detect" (k,) Index of detected activations
"v" <float> Estimated null variance (if knownull is False)
Note: Random thresholding is performed only if null hypothesis of no activations is rejected
at level 5%
"""
D = {}
# Test presence of activity
if knownull:
X = Y
else:
v = np.square(Y).mean()
X = np.clip(-np.log(1 - ST.chi2.cdf(Y**2, 1, 0, scale=v)), 0, 1 / tol)
D["v"] = v
T = test_stat(X,p=np.inf)
if T <= 0.65:
print "No activity detected at 5% level"
D["detect"] = np.array([])
D["thresh"] = np.inf
else:
# Find optimal threshold
if varwind:
if knownull:
C = randthresh_varwind_knownull(Y,K,p,stop,verbose)
else:
C, V = randthresh_varwind_gaussnull(Y,K,p,stop,one_sided=False,verbose=verbose)
else:
if knownull:
C = randthresh_fixwind_knownull(Y,K,p,stop,verbose)
else:
C, V = randthresh_fixwind_gaussnull(Y,K,p,stop,one_sided=False,verbose=verbose)
n = len(X)
if stop:
I = np.where(C > 0)[0]
if len(I) > 0:
ncoeffs = I[-1]
else:
ncoeffs = n - K
else:
I = np.where((C[2:] > C[1:-1]) * (C[1:-1] < C[:-2]))[0]
if len(I) > 0:
ncoeffs = I[np.argmin(C[1:-1][I])] + 1
else:
ncoeffs = n - K
thresh = np.sort(np.abs(Y))[-ncoeffs]
# Detected activations
detect = np.where(np.abs(Y) > thresh)[0]
D["C"] = C[2:]
D["thresh"] = thresh
D["detect"] = detect
if not knownull:
D["v"] = V[2:]
return D | d42a9c4ddd27c3ad462d6a447b779db700a58976 | 7,177 |
import gc
def referrednested(func, recurse=True): #XXX: return dict of {__name__: obj} ?
"""get functions defined inside of func (e.g. inner functions in a closure)
NOTE: results may differ if the function has been executed or not.
If len(nestedcode(func)) > len(referrednested(func)), try calling func().
If possible, python builds code objects, but delays building functions
until func() is called.
"""
if PY3:
att1 = '__code__'
att0 = '__func__'
else:
att1 = 'func_code' # functions
att0 = 'im_func' # methods
funcs = set()
    # get the code objects, and try to track down by reference
for co in nestedcode(func, recurse):
# look for function objects that refer to the code object
for obj in gc.get_referrers(co):
# get methods
_ = getattr(obj, att0, None) # ismethod
if getattr(_, att1, None) is co: funcs.add(obj)
# get functions
elif getattr(obj, att1, None) is co: funcs.add(obj)
# get frame objects
elif getattr(obj, 'f_code', None) is co: funcs.add(obj)
# get code objects
elif hasattr(obj, 'co_code') and obj is co: funcs.add(obj)
# frameobjs => func.func_code.co_varnames not in func.func_code.co_cellvars
# funcobjs => func.func_code.co_cellvars not in func.func_code.co_varnames
# frameobjs are not found, however funcobjs are...
# (see: test_mixins.quad ... and test_mixins.wtf)
# after execution, code objects get compiled, and then may be found by gc
return list(funcs) | 357fde8030423690a5ae2f8ebcf42c7e86337d2a | 7,178 |
from typing import Dict
from typing import Any
from typing import Tuple
def format_organizations_output(response: Dict[str, Any], page_number: int, limit: int) -> Tuple[list, int]:
"""
Formatting list organizations command outputs.
Args:
response (Dict[str,Any): The response from the API call.
limit (int): Maximum number of results to return.
page_number(int): The Page number to retrieve.
Returns:
Tuple[list,int]: Formatted command output and total results.
"""
formatted_organizations = []
relevant_output_entities, total_page_number = format_list_commands_output(response,
['response', 'result', 'domains',
'domain'], page_number, limit)
for organization in relevant_output_entities:
formatted_organization = {}
for key, value in organization.items():
if key.startswith('@'):
formatted_organization[key[1:]] = value
else:
formatted_organization[key] = value
formatted_organizations.append(formatted_organization)
return formatted_organizations, total_page_number | 6eee18e58fd8b6fdba50f995df060689bdb63ef2 | 7,179 |
def which_db_version(cursor):
"""
Return version of DB schema as string.
Return '5', if iOS 5.
Return '6', if iOS 6 or iOS 7.
"""
query = "select count(*) from sqlite_master where name = 'handle'"
cursor.execute(query)
count = cursor.fetchone()[0]
if count == 1:
db_version = '6'
else:
db_version = '5'
return db_version | 07b1dbcea3fb4bf65bba5c578257440d39b6784c | 7,180 |
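# Hedged usage sketch: which_db_version only needs a DB-API cursor, so an
# in-memory SQLite database with a dummy 'handle' table is enough to drive it.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE handle (id INTEGER)")  # mimics an iOS 6+ schema
print(which_db_version(conn.cursor()))            # -> '6'
conn.close()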
async def get_number_of_images_from_category(category : str):
"""
    Get the number of images stored under the given category.
"""
categories_query = CATEGORIES_DB.search(where('category') == category)
if not categories_query:
return {"number_of_images": 0}
return {"number_of_images": len(categories_query)} | 73613ef2abaa2ccd5104d569d88b3e3476183610 | 7,181 |
def gaussian_total_correlation(cov):
"""Computes the total correlation of a Gaussian with covariance matrix cov.
We use that the total correlation is the KL divergence between the Gaussian
and the product of its marginals. By design, the means of these two Gaussians
are zero and the covariance matrix of the second Gaussian is equal to the
covariance matrix of the first Gaussian with off-diagonal entries set to zero.
Args:
cov: Numpy array with covariance matrix.
Returns:
Scalar with total correlation.
"""
return 0.5 * (np.sum(np.log(np.diag(cov))) - np.linalg.slogdet(cov)[1]) | 93b52d075cba08c58067f7e2c6b76e8c5b06fa76 | 7,182 |
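# Hedged usage sketch: a diagonal covariance factorises over dimensions, so its
# total correlation is (numerically) zero, while correlated dimensions give > 0.
import numpy as np

print(gaussian_total_correlation(np.diag([1.0, 2.0, 0.5])))            # ~0.0
print(gaussian_total_correlation(np.array([[1.0, 0.8], [0.8, 1.0]])))  # ~0.51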
def S3list(s3bucket, fdate, instrm, network='OKLMA'):
"""
get list of files in a s3 bucket for a specific fdate and instrument (prefix)
fdate: e.g. '2017-05-17'
instrm: e.g. 'GLM'
"""
prefix = {'GLM': 'fieldcampaign/goesrplt/GLM/data/L2/' + fdate + '/OR_GLM-L2-LCFA_G16',
'LIS': 'fieldcampaign/goesrplt/ISS_LIS/data/' + fdate + '/ISS_LIS_SC_V1.0_',
# 'FEGS': 'fieldcampaign/goesrplt/FEGS/data/goesr_plt_FEGS_' + fdate.replace('-', '') + '_Flash',
'CRS': 'fieldcampaign/goesrplt/CRS/data/GOESR_CRS_L1B_' + fdate.replace('-', ''),
'NAV': 'fieldcampaign/goesrplt/NAV_ER2/data/goesrplt_naver2_IWG1_' + fdate.replace('-', ''),
'LMA': 'fieldcampaign/goesrplt/LMA/' + network + '/data/' + fdate + '/goesr_plt_' + network + '_' + fdate.replace(
'-', '')}
print("S3list searching for ", prefix[instrm])
s3 = boto3.resource('s3')
bucket = s3.Bucket(s3bucket)
keys = []
for obj in bucket.objects.filter(Prefix=prefix[instrm]):
keys.append(obj.key)
return keys | afe77daf5b78545ae89a555064511c3be19947f0 | 7,183 |
def formatted_karma(user, activity):
"""
Performs a karma check for the user and returns a String that's already formatted exactly like the usual response of the bot.
:param user: The user the karma check will be performed for.
    :return: A conveniently formatted karma check response.
"""
response = good_karma_template.format(subreddit.display_name, user.name, activity[1], activity[2], activity[0])
if activity[3] > activity[0]/3:
response = bad_karma_template.format(subreddit.display_name, user.name, activity[1], activity[2], activity[4], activity[0], activity[3])
elif activity[1] < 2 and activity[2] < 5:
response = new_karma_template.format(subreddit.display_name, user.name, activity[1], activity[2], activity[0])
return response | fa130f6bd64763200ed76a9284f9e83c686b7fb7 | 7,184 |
import collections
def extras_features(*features):
"""
Decorator used to register extras provided features to a model
"""
def wrapper(model_class):
# Initialize the model_features store if not already defined
if "model_features" not in registry:
registry["model_features"] = {f: collections.defaultdict(list) for f in EXTRAS_FEATURES}
for feature in features:
if feature in EXTRAS_FEATURES:
app_label, model_name = model_class._meta.label_lower.split(".")
registry["model_features"][feature][app_label].append(model_name)
else:
raise ValueError("{} is not a valid extras feature!".format(feature))
return model_class
return wrapper | 03ff8f6fe9d020b55f416468cceacf0f163ec102 | 7,185 |
def setFeedMoleFraction(H2COxRatio, CO2COxRatio):
"""
set inlet feed mole fraction
"""
# feed properties
# H2/COx ratio
# H2COxRatio = 2.0
# CO2/CO ratio
# CO2COxRatio = 0.8
# mole fraction
y0_H2O = 0.00001
y0_CH3OH = 0.00001
y0_DME = 0.00001
# total molar fraction
tmf0 = 1 - (y0_H2O + y0_CH3OH + y0_DME)
# COx
COx = tmf0/(H2COxRatio + 1)
# mole fraction
y0_H2 = H2COxRatio*COx
y0_CO2 = CO2COxRatio*COx
y0_CO = COx - y0_CO2
# total mole fraction
tmf = y0_H2 + y0_CO + y0_CO2 + y0_H2O + y0_CH3OH + y0_DME
# CO2/CO2+CO ratio
CO2CO2CORatio = y0_CO2/(y0_CO2+y0_CO)
# res
feedMoFri = np.array([y0_H2, y0_CO2, y0_H2O, y0_CO,
y0_CH3OH, y0_DME], dtype=np.float32)
# res
return feedMoFri | 82d368cd84a06a29663aee4c04a0505dba7536bb | 7,186 |
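# Hedged usage sketch: a feed specified by H2COxRatio=2.0 and CO2COxRatio=0.8
# should yield six mole fractions (H2, CO2, H2O, CO, CH3OH, DME) summing to ~1.
import numpy as np

y0 = setFeedMoleFraction(2.0, 0.8)
print(y0)
print(np.isclose(y0.sum(), 1.0, atol=1e-4))  # True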
def format(message, *args, **kwargs):
"""Shortcut for :class:`tossi.Formatter.format` of the default registry.
"""
return formatter.vformat(message, args, kwargs) | 9d32c6a7497ffaa9b0da592f2c5ad828f22cf294 | 7,187 |
from django.http.request import QueryDict
def reverse_url(url_name,id,request):
"""
编辑标签返回当前页
:param url_name:
:param id:
:param request:
:return:
"""
path = request.get_full_path()
query_dict_obj = QueryDict(mutable=True)
query_dict_obj['next'] = path
encode_url = query_dict_obj.urlencode()
prefix_path = reverse(url_name,args=(id,))
full_path = prefix_path + '?' + encode_url
return full_path | 3453fed5717c2d3a335554e0b02965be8b3c04d0 | 7,188 |
from typing import Dict
from typing import List
def add_default_to_data(data: Dict[str, object], schema: SchemaDictType) -> Dict[str, object]:
"""Adds the default values present in the schema to the required fields
if the values are not provided in the data
"""
    # add None as the default for fields that are neither required nor given
    # an explicit default value
    non_default_values = [i for i in schema if all(
        j not in schema[i] for j in ["required", "default"])]
    for val in non_default_values:
        schema[val]["default"] = None
    defaults: List[str] = [i for i in schema if "default" in schema[i]]
if not all(i in data for i in defaults):
for i in defaults:
if i not in data:
data[i] = schema[i]["default"]
return data
else:
return data | 58b460eebb675457ed7832b4e211b72e2b018d03 | 7,189 |
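# Hedged usage sketch with a hypothetical schema: "port" falls back to its
# declared default, while "debug" (neither required nor defaulted) becomes None.
schema = {
    "host": {"required": True},
    "port": {"default": 8080},
    "debug": {},
}
print(add_default_to_data({"host": "localhost"}, schema))
# -> {'host': 'localhost', 'port': 8080, 'debug': None}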
import re
def repeating_chars(text: str, *, chars: str, maxn: int = 1) -> str:
"""Normalize repeating characters in `text`.
Truncating their number of consecutive repetitions to `maxn`.
Duplicates Textacy's `utils.normalize_repeating_chars`.
Args:
text (str): The text to normalize.
chars: One or more characters whose consecutive repetitions are to be
normalized, e.g. "." or "?!".
maxn: Maximum number of consecutive repetitions of `chars` to which
longer repetitions will be truncated.
Returns:
str
"""
return re.sub(r"({}){{{},}}".format(re.escape(chars), maxn + 1), chars * maxn, text) | 9dc326947a900d3531dcd59bf51d5c3396a42fea | 7,190 |
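# Hedged usage sketch: runs of "!" longer than maxn are truncated.
print(repeating_chars("wait!!!! what??", chars="!", maxn=1))  # -> 'wait! what??'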
import io
import csv
def export_data_csv():
""" Build a CSV file with the Order data from the database
:return: The CSV file in StringIO
"""
result = query_order.get_all_orders()
output = io.StringIO()
writer = csv.writer(output)
line = ['Numéro de commande', 'Date', 'Montant total', 'Numéro client', 'Référence devis']
writer.writerow(line)
for row in result:
date = format_date_csv(str(row.orderDate)[:10])
line = [str(row.orderNumber), date, row.orderTotalAmount, str(row.clientNumber), str(row.quoteNumber)]
writer.writerow(line)
output.seek(0)
return output | 5839542a1ef366a63850d04909080b8bca8d4714 | 7,191 |
import re
def findurls(s):
"""Use a regex to pull URLs from a message"""
regex = r"(?i)\b(((https?|ftp|smtp):\/\/)?(www.)?[a-zA-Z0-9_.-]+\.[a-zA-Z0-9_.-]+(\/[a-zA-Z0-9#]+\/?)*\/*)"
url = re.findall(regex,s)
return [x[0] for x in url] | 801947e893a23a4e440c8e5fc838d6aa89671e0c | 7,192 |
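# Hedged usage sketch: pulling URLs out of a free-text message.
msg = "docs at https://example.com/docs and mirror at www.example.org/mirror"
print(findurls(msg))
# e.g. ['https://example.com/docs', 'www.example.org/mirror']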
def collide_rect(left, right):
"""collision detection between two sprites, using rects.
pygame.sprite.collide_rect(left, right): return bool
Tests for collision between two sprites. Uses the pygame.Rect colliderect
function to calculate the collision. It is intended to be passed as a
collided callback function to the *collide functions. Sprites must have
"rect" attributes.
New in pygame 1.8.0
"""
return left.rect.colliderect(right.rect) | 2111b4d6298cc435d61e12f301d5373cc07c54ff | 7,193 |
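# Hedged usage sketch: any two objects exposing a pygame-style ``rect``
# attribute can be passed straight to collide_rect (requires pygame installed).
import pygame

class Box:
    def __init__(self, x, y, w, h):
        self.rect = pygame.Rect(x, y, w, h)

print(collide_rect(Box(0, 0, 10, 10), Box(5, 5, 10, 10)))  # truthy: rects overlap
print(collide_rect(Box(0, 0, 10, 10), Box(20, 20, 5, 5)))  # falsy: disjoint rects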
import six
import os
def expand_envvars(d):
""" Recursively convert lookup that look like environment vars in a dict
    This function assumes that environment variables are values that begin
with `$` and are evaluated with :func:`os.path.expandvars`. No exception
will be raised if an environment variable is not set.
Args:
d (dict): expand environment variables used in the values of this
dictionary
Returns:
dict: input dictionary with environment variables expanded
"""
def check_envvar(k, v):
""" Warn if value looks un-expanded """
if '$' in v:
logger.warning('Config key=value pair might still contain '
'environment variables: "%s=%s"' % (k, v))
_d = d.copy()
for k, v in six.iteritems(_d):
if isinstance(v, dict):
_d[k] = expand_envvars(v)
elif isinstance(v, str):
_d[k] = os.path.expandvars(v)
check_envvar(k, v)
elif isinstance(v, (list, tuple)):
n_v = []
for _v in v:
if isinstance(_v, str):
_v = os.path.expandvars(_v)
check_envvar(k, _v)
n_v.append(_v)
_d[k] = n_v
return _d | dd587364a17d189b3d7751458fb82cf978a61ca0 | 7,194 |
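# Hedged usage sketch: nested dicts and lists containing $VAR references are
# expanded via os.path.expandvars.
import os

os.environ["DATA_ROOT"] = "/srv/data"
cfg = {"paths": {"root": "$DATA_ROOT", "scenes": ["$DATA_ROOT/p035", "$DATA_ROOT/p036"]}}
print(expand_envvars(cfg))
# -> {'paths': {'root': '/srv/data', 'scenes': ['/srv/data/p035', '/srv/data/p036']}}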
from typing import Callable
def get_minhash(
doc: str,
normalization_func: Callable,
split_method: str,
ngram_size: int,
ngram_stride: int,
num_minhashes: int,
random_seed: int,
) -> LeanMinHash:
"""Returns a minhash fingerprint for the given document.
Args:
doc (str):
The document to create the MinHash object for.
normalization_func (Callable):
The function to normalize the document with.
split_method (str):
The method to split the document into shingles.
Can be 'word_ngram', 'paragraph', 'none' or None.
ngram_size (int):
The size of the ngrams to use.
ngram_stride (int):
The stride of the ngrams to use.
num_minhashes (int):
The number of minhashes to use.
random_seed (int):
The random seed to use.
Returns:
LeanMinHash: The minhash fingerprint for the given document.
Raises:
ValueError:
If `split_method` is not 'word_ngram', 'paragraph', 'none'
or None.
"""
# Extract shingles from the document, depending on the `split_method`
shingles = get_shingles(
doc,
normalization_func=normalization_func,
split_method=split_method,
ngram_size=ngram_size,
ngram_stride=ngram_stride,
)
# Initialise the fingerprint
minhash = MinHash(num_perm=num_minhashes, seed=random_seed)
# Add all the shingles to the fingerprint
minhash.update_batch([shingle.encode("utf-8") for shingle in shingles])
# Convert the fingerprint to a LeanMinHash fingerprint, to save memory
# and increase performance
minhash = LeanMinHash(minhash, seed=random_seed)
# Return the fingerprint
return minhash | 7f9340885a8ec3b9eba85f627550ed9d8f2df6c1 | 7,195 |
import re
def tokenize(text):
"""
Function:
        tokenize: This function splits text into words and returns the root form of the words
Args:
text(str): the message
Return:
lemm(list of str): a list of the root form of the message words
"""
# Normalizing text (a-zA-Z0-9 matches all allalphanumeric characters)
text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
# Tokenizing text
words = word_tokenize(text)
# Removing stop words
stop = stopwords.words("english")
words = [t for t in words if t not in stop]
# Lemmatization
lemm = [WordNetLemmatizer().lemmatize(w) for w in words]
return lemm | 4ee5cf7bad56f565c211824b1a5838d732cbeab5 | 7,196 |
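# Hedged usage sketch: assumes the NLTK 'punkt', 'stopwords' and 'wordnet'
# resources are available (downloaded here quietly if missing).
import nltk

for pkg in ("punkt", "stopwords", "wordnet"):
    nltk.download(pkg, quiet=True)

print(tokenize("Roads are flooded, please send help!"))
# e.g. ['road', 'flooded', 'please', 'send', 'help']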
import pickle
def displayRandomForest():
"""Run displayRandomForest"""
executionStartTime = int(time.time())
# status and message
success = True
message = "ok"
plotUrl = ''
dataUrl = ''
# get model1, var1, pres1, model2, var2, pres2, start time, end time, lon1, lon2, lat1, lat2, nSample
center = []
model = []
var = []
pres = []
nVarP = 1
nVar = int(request.args.get('nVar', ''))
for i in range( nVar+nVarP ):
m1 = request.args.get('model'+str(i+1), '').lower()
temp1 = m1.split('_')
center.append(temp1[0])
model.append(temp1[1])
var.append(request.args.get('var'+str(i+1), ''))
pres.append(request.args.get('pres'+str(i+1), ''))
startT = request.args.get('timeS', '')
endT = request.args.get('timeE', '')
lonS = request.args.get('lonS', '')
lonE = request.args.get('lonE', '')
latS = request.args.get('latS', '')
latE = request.args.get('latE', '')
frontend_url = request.args.get('fromPage', '')
print 'frontend_url: ', frontend_url
userId = request.args.get('userid', '')
print 'from url, userId: ', userId
if userId != None and userId != '':
userId = int(userId)
else:
userId = 0
json1 = {
'nVar':nVar,
'center':center,
'model':model,
'varName':var,
'pres':pres,
'yearS':startT[:4],
'yearE':endT[:4],
'monthS':startT[4:],
'monthE':endT[4:],
'lon1S':lonS,
'lon1E':lonE,
'lat1S':latS,
'lat1E':latE,
}
# get where the input file and output file are
current_dir = os.getcwd()
print 'current_dir: ', current_dir
try:
seed_str = str(time.time())
tag = md5.new(seed_str).hexdigest()
output_dir = current_dir + '/svc/static/randomForest/' + tag
print 'output_dir: ', output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
json1['outDir'] = output_dir
pFile = '%s/p.pickle'%output_dir
fid = open(pFile,'w')
pickle.dump(json1, fid)
fid.close()
# chdir to where the app is
os.chdir(current_dir+'/svc/src/randomForest')
# instantiate the app. class
c1 = call_randomForest.call_randomForest(pFile)
# call the app. function (0 means the image created is scatter plot)
### (message, imgFileName) = c1.displayScatterPlot2V(0)
(message, imgFileName, dataFileName) = c1.display()
# chdir back
os.chdir(current_dir)
ind1 = message.find('No Data')
if ind1>0:
message1 = message[ind1:(ind1+200)]
message1a = message1.split('\n')
print message1a[0]
print message1a[1]
hostname, port = get_host_port2("host.cfg")
### userId = 2
if hostname == 'EC2':
try:
req = urllib2.Request('http://169.254.169.254/latest/meta-data/public-ipv4')
response = urllib2.urlopen(req)
hostname = response.read()
except Exception, e:
print 'e: ', e
"""
try:
req2 = urllib2.Request(' http://169.254.169.254/latest/user-data')
response2 = urllib2.urlopen(req2)
userId = json.loads(response2.read())['username']
except Exception, e:
print 'e: ', e
userId = 2
"""
"""
if userIdDict.has_key(userId):
userId = userIdDict[userId]
else :
userId = 'lei'
"""
print 'userId: ', userId
print 'hostname: ', hostname
print 'port: ', port
purpose = request.args.get('purpose')#"Test .\'\"\\purpose"
backend_url, plotUrl, dataUrl, failedImgUrl = assignUrl('randomForest', tag, imgFileName, dataFileName)
# backend_url = 'http://' + hostname + ':' + port + '/svc/randomForest'
# print 'backend_url: ', backend_url
# print 'imgFileName: ', imgFileName
# plotUrl = 'http://' + hostname + ':' + port + '/static/randomForest/' + tag + '/' + imgFileName
# print 'plotUrl: ', plotUrl
# dataUrl = 'http://' + hostname + ':' + port + '/static/randomForest/' + tag + '/' + dataFileName
# print 'dataUrl: ', dataUrl
# failedImgUrl = 'http://' + hostname + ':' + port + '/static/plottingFailed.png'
# print 'failedImgUrl: ', failedImgUrl
        if imgFileName == '' or not os.path.exists(output_dir+'/'+imgFileName):
print '****** Error: %s not exist' % imgFileName
plotUrl = failedImgUrl
        if dataFileName == '' or not os.path.exists(output_dir+'/'+dataFileName):
print '****** Error: %s not exist' % dataFileName
dataUrl = failedImgUrl
print 'message: ', message
if len(message) == 0 or message.find('Error') >= 0 or message.find('error:') >= 0 or message.find('No Data') >= 0:
success = False
plotUrl = ''
dataUrl = ''
except ValueError, e:
# chdir to current_dir in case the dir is changed to where the app is in the try block
os.chdir(current_dir)
print 'change dir back to: ', current_dir
success = False
message = str(e)
except Exception, e:
# chdir to current_dir in case the dir is changed to where the app is in the try block
os.chdir(current_dir)
print 'change dir back to: ', current_dir
success = False
### message = str("Error caught in displayScatterPlot2V()")
message = str(e)
executionEndTime = int(time.time())
urlLink = request.query_string
urlLink = urlLink.strip() + '&image=%s&data_url=%s' % (plotUrl, dataUrl)
print 'urlLink: ', urlLink
urlLink = urlLink.replace('&fromPage='+frontend_url, '')
print 'urlLink: ', urlLink
# json dictionary for provenance service request
post_json = {'source': 'JPL', 'parameters':urlLink, 'frontend_url': frontend_url, 'backend_url': backend_url, 'userId': long(userId),
'executionStartTime':long(executionStartTime)*1000, 'executionEndTime':long(executionEndTime)*1000}
post_json = json.dumps(post_json)
if USE_CMU:
try:
print post_json
print requests.post(CMU_PROVENANCE_URL, data=post_json, headers=HEADERS).text
print requests.post(CMU_PROVENANCE_URL_2, data=post_json, headers=HEADERS).text
### print requests.post(VIRTUAL_EINSTEIN_URL, data=post_json, headers=HEADERS).text
except:
print 'Something went wrong with Wei\'s stuff'
return jsonify({
'success': success,
'message': message,
'url': plotUrl,
'dataUrl': dataUrl
}) | a77cce60947d553a870f636fcfc8e3b282b69eea | 7,197 |
def get_reports(request):
"""
Get a list of all :model:`reporting.Report` entries associated with
an individual :model:`users.User` via :model:`rolodex.Project` and
:model:`rolodex.ProjectAssignment`.
"""
active_reports = []
active_projects = (
ProjectAssignment.objects.select_related("project")
.filter(Q(operator=request.user) & Q(project__complete=False))
.order_by("project__end_date")
)
for active_project in active_projects:
reports = Report.objects.filter(
Q(project=active_project.project) & Q(complete=False)
)
for report in reports:
active_reports.append(report)
return active_reports | dc622daf0303e6137a36962db45655de1c43deb2 | 7,198 |
import json
def create_response(key, value):
"""Return generic AWS Lamba proxy response object format."""
return {
"statusCode": 200,
"headers": {"Content-Type": "application/json"},
"body": json.dumps({key: value})
} | 9236a9e4504e6fbebe841b8cc6b6ad4602dae463 | 7,199 |
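# Hedged usage sketch: shaping an API Gateway proxy response from a Lambda handler.
print(create_response("status", "ok"))
# -> {'statusCode': 200, 'headers': {'Content-Type': 'application/json'}, 'body': '{"status": "ok"}'}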