def scan(l,**kwargs):
'''
from elist.elist import *
from elist.jprint import pobj
l = [1,[4],2,[3,[5,6]]]
desc = description(l)
l = [1,2,[4],[3,[5,6]]]
desc = description(l)
'''
if('itermode' in kwargs):
itermode = True
else:
itermode = False
####level == 0
desc_matrix = init_desc_matrix(l)
if(desc_matrix[0][0]['leaf'] == True):
return(desc_matrix)
else:
pass
####cache
lcache=LevelCache(datas=l,descs=desc_matrix[0][0])
scache=StateCache(desc_matrix)
pcache = init_pcache_handler_inline(kwargs)
####level > 0
while(lcache.data.__len__() > 0):
#add next desc_level
scache.update()
for unhandled_seq in range(0,lcache.data.__len__()):
#handle parent
pcache.update_pdesc(lcache,unhandled_seq)
for sib_seq in range(0,pcache.sibs_len):
#handle child
pcache.update_desc(lcache,scache,sib_seq)
#update level lcache
lcache.update()
return(desc_matrix) |
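# Hedged usage sketch (not part of the original source): build the per-level
# description matrix for a nested list with scan(). Assumes the elist helpers
# used above (init_desc_matrix, LevelCache, StateCache, etc.) are importable.
l = [1, [4], 2, [3, [5, 6]]]
desc_matrix = scan(l)
# desc_matrix[d] holds the descriptors of all elements at depth d.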
def fullfill_descendants_info(desc_matrix):
'''
flat_offset
'''
pathloc_mapping = {}
locpath_mapping = {}
#def leaf_handler(desc,pdesc,offset):
def leaf_handler(desc,pdesc):
#desc['flat_offset'] = (offset,offset+1)
desc['non_leaf_son_paths'] = []
desc['leaf_son_paths'] = []
desc['non_leaf_descendant_paths'] = []
desc['leaf_descendant_paths'] = []
desc['flat_len'] = 1
if(pdesc['flat_len']):
pdesc['flat_len'] = pdesc['flat_len'] + 1
else:
pdesc['flat_len'] = 1
#def non_leaf_handler(desc,pdesc,offset):
def non_leaf_handler(desc,pdesc):
#desc['flat_offset'] = (offset,offset+desc['flat_len'])
pdesc['non_leaf_descendant_paths'].extend(copy.deepcopy(desc['non_leaf_descendant_paths']))
pdesc['leaf_descendant_paths'].extend(copy.deepcopy(desc['leaf_descendant_paths']))
if(pdesc['flat_len']):
pdesc['flat_len'] = pdesc['flat_len'] + desc['flat_len']
else:
pdesc['flat_len'] = desc['flat_len']
def fill_path_mapping(desc):
pmk = tuple(desc['path'])
pmv = tuple(DescMatrix.loc(desc))
pathloc_mapping[pmk] = pmv
locpath_mapping[pmv] = pmk
dm = DescMatrix(desc_matrix)
depth = desc_matrix.__len__()
desc_level = desc_matrix[depth - 1]
length = desc_level.__len__()
#the last level
#offset = 0
for j in range(length - 1,-1,-1):
desc = desc_level[j]
fill_path_mapping(desc)
pdesc = dm.pdesc(desc)
leaf_handler(desc,pdesc)
#leaf_handler(desc,pdesc,offset)
#offset = offset + 1
for i in range(depth-2,0,-1):
#offset = 0
desc_level = desc_matrix[i]
length = desc_level.__len__()
for j in range(length-1,-1,-1):
desc = desc_level[j]
fill_path_mapping(desc)
pdesc = dm.pdesc(desc)
if(desc['leaf']):
leaf_handler(desc,pdesc)
#leaf_handler(desc,pdesc,offset)
#offset = offset + 1
else:
non_leaf_handler(desc,pdesc)
#non_leaf_handler(desc,pdesc,offset)
#offset = offset + desc['flat_len']
desc_matrix[0][0]['flat_offset'] = (0,desc_matrix[0][0]['flat_len'])
for i in range(0,depth-1):
pdesc_level = desc_matrix[i]
length = pdesc_level.__len__()
for j in range(0,length):
pdesc = pdesc_level[j]
si = pdesc['flat_offset'][0]
for k in range(0,pdesc['sons_count']):
spl = append(pdesc['path'],k,mode='new')
pk = tuple(spl)
locx,locy = pathloc_mapping[pk]
son = desc_matrix[locx][locy]
ei = si + son['flat_len']
son['flat_offset'] = (si,ei)
si = ei
return(desc_matrix,pathloc_mapping,locpath_mapping) |
def pathlist_to_getStr(path_list):
'''
>>> pathlist_to_getStr([1, '1', 2])
"[1]['1'][2]"
>>>
'''
t1 = path_list.__repr__()
t1 = t1.lstrip('[')
t1 = t1.rstrip(']')
t2 = t1.split(", ")
s = ''
for i in range(0,t2.__len__()):
s = ''.join((s,'[',t2[i],']'))
return(s) |
def getStr_to_pathlist(gs):
'''
gs = "[1]['1'][2]"
getStr_to_pathlist(gs)
gs = "['u']['u1']"
getStr_to_pathlist(gs)
'''
def numize(w):
try:
int(w)
except:
try:
float(w)
except:
return(w)
else:
return(float(w))
else:
return(int(w))
def strip_quote(w):
if(type(w) == type('')):
if(w[0]==w[-1]):
if((w[0]=="'") |(w[0]=='"')):
return(w[1:-1])
else:
return(w)
else:
return(w)
else:
return(w)
gs = gs[1:-1]
pl = gs.split("][")
pl = array_map(pl,numize)
pl = array_map(pl,strip_quote)
return(pl) |
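# Hedged round-trip sketch (illustrative only): pathlist_to_getStr and
# getStr_to_pathlist are inverses for simple int/str path elements.
gs = pathlist_to_getStr([1, '1', 2])     # "[1]['1'][2]"
pl = getStr_to_pathlist(gs)              # [1, '1', 2]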
def get_block_op_pairs(pairs_str):
'''
# >>> get_block_op_pairs("{}[]")
# {1: ('{', '}'), 2: ('[', ']')}
# >>> get_block_op_pairs("{}[]()")
# {1: ('{', '}'), 2: ('[', ']'), 3: ('(', ')')}
# >>> get_block_op_pairs("{}[]()<>")
# {1: ('{', '}'), 2: ('[', ']'), 3: ('(', ')'), 4: ('<', '>')}
'''
pairs_str_len = pairs_str.__len__()
pairs_len = pairs_str_len // 2
pairs_dict = {}
for i in range(1,pairs_len +1):
pairs_dict[i] = pairs_str[i*2-2],pairs_str[i*2-1]
return(pairs_dict) |
def is_lop(ch,block_op_pairs_dict=get_block_op_pairs('{}[]()')):
'''
# is_lop('{',block_op_pairs_dict)
# is_lop('[',block_op_pairs_dict)
# is_lop('}',block_op_pairs_dict)
# is_lop(']',block_op_pairs_dict)
# is_lop('a',block_op_pairs_dict)
'''
for i in range(1,block_op_pairs_dict.__len__()+1):
if(ch == block_op_pairs_dict[i][0]):
return(True)
else:
pass
return(False) |
def get_next_char_level_in_j_str(curr_lv,curr_seq,j_str,block_op_pairs_dict=get_block_op_pairs("{}[]()")):
''' the first-char is level-1
when current is non-op, next-char-level = curr-level
when current is lop, non-paired-rop-next-char-level = lop-level+1;
when current is lop, paired-rop-next-char-level = lop-level
when current is rop, next-char-level = rop-level - 1
# {"key_4_UF0aJJ6v": "value_1", "key_2_Hd0t": ["value_16", "value_8", "value_8", "value_15", "value_14", "value_19", {......
# 122222222222222222222222222222222222222222222333333333333333333333333333333333333333333333333333333333333333333333334......
# {\n"key_4_UF0aJJ6v": "value_1", \n"key_2_Hd0t": [\n"value_16", \n"value_8", \n"value_8", \n"value_15", \n"value_14", \n"value_19",......
# 1 222222222222222222222222222222 2222222222222222 3333333333333 333333333333 333333333333 3333333333333 3333333333333 3333333333333......
'''
curr_ch = j_str[curr_seq]
next_ch = j_str[curr_seq + 1]
cond = 0
for i in range(1,block_op_pairs_dict.__len__()+1):
if(curr_ch == block_op_pairs_dict[i][0]):
if(next_ch == block_op_pairs_dict[i][1]):
next_lv = curr_lv
else:
next_lv = curr_lv + 1
cond = 1
break
elif(curr_ch == block_op_pairs_dict[i][1]):
if(is_rop(next_ch,block_op_pairs_dict)):
next_lv = curr_lv - 1
else:
next_lv = curr_lv
cond = 1
break
else:
pass
if(cond == 1):
pass
elif(is_rop(next_ch,block_op_pairs_dict)):
next_lv = curr_lv - 1
else:
next_lv = curr_lv
curr_lv = next_lv
curr_seq = curr_seq + 1
return(curr_lv,curr_lv,curr_seq) |
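# Hedged driver sketch (illustrative only): annotate every character of a
# JSON-like string with its nesting level, following the rules in the
# docstring above. Assumes is_rop() exists as the counterpart of is_lop().
def annotate_levels(j_str):
    levels = [1]                          # the first char is level 1
    curr_lv, curr_seq = 1, 0
    while curr_seq < len(j_str) - 1:
        curr_lv, _, curr_seq = get_next_char_level_in_j_str(curr_lv, curr_seq, j_str)
        levels.append(curr_lv)
    return levels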
def str_display_width(s):
'''
from elist.utils import *
str_display_width('a')
str_display_width('去')
'''
s= str(s)
width = 0
length = s.__len__()
for i in range(0,length):
sublen = s[i].encode().__len__()
sublen = int(sublen/2 + 1/2)
width = width + sublen
return(width) |
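# Hedged examples (illustrative only): single-byte characters count as width 1,
# multi-byte (e.g. CJK) characters as width 2 under the UTF-8 based heuristic above.
str_display_width('abc')      # 3
str_display_width('去abc')    # 5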
def get_wfsmat(l):
'''
l = ['v_7', 'v_3', 'v_1', 'v_4', ['v_4', 'v_2'], 'v_5', 'v_6', 'v_1', 'v_6', 'v_7', 'v_5', ['v_4', ['v_1', 'v_8', 'v_3', 'v_4', 'v_2', 'v_7', [['v_3', 'v_2'], 'v_4', 'v_5', 'v_1', 'v_3', 'v_1', 'v_2', 'v_5', 'v_8', 'v_8', 'v_7'], 'v_5', 'v_8', 'v_7', 'v_1', 'v_5'], 'v_6'], 'v_4', 'v_5', 'v_8', 'v_5']
get_wfsmat(l)
'''
ltree = ListTree(l)
vdescmat = ltree.desc
wfsmat = matrix_map(vdescmat,lambda v,ix,iy:v['path'])
wfsmat.pop(0)
return(wfsmat) |
def wfs2mat(wfs):
'''
wfs = [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13], [14], [15], [4, 0], [4, 1], [11, 0], [11, 1], [11, 2], [11, 1, 0], [11, 1, 1], [11, 1, 2], [11, 1, 3], [11, 1, 4], [11, 1, 5], [11, 1, 6], [11, 1, 7], [11, 1, 8], [11, 1, 9], [11, 1, 10], [11, 1, 11], [11, 1, 6, 0], [11, 1, 6, 1], [11, 1, 6, 2], [11, 1, 6, 3], [11, 1, 6, 4], [11, 1, 6, 5], [11, 1, 6, 6], [11, 1, 6, 7], [11, 1, 6, 8], [11, 1, 6, 9], [11, 1, 6, 10], [11, 1, 6, 0, 0], [11, 1, 6, 0, 1]]
'''
wfsmat = []
depth = 0
#note: 'filter' here is presumably elist's filter(sequence, cond_func) helper, not the Python builtin
level = filter(wfs,lambda ele:ele.__len__()==1)
while(level.__len__()>0):
wfsmat.append([])
wfsmat[depth] = level
depth = depth+1
level = filter(wfs,lambda ele:ele.__len__()==depth+1)
return(wfsmat) |
def dfs2wfsmat(dfs):
'''
dfs = [[0], [1], [2], [3], [4], [4, 0], [4, 1], [5], [6], [7], [8], [9], [10], [11], [11, 0], [11, 1], [11, 1, 0], [11, 1, 1], [11, 1, 2], [11, 1, 3], [11, 1, 4], [11, 1, 5], [11, 1, 6], [11, 1, 6, 0], [11, 1, 6, 0, 0], [11, 1, 6, 0, 1], [11, 1, 6, 1], [11, 1, 6, 2], [11, 1, 6, 3], [11, 1, 6, 4], [11, 1, 6, 5], [11, 1, 6, 6], [11, 1, 6, 7], [11, 1, 6, 8], [11, 1, 6, 9], [11, 1, 6, 10], [11, 1, 7], [11, 1, 8], [11, 1, 9], [11, 1, 10], [11, 1, 11], [11, 2], [12], [13], [14], [15]]
dfs2wfsmat(dfs)
'''
wfsmat = []
depth = 0
level = filter(dfs,lambda ele:ele.__len__()==1)
while(level.__len__()>0):
wfsmat.append([])
wfsmat[depth] = level
depth = depth+1
level = filter(dfs,lambda ele:ele.__len__()==depth+1)
return(wfsmat) |
def parent_handler(self,lcache,i,*args):
'''
_update_pdesc_sons_info
'''
pdesc = lcache.desc[i]
pdesc['sons_count'] = self.sibs_len
pdesc['leaf_son_paths'] = []
pdesc['non_leaf_son_paths'] = []
pdesc['leaf_descendant_paths'] = []
pdesc['non_leaf_descendant_paths'] = []
return(pdesc) |
def child_begin_handler(self,scache,*args):
'''
_creat_child_desc
update depth,parent_breadth_path,parent_path,sib_seq,path,lsib_path,rsib_path,lcin_path,rcin_path
'''
pdesc = self.pdesc
depth = scache.depth
sib_seq = self.sib_seq
sibs_len = self.sibs_len
pdesc_level = scache.pdesc_level
desc = copy.deepcopy(pdesc)
desc = reset_parent_desc_template(desc)
desc['depth'] = depth
desc['parent_breadth_path'] = copy.deepcopy(desc['breadth_path'])
desc['sib_seq'] = sib_seq
desc['parent_path'] = copy.deepcopy(desc['path'])
desc['path'].append(sib_seq)
update_desc_lsib_path(desc)
update_desc_rsib_path(desc,sibs_len)
if(depth == 1):
pass
else:
update_desc_lcin_path(desc,pdesc_level)
update_desc_rcin_path(desc,sibs_len,pdesc_level)
return(desc) |
def leaf_handler(self,*args):
'''#leaf child handler'''
desc = self.desc
pdesc = self.pdesc
desc['leaf'] = True
desc['sons_count'] = 0
pdesc['leaf_son_paths'].append(copy.deepcopy(desc['path']))
pdesc['leaf_descendant_paths'].append(copy.deepcopy(desc['path'])) |
def non_leaf_handler(self,lcache):
'''#nonleaf child handler'''
desc = self.desc
pdesc = self.pdesc
desc['leaf'] = False
pdesc['non_leaf_son_paths'].append(copy.deepcopy(desc['path']))
pdesc['non_leaf_descendant_paths'].append(copy.deepcopy(desc['path']))
lcache.ndata.append(self.data)
lcache.ndesc.append(desc) |
def child_end_handler(self,scache):
'''
_upgrade_breadth_info
update breadth, breadth_path, and add desc to desc_level
'''
desc = self.desc
desc_level = scache.desc_level
breadth = desc_level.__len__()
desc['breadth'] = breadth
desc['breadth_path'].append(breadth)
desc_level.append(desc) |
def parse(self, source):
"""Parse command content from the LaTeX source.
Parameters
----------
source : `str`
The full source of the tex document.
Yields
------
parsed_command : `ParsedCommand`
Yields a parsed command instance for each occurrence of the command
in the source.
"""
command_regex = self._make_command_regex(self.name)
for match in re.finditer(command_regex, source):
self._logger.debug(match)
start_index = match.start(0)
yield self._parse_command(source, start_index) |
def _parse_command(self, source, start_index):
"""Parse a single command.
Parameters
----------
source : `str`
The full source of the tex document.
start_index : `int`
Character index in ``source`` where the command begins.
Returns
-------
parsed_command : `ParsedCommand`
The parsed command from the source at the given index.
"""
parsed_elements = []
# Index of the parser in the source
running_index = start_index
for element in self.elements:
opening_bracket = element['bracket']
closing_bracket = self._brackets[opening_bracket]
# Find the opening bracket.
element_start = None
element_end = None
for i, c in enumerate(source[running_index:], start=running_index):
if c == element['bracket']:
element_start = i
break
elif c == '\n':
# No starting bracket on the line.
if element['required'] is True:
# Try to parse a single-word token after the
# command, like '\input file'
content = self._parse_whitespace_argument(
source[running_index:],
self.name)
return ParsedCommand(
self.name,
[{'index': element['index'],
'name': element['name'],
'content': content.strip()}],
start_index,
source[start_index:i])
else:
# Give up on finding an optional element
break
# Handle cases when the opening bracket is never found.
if element_start is None and element['required'] is False:
# Optional element not found. Continue to next element,
# not advancing the running_index of the parser.
continue
elif element_start is None and element['required'] is True:
message = ('Parsing command {0} at index {1:d}, '
'did not detect element {2:d}'.format(
self.name,
start_index,
element['index']))
raise CommandParserError(message)
# Find the closing bracket, keeping track of the number of times
# the same type of bracket was opened and closed.
balance = 1
for i, c in enumerate(source[element_start + 1:],
start=element_start + 1):
if c == opening_bracket:
balance += 1
elif c == closing_bracket:
balance -= 1
if balance == 0:
element_end = i
break
if balance > 0:
message = ('Parsing command {0} at index {1:d}, '
'did not find closing bracket for required '
'command element {2:d}'.format(
self.name,
start_index,
element['index']))
raise CommandParserError(message)
# Package the parsed element's content.
element_content = source[element_start + 1:element_end]
parsed_element = {
'index': element['index'],
'name': element['name'],
'content': element_content.strip()
}
parsed_elements.append(parsed_element)
running_index = element_end + 1
command_source = source[start_index:running_index]
parsed_command = ParsedCommand(self.name, parsed_elements,
start_index, command_source)
return parsed_command |
def _parse_whitespace_argument(source, name):
r"""Attempt to parse a single token on the first line of this source.
This method is used for parsing whitespace-delimited arguments, like
``\input file``. The source should ideally contain `` file`` along
with a newline character.
>>> source = 'Line 1\n' r'\input test.tex' '\nLine 2'
>>> LatexCommand._parse_whitespace_argument(source, 'input')
'test.tex'
Bracket delimited arguments (``\input{test.tex}``) are handled in
the normal logic of `_parse_command`.
"""
# First match the command name itself so that we find the argument
# *after* the command
command_pattern = r'\\(' + name + r')(?:[\s{[%])'
command_match = re.search(command_pattern, source)
if command_match is not None:
# Trim `source` so we only look after the command
source = source[command_match.end(1):]
# Find the whitespace-delimited argument itself.
pattern = r'(?P<content>\S+)(?:[ %\t\n]+)'
match = re.search(pattern, source)
if match is None:
message = (
'When parsing {}, did not find whitespace-delimited command '
'argument'
)
raise CommandParserError(message.format(name))
content = match.group('content')
content = content.strip()
return content |
def _tmdd_datetime_to_iso(dt, include_offset=True, include_seconds=True):
"""
dt is an xml Element with <date>, <time>, and optionally <offset> children.
returns an ISO8601 string
"""
datestring = dt.findtext('date')
timestring = dt.findtext('time')
assert len(datestring) == 8
assert len(timestring) >= 6
iso = datestring[0:4] + '-' + datestring[4:6] + '-' + datestring[6:8] + 'T' \
+ timestring[0:2] + ':' + timestring[2:4]
if include_seconds:
iso += ':' + timestring[4:6]
if include_offset:
offset = dt.findtext('offset')
if offset:
assert len(offset) == 5
iso += offset[0:3] + ':' + offset[3:5]
else:
raise Exception("TMDD date is not timezone-aware: %s" % etree.tostring(dt))
return iso |
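# Hedged example (hypothetical input): convert a TMDD <date>/<time>/<offset>
# element to ISO 8601. Assumes lxml.etree (or a compatible ElementTree API).
from lxml import etree
dt = etree.fromstring(
    '<dt><date>20180101</date><time>120000</time><offset>-0500</offset></dt>')
_tmdd_datetime_to_iso(dt)     # '2018-01-01T12:00:00-05:00'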
def _generate_automatic_headline(c):
"""The only field that maps closely to Open511 <headline>, a required field, is optional
in TMDD. So we sometimes need to generate our own."""
# Start with the event type, e.g. "Incident"
headline = c.data['event_type'].replace('_', ' ').title()
if c.data['roads']:
# Add the road name
headline += ' on ' + c.data['roads'][0]['name']
direction = c.data['roads'][0].get('direction')
if direction and direction not in ('BOTH', 'NONE'):
headline += ' ' + direction
return headline |
def _get_severity(c):
"""
1. Collect all <severity> and <impact-level> values.
2. Convert impact-level of 1-3 to MINOR, 4-7 to MODERATE, 8-10 to MAJOR
3. Map severity -> none to MINOR, natural-disaster to MAJOR, other to UNKNOWN
4. Pick the highest severity.
"""
severities = c.feu.xpath('event-indicators/event-indicator/event-severity/text()|event-indicators/event-indicator/severity/text()')
impacts = c.feu.xpath('event-indicators/event-indicator/event-impact/text()|event-indicators/event-indicator/impact/text()')
severities = [convert_severity[s] for s in severities]
impacts = [convert_impact[i] for i in impacts]
return ['UNKNOWN', 'MINOR', 'MODERATE', 'MAJOR'][max(itertools.chain(severities, impacts))] |
def list_from_document(cls, doc):
"""Returns a list of TMDDEventConverter elements.
doc is an XML Element containing one or more <FEU> events
"""
objs = []
for feu in doc.xpath('//FEU'):
detail_els = feu.xpath('event-element-details/event-element-detail')
for idx, detail in enumerate(detail_els):
objs.append(cls(feu, detail, id_suffix=idx, number_in_group=len(detail_els)))
return objs |
def add_geo(self, geo_location):
"""
Saves a <geo-location> Element, to be incorporated into the Open511
geometry field.
"""
if not (geo_location.xpath('latitude') and geo_location.xpath('longitude')):
raise Exception("Invalid geo-location %s" % etree.tostring(geo_location))
if _xpath_or_none(geo_location, 'horizontal-datum/text()') not in ('wgs84', None):
logger.warning("Unsupported horizontal-datum in %s" % etree.tostring(geo_location))
return
point = (
float(_xpath_or_none(geo_location, 'longitude/text()')) / 1000000,
float(_xpath_or_none(geo_location, 'latitude/text()')) / 1000000
)
self.points.add(point) |
def clone(src, dst_path, skip_globals, skip_dimensions, skip_variables):
"""
Mostly ripped from nc3tonc4 in netCDF4-python.
Added ability to skip dimension and variables.
Removed all of the unpacking logic for shorts.
"""
if os.path.exists(dst_path):
os.unlink(dst_path)
dst = netCDF4.Dataset(dst_path, 'w')
# Global attributes
for attname in src.ncattrs():
if attname not in skip_globals:
setattr(dst, attname, getattr(src, attname))
# Dimensions
unlimdim = None
unlimdimname = False
for dimname, dim in src.dimensions.items():
# Skip what we need to
if dimname in skip_dimensions:
continue
if dim.isunlimited():
unlimdim = dim
unlimdimname = dimname
dst.createDimension(dimname, None)
else:
dst.createDimension(dimname, len(dim))
# Variables
for varname, ncvar in src.variables.items():
# Skip what we need to
if varname in skip_variables:
continue
hasunlimdim = False
if unlimdimname and unlimdimname in ncvar.dimensions:
hasunlimdim = True
filler = None
if hasattr(ncvar, '_FillValue'):
filler = ncvar._FillValue
if ncvar.chunking() == "contiguous":
var = dst.createVariable(varname, ncvar.dtype, ncvar.dimensions, fill_value=filler)
else:
var = dst.createVariable(varname, ncvar.dtype, ncvar.dimensions, fill_value=filler, chunksizes=ncvar.chunking())
# Attributes
for attname in ncvar.ncattrs():
if attname == '_FillValue':
continue
else:
setattr(var, attname, getattr(ncvar, attname))
# Data
nchunk = 1000
if hasunlimdim:
if nchunk:
start = 0
stop = len(unlimdim)
step = nchunk
if step < 1:
step = 1
for n in range(start, stop, step):
nmax = n + nchunk
if nmax > len(unlimdim):
nmax = len(unlimdim)
idata = ncvar[n:nmax]
var[n:nmax] = idata
else:
idata = ncvar[:]
var[0:len(unlimdim)] = idata
else:
idata = ncvar[:]
var[:] = idata
dst.sync()
src.close()
dst.close() |
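# Hedged usage sketch (file, dimension, and variable names are placeholders):
# copy src.nc to dst.nc while dropping an unwanted dimension and variable.
src = netCDF4.Dataset('src.nc')
clone(src, 'dst.nc', skip_globals=[], skip_dimensions=['obsolete_dim'],
      skip_variables=['qc_flag'])
# Note: clone() closes both the source and destination datasets when done.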
def get_dataframe_from_variable(nc, data_var):
""" Returns a Pandas DataFrame of the data.
This always returns positive down depths
"""
time_var = nc.get_variables_by_attributes(standard_name='time')[0]
depth_vars = nc.get_variables_by_attributes(axis=lambda v: v is not None and v.lower() == 'z')
depth_vars += nc.get_variables_by_attributes(standard_name=lambda v: v in ['height', 'depth', 'surface_altitude'], positive=lambda x: x is not None)
# Find the correct depth variable
depth_var = None
for d in depth_vars:
try:
if d._name in data_var.coordinates.split(" ") or d._name in data_var.dimensions:
depth_var = d
break
except AttributeError:
continue
times = netCDF4.num2date(time_var[:], units=time_var.units, calendar=getattr(time_var, 'calendar', 'standard'))
original_times_size = times.size
if depth_var is None and hasattr(data_var, 'sensor_depth'):
depth_type = get_type(data_var.sensor_depth)
depths = np.asarray([data_var.sensor_depth] * len(times)).flatten()
values = data_var[:].flatten()
elif depth_var is None:
depths = np.asarray([np.nan] * len(times)).flatten()
depth_type = get_type(depths)
values = data_var[:].flatten()
else:
depths = depth_var[:]
depth_type = get_type(depths)
if len(data_var.shape) > 1:
times = np.repeat(times, depths.size)
depths = np.tile(depths, original_times_size)
values = data_var[:, :].flatten()
else:
values = data_var[:].flatten()
if getattr(depth_var, 'positive', 'down').lower() == 'up':
logger.warning("Converting depths to positive down before returning the DataFrame")
depths = depths * -1
# https://github.com/numpy/numpy/issues/4595
# We can't call astype on a MaskedConstant
if (
isinstance(depths, np.ma.core.MaskedConstant) or
(hasattr(depths, 'mask') and depths.mask.all())
):
depths = np.asarray([np.nan] * len(times)).flatten()
df = pd.DataFrame({ 'time': times,
'value': values.astype(data_var.dtype),
'unit': data_var.units if hasattr(data_var, 'units') else np.nan,
'depth': depths.astype(depth_type) })
df.set_index([pd.DatetimeIndex(df['time']), pd.Float64Index(df['depth'])], inplace=True)
return df |
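# Hedged usage sketch ('data.nc' and the standard_name are placeholders):
# pull a time/depth-indexed DataFrame for one variable of a netCDF file.
nc = netCDF4.Dataset('data.nc')
data_var = nc.get_variables_by_attributes(standard_name='sea_water_temperature')[0]
df = get_dataframe_from_variable(nc, data_var)
df.head()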
async def github_request(session, api_token,
query=None, mutation=None, variables=None):
"""Send a request to the GitHub v4 (GraphQL) API.
The request is asynchronous, with asyncio.
Parameters
----------
session : `aiohttp.ClientSession`
Your application's aiohttp client session.
api_token : `str`
A GitHub personal API token. See the `GitHub personal access token
guide`_.
query : `str` or `GitHubQuery`
GraphQL query string. If provided, then the ``mutation`` parameter
should not be set. For examples, see the `GitHub guide to query and
mutation operations`_.
mutation : `str` or `GitHubQuery`
GraphQL mutation string. If provided, then the ``query`` parameter
should not be set. For examples, see the `GitHub guide to query and
mutation operations`_.
variables : `dict`
GraphQL variables, as a JSON-compatible dictionary. This is only
required if the ``query`` or ``mutation`` uses GraphQL variables.
Returns
-------
data : `dict`
Parsed JSON as a `dict` object.
.. _GitHub personal access token guide: https://ls.st/41d
.. _GitHub guide to query and mutation operations: https://ls.st/9s7
"""
payload = {}
if query is not None:
payload['query'] = str(query) # converts a GitHubQuery
if mutation is not None:
payload['mutation'] = str(mutation) # converts a GitHubQuery
if variables is not None:
payload['variables'] = variables
headers = {'Authorization': 'token {}'.format(api_token)}
url = 'https://api.github.com/graphql'
async with session.post(url, json=payload, headers=headers) as response:
data = await response.json()
return data |
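# Hedged usage sketch (token and query are placeholders): run a small GraphQL
# query against the GitHub v4 API from an asyncio coroutine.
import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        data = await github_request(session, api_token='<YOUR_TOKEN>',
                                    query='{ viewer { login } }')
    print(data)

asyncio.run(main())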
def load(cls, query_name):
"""Load a pre-made query.
These queries are distributed with lsstprojectmeta. See
:file:`lsstrojectmeta/data/githubv4/README.rst` inside the
package repository for details on available queries.
Parameters
----------
query_name : `str`
Name of the query, such as ``'technote_repo'``.
Returns
-------
github_query : `GitHubQuery`
A GitHub query or mutation object that you can pass to
`github_request` to execute the request itself.
"""
template_path = os.path.join(
os.path.dirname(__file__),
'../data/githubv4',
query_name + '.graphql')
with open(template_path) as f:
query_data = f.read()
return cls(query_data, name=query_name) |
def read_git_commit_timestamp_for_file(filepath, repo_path=None, repo=None):
"""Obtain the timestamp for the most recent commit to a given file in a
Git repository.
Parameters
----------
filepath : `str`
Absolute or repository-relative path for a file.
repo_path : `str`, optional
Path to the Git repository. Leave as `None` to use the current working
directory or if a ``repo`` argument is provided.
repo : `git.Repo`, optional
A `git.Repo` instance.
Returns
-------
commit_timestamp : `datetime.datetime`
The datetime of the most recent commit to the given file.
Raises
------
IOError
Raised if the ``filepath`` does not exist in the Git repository.
"""
logger = logging.getLogger(__name__)
if repo is None:
repo = git.repo.base.Repo(path=repo_path,
search_parent_directories=True)
repo_path = repo.working_tree_dir
head_commit = repo.head.commit
# filepath relative to the repo path
logger.debug('Using Git repo at %r', repo_path)
filepath = os.path.relpath(
os.path.abspath(filepath),
start=repo_path)
logger.debug('Repo-relative filepath is %r', filepath)
# Most recent commit datetime of the given file.
# Don't use head_commit.iter_parents because then it skips the
# commit of a file that's added but never modified.
for commit in head_commit.iter_items(repo,
head_commit,
[filepath],
skip=0):
return commit.committed_datetime
# Only get here if git could not find the file path in the history
raise IOError('File {} not found'.format(filepath)) |
def get_content_commit_date(extensions, acceptance_callback=None,
root_dir='.'):
"""Get the datetime for the most recent commit to a project that
affected certain types of content.
Parameters
----------
extensions : sequence of 'str'
Extensions of files to consider in getting the most recent commit
date. For example, ``('rst', 'svg', 'png')`` are content extensions
for a Sphinx project. **Extension comparison is case sensitive.** Add
uppercase variants to match uppercase extensions.
acceptance_callback : callable
Callable function whose sole argument is a file path, and returns
`True` or `False` depending on whether the file's commit date should
be considered or not. This callback is only run on files that are
included by ``extensions``. Thus this callback is a way to exclude
specific files that would otherwise be included by their extension.
root_dir : `str`, optional
Only content contained within this root directory is considered.
This directory must be, or be contained by, a Git repository. This is
the current working directory by default.
Returns
-------
commit_date : `datetime.datetime`
Datetime of the most recent content commit.
Raises
------
RuntimeError
Raised if no content files are found.
"""
logger = logging.getLogger(__name__)
def _null_callback(_):
return True
if acceptance_callback is None:
acceptance_callback = _null_callback
# Cache the repo object for each query
root_dir = os.path.abspath(root_dir)
repo = git.repo.base.Repo(path=root_dir, search_parent_directories=True)
# Iterate over all files with all file extensions, looking for the
# newest commit datetime.
newest_datetime = None
iters = [_iter_filepaths_with_extension(ext, root_dir=root_dir)
for ext in extensions]
for content_path in itertools.chain(*iters):
content_path = os.path.abspath(os.path.join(root_dir, content_path))
if acceptance_callback(content_path):
logger.debug('Found content path %r', content_path)
try:
commit_datetime = read_git_commit_timestamp_for_file(
content_path, repo=repo)
logger.debug('Commit timestamp of %r is %s',
content_path, commit_datetime)
except IOError:
logger.warning(
'Could not get commit for %r, skipping',
content_path)
continue
if not newest_datetime or commit_datetime > newest_datetime:
# Seed initial newest_datetime
# or set a newer newest_datetime
newest_datetime = commit_datetime
logger.debug('Newest commit timestamp is %s', newest_datetime)
logger.debug('Final commit timestamp is %s', newest_datetime)
if newest_datetime is None:
raise RuntimeError('No content files found in {}'.format(root_dir))
return newest_datetime |
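# Hedged usage sketch: find the newest commit that touched Sphinx content
# (rst/png/svg) anywhere under the current repository.
commit_date = get_content_commit_date(('rst', 'png', 'svg'), root_dir='.')
print(commit_date.isoformat())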
def _iter_filepaths_with_extension(extname, root_dir='.'):
"""Iterative over relative filepaths of files in a directory, and
sub-directories, with the given extension.
Parameters
----------
extname : `str`
Extension name (such as 'txt' or 'rst'). Extension comparison is
case sensitive.
root_dir : `str`, optional
Root directory. Current working directory by default.
Yields
------
filepath : `str`
File path, relative to ``root_dir``, with the given extension.
"""
# needed for comparison with os.path.splitext
if not extname.startswith('.'):
extname = '.' + extname
root_dir = os.path.abspath(root_dir)
for dirname, sub_dirnames, filenames in os.walk(root_dir):
for filename in filenames:
if os.path.splitext(filename)[-1] == extname:
full_filename = os.path.join(dirname, filename)
rel_filepath = os.path.relpath(full_filename, start=root_dir)
yield rel_filepath |
def get_variables_by_attributes(self, **kwargs):
""" Returns variables that match specific conditions.
* Can pass in key=value parameters and variables are returned that
contain all of the matches. For example,
>>> # Get variables with x-axis attribute.
>>> vs = nc.get_variables_by_attributes(axis='X')
>>> # Get variables with matching "standard_name" attribute.
>>> nc.get_variables_by_attributes(standard_name='northward_sea_water_velocity')
* Can pass in key=callable parameter and variables are returned if the
callable returns True. The callable should accept a single parameter,
the attribute value. None is given as the attribute value when the
attribute does not exist on the variable. For example,
>>> # Get Axis variables.
>>> vs = nc.get_variables_by_attributes(axis=lambda v: v in ['X', 'Y', 'Z', 'T'])
>>> # Get variables that don't have an "axis" attribute.
>>> vs = nc.get_variables_by_attributes(axis=lambda v: v is None)
>>> # Get variables that have a "grid_mapping" attribute.
>>> vs = nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None)
"""
vs = []
has_value_flag = False
for vname in self.variables:
var = self.variables[vname]
for k, v in kwargs.items():
if callable(v):
has_value_flag = v(getattr(var, k, None))
if has_value_flag is False:
break
elif hasattr(var, k) and getattr(var, k) == v:
has_value_flag = True
else:
has_value_flag = False
break
if has_value_flag is True:
vs.append(self.variables[vname])
return vs |
def json_attributes(self, vfuncs=None):
"""
vfuncs can be a list of callables, each accepting a single argument, the
Variable object, and returning a dictionary of new attributes to
set. These will overwrite existing attributes
"""
vfuncs = vfuncs or []
js = {'global': {}}
for k in self.ncattrs():
js['global'][k] = self.getncattr(k)
for varname, var in self.variables.items():
js[varname] = {}
for k in var.ncattrs():
z = var.getncattr(k)
try:
assert not np.isnan(z).all()
js[varname][k] = z
except AssertionError:
js[varname][k] = None
except TypeError:
js[varname][k] = z
for vf in vfuncs:
try:
js[varname].update(vf(var))
except BaseException:
logger.exception("Could not apply custom variable attribute function")
return json.loads(json.dumps(js, cls=BasicNumpyEncoder)) |
def ensure_pandoc(func):
"""Decorate a function that uses pypandoc to ensure that pandoc is
installed if necessary.
"""
logger = logging.getLogger(__name__)
@functools.wraps(func)
def _install_and_run(*args, **kwargs):
try:
# First try to run pypandoc function
result = func(*args, **kwargs)
except OSError:
# Install pandoc and retry
message = "pandoc needed but not found. Now installing it for you."
logger.warning(message)
# This version of pandoc is known to be compatible with both
# pypandoc.download_pandoc and the functionality that
# lsstprojectmeta needs. Travis CI tests are useful for ensuring
# download_pandoc works.
pypandoc.download_pandoc(version='1.19.1')
logger.debug("pandoc download complete")
result = func(*args, **kwargs)
return result
return _install_and_run |
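# Hedged sketch (to_plain_text is hypothetical): any pypandoc-based helper can
# be wrapped with ensure_pandoc so pandoc gets installed on first use.
@ensure_pandoc
def to_plain_text(html):
    return pypandoc.convert_text(html, 'plain', format='html')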
def convert_text(content, from_fmt, to_fmt, deparagraph=False, mathjax=False,
smart=True, extra_args=None):
"""Convert text from one markup format to another using pandoc.
This function is a thin wrapper around `pypandoc.convert_text`.
Parameters
----------
content : `str`
Original content.
from_fmt : `str`
Format of the original ``content``. Format identifier must be one of
those known by Pandoc. See https://pandoc.org/MANUAL.html for details.
to_fmt : `str`
Output format for the content.
deparagraph : `bool`, optional
If `True`, then the
`lsstprojectmeta.pandoc.filters.deparagraph.deparagraph` filter is
used to remove paragraph (``<p>``, for example) tags around a single
paragraph of content. That filter does not affect content that
consists of multiple blocks (several paragraphs, or lists, for
example). Default is `False`.
For example, **without** this filter Pandoc will convert
the string ``"Title text"`` to ``"<p>Title text</p>"`` in HTML. The
paragraph tags aren't useful if you intend to wrap the converted
content in different tags, like ``<h1>``, using your own templating
system.
**With** this filter, Pandoc will convert the string ``"Title text"``
to ``"Title text"`` in HTML.
mathjax : `bool`, optional
If `True` then Pandoc will mark up output content to work with MathJax.
Default is False.
smart : `bool`, optional
If `True` (default) then ascii characters will be converted to unicode
characters like smart quotes and em dashes.
extra_args : `list`, optional
Sequence of Pandoc arguments command line arguments (such as
``'--normalize'``). The ``deparagraph``, ``mathjax``, and ``smart``
arguments are convenience arguments that are equivalent to items
in ``extra_args``.
Returns
-------
output : `str`
Content in the output (``to_fmt``) format.
Notes
-----
This function will automatically install Pandoc if it is not available.
See `ensure_pandoc`.
"""
logger = logging.getLogger(__name__)
if extra_args is not None:
extra_args = list(extra_args)
else:
extra_args = []
if mathjax:
extra_args.append('--mathjax')
if smart:
extra_args.append('--smart')
if deparagraph:
extra_args.append('--filter=lsstprojectmeta-deparagraph')
extra_args.append('--wrap=none')
# de-dupe extra args
extra_args = set(extra_args)
logger.debug('Running pandoc from %s to %s with extra_args %s',
from_fmt, to_fmt, extra_args)
output = pypandoc.convert_text(content, to_fmt, format=from_fmt,
extra_args=extra_args)
return output |
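# Hedged example: convert a one-line LaTeX snippet to HTML5 with MathJax-ready
# math markup (pandoc is installed automatically if missing, see ensure_pandoc).
html = convert_text(r'The relation $E = mc^2$ holds.',
                    from_fmt='latex', to_fmt='html5', mathjax=True)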
def convert_lsstdoc_tex(
content, to_fmt, deparagraph=False, mathjax=False,
smart=True, extra_args=None):
"""Convert lsstdoc-class LaTeX to another markup format.
This function is a thin wrapper around `convert_text` that automatically
includes common lsstdoc LaTeX macros.
Parameters
----------
content : `str`
Original content.
to_fmt : `str`
Output format for the content (see https://pandoc.org/MANUAL.html).
For example, 'html5'.
deparagraph : `bool`, optional
If `True`, then the
`lsstprojectmeta.pandoc.filters.deparagraph.deparagraph` filter is
used to remove paragraph (``<p>``, for example) tags around a single
paragraph of content. That filter does not affect content that
consists of multiple blocks (several paragraphs, or lists, for
example). Default is `False`.
For example, **without** this filter Pandoc will convert
the string ``"Title text"`` to ``"<p>Title text</p>"`` in HTML. The
paragraph tags aren't useful if you intend to wrap the converted
content in different tags, like ``<h1>``, using your own templating
system.
**With** this filter, Pandoc will convert the string ``"Title text"``
to ``"Title text"`` in HTML.
mathjax : `bool`, optional
If `True` then Pandoc will mark up output content to work with MathJax.
Default is False.
smart : `bool`, optional
If `True` (default) then ascii characters will be converted to unicode
characters like smart quotes and em dashes.
extra_args : `list`, optional
Sequence of Pandoc arguments command line arguments (such as
``'--normalize'``). The ``deparagraph``, ``mathjax``, and ``smart``
arguments are convenience arguments that are equivalent to items
in ``extra_args``.
Returns
-------
output : `str`
Content in the output (``to_fmt``) format.
Notes
-----
This function will automatically install Pandoc if it is not available.
See `ensure_pandoc`.
"""
augmented_content = '\n'.join((LSSTDOC_MACROS, content))
return convert_text(
augmented_content, 'latex', to_fmt,
deparagraph=deparagraph, mathjax=mathjax,
smart=smart, extra_args=extra_args) |
def decode_jsonld(jsonld_text):
"""Decode a JSON-LD dataset, including decoding datetime
strings into `datetime.datetime` objects.
Parameters
----------
jsonld_text : `str`
The JSON-LD dataset encoded as a string.
Returns
-------
jsonld_dataset : `dict`
A JSON-LD dataset.
Examples
--------
>>> doc = '{"dt": "2018-01-01T12:00:00Z"}'
>>> decode_jsonld(doc)
{'dt': datetime.datetime(2018, 1, 1, 12, 0, tzinfo=datetime.timezone.utc)}
"""
decoder = json.JSONDecoder(object_pairs_hook=_decode_object_pairs)
return decoder.decode(jsonld_text) |
def default(self, obj):
"""Encode values as JSON strings.
This method overrides the default implementation from
`json.JSONEncoder`.
"""
if isinstance(obj, datetime.datetime):
return self._encode_datetime(obj)
# Fallback to the default encoding
return json.JSONEncoder.default(self, obj) |
def _encode_datetime(self, dt):
"""Encode a datetime in the format '%Y-%m-%dT%H:%M:%SZ'.
The datetime can be naive (doesn't have timezone info) or aware
(it does have a tzinfo attribute set). Regardless, the datetime
is transformed into UTC.
"""
if dt.tzinfo is None:
# Force it to be a UTC datetime
dt = dt.replace(tzinfo=datetime.timezone.utc)
# Convert to UTC (no matter what)
dt = dt.astimezone(datetime.timezone.utc)
return dt.strftime('%Y-%m-%dT%H:%M:%SZ') |
def find_repos(self, depth=10):
'''Get all git repositories within this environment'''
repos = []
for root, subdirs, files in walk_dn(self.root, depth=depth):
if 'modules' in root:
continue
if '.git' in subdirs:
repos.append(root)
return repos |
def clone(self, repo_path, destination, branch=None):
'''Clone a repository to a destination relative to environment root'''
logger.debug('Installing ' + repo_path)
if not destination.startswith(self.env_path):
destination = unipath(self.env_path, destination)
if branch:
return shell.run('git', 'clone', repo_path, '--branch', branch,
'--single-branch', '--recursive', destination)
return shell.run('git', 'clone', '--recursive', repo_path, destination) |
def pull(self, repo_path, *args):
'''Pull changes for a repository relative to environment root'''
logger.debug('Pulling ' + repo_path)
if not repo_path.startswith(self.env_path):
repo_path = unipath(self.env_path, repo_path)
return shell.run('git', 'pull', *args, **{'cwd': repo_path}) |
def install(self, package):
'''Install a python package using pip'''
logger.debug('Installing ' + package)
shell.run(self.pip_path, 'install', package) |
def upgrade(self, package):
'''Update a python package using pip'''
logger.debug('Upgrading ' + package)
shell.run(self.pip_path, 'install', '--upgrade', '--no-deps', package)
shell.run(self.pip_path, 'install', package) |
def df_quantile(df, nb=100):
"""Returns the nb quantiles for datas in a dataframe
"""
quantiles = np.linspace(0, 1., nb)
res = pd.DataFrame()
for q in quantiles:
res = res.append(df.quantile(q), ignore_index=True)
return res |
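# Hedged example: five evenly spaced quantiles (0, 0.25, ..., 1.0) of a column.
# Note: DataFrame.append was removed in pandas 2.x, so this helper assumes an
# older pandas release.
df = pd.DataFrame({'x': range(101)})
print(df_quantile(df, nb=5))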
def mean(a, rep=0.75, **kwargs):
"""Compute the average along a 1D array like ma.mean,
but with a representativity coefficient : if ma.count(a)/ma.size(a)>=rep,
then the result is a masked value
"""
return rfunc(a, ma.mean, rep, **kwargs) |
def max(a, rep=0.75, **kwargs):
"""Compute the max along a 1D array like ma.mean,
but with a representativity coefficient : if ma.count(a)/ma.size(a)>=rep,
then the result is a masked value
"""
return rfunc(a, ma.max, rep, **kwargs) |
def min(a, rep=0.75, **kwargs):
"""Compute the min along a 1D array like ma.mean,
but with a representativity coefficient : if ma.count(a)/ma.size(a)>=rep,
then the result is a masked value
"""
return rfunc(a, ma.min, rep, **kwargs) |
def rfunc(a, rfunc=None, rep=0.75, **kwargs):
"""Applies func on a if a comes with a representativity coefficient rep,
i.e. ma.count(a)/ma.size(a)>=rep. If not, returns a masked array
"""
if float(ma.count(a)) / ma.size(a) < rep:
return ma.masked
else:
if rfunc is None:
return a
return rfunc(a, **kwargs) |
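# Hedged example: the representativity rule in practice. With 3 of 4 values
# valid (75%), the default rep=0.75 still computes the mean; a stricter
# rep=0.9 returns ma.masked instead.
import numpy as np
import numpy.ma as ma
a = ma.masked_invalid([2.0, 2.0, np.nan, 2.0])
mean(a)             # 2.0
mean(a, rep=0.9)    # masked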
def rmse(a, b):
"""Returns the root mean square error betwwen a and b
"""
return np.sqrt(np.square(a - b).mean()) |
def nmse(a, b):
"""Returns the normalized mean square error of a and b
"""
return np.square(a - b).mean() / (a.mean() * b.mean()) |
def mfbe(a, b):
"""Returns the mean fractionalized bias error
"""
return 2 * bias(a, b) / (a.mean() + b.mean()) |
def fa(a, b, alpha=2):
"""Returns the factor of 'alpha' (2 or 5 normally)
"""
return np.sum((a > b / alpha) & (a < b * alpha), dtype=float) / len(a) * 100 |
def foex(a, b):
"""Returns the factor of exceedance
"""
return (np.sum(a > b, dtype=float) / len(a) - 0.5) * 100 |
def correlation(a, b):
"""Computes the correlation between a and b, says the Pearson's correlation
coefficient R
"""
diff1 = a - a.mean()
diff2 = b - b.mean()
return (diff1 * diff2).mean() / (np.sqrt(np.square(diff1).mean() * np.square(diff2).mean())) |
def gmb(a, b):
"""Geometric mean bias
"""
return np.exp(np.log(a).mean() - np.log(b).mean()) |
def gmv(a, b):
"""Geometric mean variance
"""
return np.exp(np.square(np.log(a) - np.log(b)).mean()) |
def fmt(a, b):
"""Figure of merit in time
"""
return 100 * np.min([a, b], axis=0).sum() / np.max([a, b], axis=0).sum() |
def fullStats(a, b):
"""Performs several stats on a against b, typically a is the predictions
array, and b the observations array
Returns:
A dataFrame of stat name, stat description, result
"""
stats = [
['bias', 'Bias', bias(a, b)],
['stderr', 'Standard Deviation Error', stderr(a, b)],
['mae', 'Mean Absolute Error', mae(a, b)],
['rmse', 'Root Mean Square Error', rmse(a, b)],
['nmse', 'Normalized Mean Square Error', nmse(a, b)],
['mfbe', 'Mean Fractionalized bias Error', mfbe(a, b)],
['fa2', 'Factor of Two', fa(a, b, 2)],
['foex', 'Factor of Exceedance', foex(a, b)],
['correlation', 'Correlation R', correlation(a, b)],
['determination', 'Coefficient of Determination r2', determination(a, b)],
['gmb', 'Geometric Mean Bias', gmb(a, b)],
['gmv', 'Geometric Mean Variance', gmv(a, b)],
['fmt', 'Figure of Merit in Time', fmt(a, b)]
]
rec = np.rec.fromrecords(stats, names=('stat', 'description', 'result'))
df = pd.DataFrame.from_records(rec, index='stat')
return df |
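# Hedged example (bias, stderr, mae, and determination are assumed to be
# defined elsewhere in this module): score a small prediction array against
# observations.
a = np.array([1.1, 2.0, 2.9, 4.2])   # predictions
b = np.array([1.0, 2.0, 3.0, 4.0])   # observations
print(fullStats(a, b))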
def site_path(self):
'''Path to environments site-packages'''
if platform == 'win':
return unipath(self.path, 'Lib', 'site-packages')
py_ver = 'python{0}.{1}'.format(*sys.version_info[:2])
return unipath(self.path, 'lib', py_ver, 'site-packages') |
def _pre_activate(self):
'''
Prior to activating, store everything necessary to deactivate this
environment.
'''
if 'CPENV_CLEAN_ENV' not in os.environ:
if platform == 'win':
os.environ['PROMPT'] = '$P$G'
else:
os.environ['PS1'] = '\\u@\\h:\\w\\$'
clean_env_path = utils.get_store_env_tmp()
os.environ['CPENV_CLEAN_ENV'] = clean_env_path
utils.store_env(path=clean_env_path)
else:
utils.restore_env_from_file(os.environ['CPENV_CLEAN_ENV']) |
def _activate(self):
'''
Do some serious mangling to the current python environment...
This is necessary to activate an environment via python.
'''
old_syspath = set(sys.path)
site.addsitedir(self.site_path)
site.addsitedir(self.bin_path)
new_syspaths = set(sys.path) - old_syspath
for path in new_syspaths:
sys.path.remove(path)
sys.path.insert(1, path)
if not hasattr(sys, 'real_prefix'):
sys.real_prefix = sys.prefix
sys.prefix = self.path |
def remove(self):
'''
Remove this environment
'''
self.run_hook('preremove')
utils.rmtree(self.path)
self.run_hook('postremove') |
def command(self):
'''Command used to launch this application module'''
cmd = self.config.get('command', None)
if cmd is None:
return
cmd = cmd[platform]
return cmd['path'], cmd['args'] |
def create(name_or_path=None, config=None):
'''Create a virtual environment. You can pass either the name of a new
environment to create in your CPENV_HOME directory OR specify a full path
to create an environment outside your CPENV_HOME.
Create an environment in CPENV_HOME::
>>> cpenv.create('myenv')
Create an environment elsewhere::
>>> cpenv.create('~/custom_location/myenv')
:param name_or_path: Name or full path of environment
:param config: Environment configuration including dependencies etc...
'''
# Get the real path of the environment
if utils.is_system_path(name_or_path):
path = unipath(name_or_path)
else:
path = unipath(get_home_path(), name_or_path)
if os.path.exists(path):
raise OSError('{} already exists'.format(path))
env = VirtualEnvironment(path)
utils.ensure_path_exists(env.path)
if config:
if utils.is_git_repo(config):
Git('').clone(config, env.path)
else:
shutil.copy2(config, env.config_path)
else:
with open(env.config_path, 'w') as f:
f.write(defaults.environment_config)
utils.ensure_path_exists(env.hook_path)
utils.ensure_path_exists(env.modules_path)
env.run_hook('precreate')
virtualenv.create_environment(env.path)
if not utils.is_home_environment(env.path):
EnvironmentCache.add(env)
EnvironmentCache.save()
try:
env.update()
except:
utils.rmtree(path)
logger.debug('Failed to update, rolling back...')
raise
else:
env.run_hook('postcreate')
return env |
def remove(name_or_path):
'''Remove an environment or module
:param name_or_path: name or path to environment or module
'''
r = resolve(name_or_path)
r.resolved[0].remove()
EnvironmentCache.discard(r.resolved[0])
EnvironmentCache.save() |
def launch(module_name, *args, **kwargs):
'''Activates and launches a module
:param module_name: name of module to launch
'''
r = resolve(module_name)
r.activate()
mod = r.resolved[0]
mod.launch(*args, **kwargs) |
def deactivate():
'''Deactivates an environment by restoring all env vars to a clean state
stored prior to activating environments
'''
if 'CPENV_ACTIVE' not in os.environ or 'CPENV_CLEAN_ENV' not in os.environ:
raise EnvironmentError('Can not deactivate environment...')
utils.restore_env_from_file(os.environ['CPENV_CLEAN_ENV']) |
def get_home_path():
''':returns: your home path...CPENV_HOME env var OR ~/.cpenv'''
home = unipath(os.environ.get('CPENV_HOME', '~/.cpenv'))
home_modules = unipath(home, 'modules')
if not os.path.exists(home):
os.makedirs(home)
if not os.path.exists(home_modules):
os.makedirs(home_modules)
return home |
def get_module_paths():
''':returns: paths in CPENV_MODULES env var and CPENV_HOME/modules'''
module_paths = []
cpenv_modules_path = os.environ.get('CPENV_MODULES', None)
if cpenv_modules_path:
module_paths.extend(cpenv_modules_path.split(os.pathsep))
module_paths.append(unipath(get_home_path(), 'modules'))
return module_paths |
def get_environments():
'''Returns a list of all known virtual environments as
:class:`VirtualEnvironment` instances. This includes those in CPENV_HOME
and any others that are cached(created by the current user or activated
once by full path.)
'''
environments = set()
cwd = os.getcwd()
for d in os.listdir(cwd):
if d == 'environment.yml':
environments.add(VirtualEnvironment(cwd))
continue
path = unipath(cwd, d)
if utils.is_environment(path):
environments.add(VirtualEnvironment(path))
home = get_home_path()
for d in os.listdir(home):
path = unipath(home, d)
if utils.is_environment(path):
environments.add(VirtualEnvironment(path))
for env in EnvironmentCache:
environments.add(env)
return sorted(list(environments), key=lambda x: x.name) |
def get_modules():
'''Returns a list of available modules.'''
modules = set()
cwd = os.getcwd()
for d in os.listdir(cwd):
if d == 'module.yml':
modules.add(Module(cwd))
path = unipath(cwd, d)
if utils.is_module(path):
modules.add(Module(path))
module_paths = get_module_paths()
for module_path in module_paths:
for d in os.listdir(module_path):
path = unipath(module_path, d)
if utils.is_module(path):
modules.add(Module(path))
return sorted(list(modules), key=lambda x: x.name) |
def get_active_modules():
''':returns: a list of active :class:`Module` s or []'''
modules = os.environ.get('CPENV_ACTIVE_MODULES', None)
if modules:
modules = modules.split(os.pathsep)
return [Module(module) for module in modules]
return [] |
def add_active_module(module):
'''Add a module to CPENV_ACTIVE_MODULES environment variable'''
modules = set(get_active_modules())
modules.add(module)
new_modules_path = os.pathsep.join([m.path for m in modules])
os.environ['CPENV_ACTIVE_MODULES'] = str(new_modules_path) |
def rem_active_module(module):
'''Remove a module from CPENV_ACTIVE_MODULES environment variable'''
modules = set(get_active_modules())
modules.discard(module)
new_modules_path = os.pathsep.join([m.path for m in modules])
os.environ['CPENV_ACTIVE_MODULES'] = str(new_modules_path) |
def format_objects(objects, children=False, columns=None, header=True):
'''Format a list of environments and modules for terminal output'''
columns = columns or ('NAME', 'TYPE', 'PATH')
objects = sorted(objects, key=_type_and_name)
data = []
for obj in objects:
if isinstance(obj, cpenv.VirtualEnvironment):
data.append(get_info(obj))
modules = obj.get_modules()
if children and modules:
for mod in modules:
data.append(get_info(mod, indent=2, root=obj.path))
else:
data.append(get_info(obj))
maxes = [len(max(col, key=len)) for col in zip(*data)]
tmpl = '{:%d} {:%d} {:%d}' % tuple(maxes)
lines = []
if header:
lines.append('\n' + bold_blue(tmpl.format(*columns)))
for obj_data in data:
lines.append(tmpl.format(*obj_data))
return '\n'.join(lines) |
def info():
'''Show context info'''
env = cpenv.get_active_env()
modules = []
if env:
modules = env.get_modules()
active_modules = cpenv.get_active_modules()
if not env and not modules and not active_modules:
click.echo('\nNo active modules...')
return
click.echo(bold('\nActive modules'))
if env:
click.echo(format_objects([env] + active_modules))
available_modules = set(modules) - set(active_modules)
if available_modules:
click.echo(
bold('\nInactive modules in {}\n').format(cyan(env.name))
)
click.echo(format_objects(available_modules, header=False))
else:
click.echo(format_objects(active_modules))
available_shared_modules = set(cpenv.get_modules()) - set(active_modules)
if not available_shared_modules:
return
click.echo(bold('\nInactive shared modules \n'))
click.echo(format_objects(available_shared_modules, header=False)) |
def list_():
'''List available environments and modules'''
environments = cpenv.get_environments()
modules = cpenv.get_modules()
click.echo(format_objects(environments + modules, children=True)) |
def activate(paths, skip_local, skip_shared):
'''Activate an environment'''
if not paths:
ctx = click.get_current_context()
if cpenv.get_active_env():
ctx.invoke(info)
return
click.echo(ctx.get_help())
examples = (
'\nExamples: \n'
' cpenv activate my_env\n'
' cpenv activate ./relative/path/to/my_env\n'
' cpenv activate my_env my_module\n'
)
click.echo(examples)
return
if skip_local:
cpenv.module_resolvers.remove(cpenv.resolver.module_resolver)
cpenv.module_resolvers.remove(cpenv.resolver.active_env_module_resolver)
if skip_shared:
cpenv.module_resolvers.remove(cpenv.resolver.modules_path_resolver)
try:
r = cpenv.resolve(*paths)
except cpenv.ResolveError as e:
click.echo('\n' + str(e))
return
resolved = set(r.resolved)
active_modules = set()
env = cpenv.get_active_env()
if env:
active_modules.add(env)
active_modules.update(cpenv.get_active_modules())
new_modules = resolved - active_modules
old_modules = active_modules & resolved
if old_modules and not new_modules:
click.echo(
'\nModules already active: '
+ bold(' '.join([obj.name for obj in old_modules]))
)
return
if env and contains_env(new_modules):
click.echo('\nUse ' + bold('exit') + ' to leave your active environment first.')
return
click.echo('\nResolved the following modules...')
click.echo(format_objects(r.resolved))
r.activate()
click.echo(blue('\nLaunching subshell...'))
modules = sorted(resolved | active_modules, key=_type_and_name)
prompt = ':'.join([obj.name for obj in modules])
shell.launch(prompt) |
def create(name_or_path, config):
'''Create a new environment.'''
if not name_or_path:
ctx = click.get_current_context()
click.echo(ctx.get_help())
examples = (
'\nExamples:\n'
' cpenv create my_env\n'
' cpenv create ./relative/path/to/my_env\n'
' cpenv create my_env --config ./relative/path/to/config\n'
' cpenv create my_env --config [email protected]:user/config.git\n'
)
click.echo(examples)
return
click.echo(
blue('Creating a new virtual environment ' + name_or_path)
)
try:
env = cpenv.create(name_or_path, config)
except Exception as e:
click.echo(bold_red('FAILED TO CREATE ENVIRONMENT!'))
click.echo(e)
else:
click.echo(bold_green('Successfully created environment!'))
click.echo(blue('Launching subshell'))
cpenv.activate(env)
shell.launch(env.name) |
def remove(name_or_path):
'''Remove an environment'''
click.echo()
try:
r = cpenv.resolve(name_or_path)
except cpenv.ResolveError as e:
click.echo(e)
return
obj = r.resolved[0]
if not isinstance(obj, cpenv.VirtualEnvironment):
click.echo('{} is a module. Use `cpenv module remove` instead.'.format(obj.name))
return
click.echo(format_objects([obj]))
click.echo()
user_confirmed = click.confirm(
red('Are you sure you want to remove this environment?')
)
if user_confirmed:
click.echo('Attempting to remove...', nl=False)
try:
obj.remove()
except Exception as e:
click.echo(bold_red('FAIL'))
click.echo(e)
else:
click.echo(bold_green('OK!')) |
def list_():
'''List available environments and modules'''
click.echo('Cached Environments')
environments = list(EnvironmentCache)
click.echo(format_objects(environments, children=False)) |
def add(path):
'''Add an environment to the cache. Allows you to activate the environment
by name instead of by full path'''
click.echo('\nAdding {} to cache......'.format(path), nl=False)
try:
r = cpenv.resolve(path)
except Exception as e:
click.echo(bold_red('FAILED'))
click.echo(e)
return
if isinstance(r.resolved[0], cpenv.VirtualEnvironment):
EnvironmentCache.add(r.resolved[0])
EnvironmentCache.save()
click.echo(bold_green('OK!')) |
def remove(path):
'''Remove a cached environment. Removed paths will no longer be able to
be activated by name'''
r = cpenv.resolve(path)
if isinstance(r.resolved[0], cpenv.VirtualEnvironment):
EnvironmentCache.discard(r.resolved[0])
EnvironmentCache.save() |
def create(name_or_path, config):
'''Create a new template module.
You can also specify a filesystem path like "./modules/new_module"
'''
click.echo('Creating module {}...'.format(name_or_path), nl=False)
try:
module = cpenv.create_module(name_or_path, config)
except Exception as e:
click.echo(bold_red('FAILED'))
raise
else:
click.echo(bold_green('OK!'))
click.echo('Browse to your new module and make some changes.')
click.echo("When you're ready add the module to an environment:")
click.echo(' cpenv module add my_module ./path/to/my_module')
click.echo('Or track your module on git and add it directly from the repo:')
click.echo(' cpenv module add my_module [email protected]:user/my_module.git') |
def add(name, path, branch, type):
'''Add a module to an environment. PATH can be a git repository path or
a filesystem path. '''
if not name and not path:
ctx = click.get_current_context()
click.echo(ctx.get_help())
examples = (
'\nExamples:\n'
' cpenv module add my_module ./path/to/my_module\n'
' cpenv module add my_module [email protected]:user/my_module.git\n'
' cpenv module add my_module [email protected]:user/my_module.git --branch=master --type=shared'
)
click.echo(examples)
return
if not name:
click.echo('Missing required argument: name')
return
if not path:
click.echo('Missing required argument: path')
return
env = cpenv.get_active_env()
if type=='local':
if not env:
click.echo('\nActivate an environment to add a local module.\n')
return
if click.confirm('\nAdd {} to active env {}?'.format(name, env.name)):
click.echo('Adding module...', nl=False)
try:
env.add_module(name, path, branch)
except:
click.echo(bold_red('FAILED'))
raise
else:
click.echo(bold_green('OK!'))
return
module_paths = cpenv.get_module_paths()
click.echo('\nAvailable module paths:\n')
for i, mod_path in enumerate(module_paths):
click.echo(' {}. {}'.format(i, mod_path))
choice = click.prompt(
'Where do you want to add your module?',
type=int,
default=0
)
module_root = module_paths[choice]
module_path = utils.unipath(module_root, name)
click.echo('Creating module {}...'.format(module_path), nl=False)
try:
cpenv.create_module(module_path, path, branch)
except:
click.echo(bold_red('FAILED'))
raise
else:
click.echo(bold_green('OK!')) |
def remove(name, local):
'''Remove a module named NAME. Will remove the first resolved module named NAME. You can also specify a full path to a module. Use the --local option
to ensure removal of modules local to the currently active environment.'''
click.echo()
if not local: # Use resolver to find module
try:
r = cpenv.resolve(name)
except cpenv.ResolveError as e:
click.echo(e)
return
obj = r.resolved[0]
else: # Try to find module in active environment
env = cpenv.get_active_env()
if not env:
click.echo('You must activate an env to remove local modules')
return
mod = env.get_module(name)
if not mod:
click.echo('Failed to resolve module: ' + name)
return
obj = mod
if isinstance(obj, cpenv.VirtualEnvironment):
click.echo('{} is an environment. Use `cpenv remove` instead.'.format(name))
return
click.echo(format_objects([obj]))
click.echo()
user_confirmed = click.confirm(
red('Are you sure you want to remove this module?')
)
if user_confirmed:
click.echo('Attempting to remove...', nl=False)
try:
obj.remove()
except Exception as e:
click.echo(bold_red('FAILED'))
click.echo(e)
else:
click.echo(bold_green('OK!')) |
def localize(name):
'''Copy a global module to the active environment.'''
env = cpenv.get_active_env()
if not env:
click.echo('You need to activate an environment first.')
return
try:
r = cpenv.resolve(name)
except cpenv.ResolveError as e:
click.echo('\n' + str(e))
return
module = r.resolved[0]
if isinstance(module, cpenv.VirtualEnvironment):
click.echo('\nCan only localize a module, not an environment')
return
active_modules = cpenv.get_active_modules()
if module in active_modules:
click.echo('\nCan not localize an active module.')
return
if module in env.get_modules():
click.echo('\n{} is already local to {}'.format(module.name, env.name))
return
if click.confirm('\nAdd {} to env {}?'.format(module.name, env.name)):
click.echo('Adding module...', nl=False)
try:
module = env.add_module(module.name, module.path)
except:
click.echo(bold_red('FAILED'))
raise
else:
click.echo(bold_green('OK!'))
click.echo('\nActivate the localized module:')
click.echo(' cpenv activate {} {}'.format(env.name, module.name)) |
def path_resolver(resolver, path):
'''Resolves VirtualEnvironments with a relative or absolute path'''
path = unipath(path)
if is_environment(path):
return VirtualEnvironment(path)
raise ResolveError |
def home_resolver(resolver, path):
'''Resolves VirtualEnvironments in CPENV_HOME'''
from .api import get_home_path
path = unipath(get_home_path(), path)
if is_environment(path):
return VirtualEnvironment(path)
raise ResolveError |
def cache_resolver(resolver, path):
'''Resolves VirtualEnvironments in EnvironmentCache'''
env = resolver.cache.find(path)
if env:
return env
raise ResolveError |
def module_resolver(resolver, path):
'''Resolves module in previously resolved environment.'''
if resolver.resolved:
if isinstance(resolver.resolved[0], VirtualEnvironment):
env = resolver.resolved[0]
mod = env.get_module(path)
if mod:
return mod
raise ResolveError |
def modules_path_resolver(resolver, path):
'''Resolves modules in CPENV_MODULES path and CPENV_HOME/modules'''
from .api import get_module_paths
for module_dir in get_module_paths():
mod_path = unipath(module_dir, path)
if is_module(mod_path):
return Module(mod_path)
raise ResolveError |
def active_env_module_resolver(resolver, path):
'''Resolves modules in currently active environment.'''
from .api import get_active_env
env = get_active_env()
if not env:
raise ResolveError
mod = env.get_module(path)
if not mod:
raise ResolveError
return mod |
def redirect_resolver(resolver, path):
'''Resolves an environment from a .cpenv file, recursively walking up the
directory tree until a .cpenv file is found.'''
if not os.path.exists(path):
raise ResolveError
if os.path.isfile(path):
path = os.path.dirname(path)
for root, _, _ in walk_up(path):
if is_redirecting(root):
env_paths = redirect_to_env_paths(unipath(root, '.cpenv'))
r = Resolver(*env_paths)
return r.resolve()
raise ResolveError |
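Each of the resolver functions above follows the same contract: it receives the owning resolver plus a path string and either returns a resolved object (a VirtualEnvironment or Module) or raises ResolveError so the next resolver in the chain can be tried. The sketch below shows how such a chain might be driven; ChainResolver is a hypothetical stand-in, not cpenv's real Resolver class, which additionally tracks resolved results and an environment cache that cache_resolver and module_resolver rely on.
class ChainResolver(object):
    '''Hypothetical illustration of the resolver-chain contract used above.'''

    def __init__(self, *resolver_funcs):
        self.resolver_funcs = resolver_funcs
        self.resolved = []  # module_resolver inspects previously resolved items

    def resolve_one(self, path):
        for resolver_func in self.resolver_funcs:
            try:
                obj = resolver_func(self, path)
                self.resolved.append(obj)
                return obj
            except ResolveError:
                continue  # this resolver could not handle the path; try the next
        raise ResolveError('Could not resolve: ' + path)

# Usage sketch: try an explicit path first, then CPENV_HOME, then named modules.
# r = ChainResolver(path_resolver, home_resolver, modules_path_resolver)
# env_or_module = r.resolve_one('my_env')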
def _engine_affinity(obj):
"""Which engine or engines are preferred for processing this object
Returns: (location, weight)
location (integer or tuple): engine id (or in the case of a distributed
array a tuple (engine_id_list, distaxis)).
weight(integer): Proportional to the cost of moving the object to a
different engine. Currently just taken to be the size of data.
"""
from distob import engine
this_engine = engine.eid
if isinstance(obj, numbers.Number) or obj is None:
return (this_engine, 0)
elif hasattr(obj, '__engine_affinity__'):
# This case includes Remote subclasses and DistArray
return obj.__engine_affinity__
else:
return (this_engine, _rough_size(obj)) |
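As a rough illustration of the return value (the engine ids and weights below are hypothetical and depend on the running cluster and on what _rough_size reports):
import numpy as np
# Hypothetical values; the local engine id and weights depend on the cluster.
_engine_affinity(3.14)           # -> (0, 0): scalars and None cost nothing to move
_engine_affinity(np.ones(1000))  # -> (0, 8000): local array, weight ~ its size in bytes
# A DistArray split over engines [1, 2] along axis 0 reports its own affinity
# via __engine_affinity__, e.g. (([1, 2], 0), <total size in bytes>).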
def _ufunc_move_input(obj, location, bshape):
"""Copy ufunc input `obj` to new engine location(s) unless obj is scalar.
If the input is requested to be distributed to multiple engines, this
function will also take care of broadcasting along the distributed axis.
If the input obj is a scalar, it will be passed through unchanged.
Args:
obj (array_like or scalar): one of the inputs to a ufunc
location (integer or tuple): If an integer, this specifies a single
engine id to which an array input should be moved. If it is a tuple,
location[0] is a list of engine ids for distributing the array input
and location[1] an integer indicating which axis should be distributed.
bshape (tuple): The shape to which the input will ultimately be broadcast
Returns:
array_like or RemoteArray or DistArray or scalar
"""
if (not hasattr(type(obj), '__array_interface__') and
not isinstance(obj, Remote) and
(isinstance(obj, string_types) or
not isinstance(obj, Sequence))):
# then treat it as a scalar
return obj
from distob import engine
this_engine = engine.eid
if location == this_engine:
# move obj to the local host, if not already here
if isinstance(obj, Remote) or isinstance(obj, DistArray):
return gather(obj)
else:
return obj
elif isinstance(location, numbers.Integral):
# move obj to a single remote engine
if isinstance(obj, Remote) and obj._ref.id.engine == location:
#print('no data movement needed!')
return obj
obj = gather(obj)
return _directed_scatter(obj, axis=None, destination=location)
else:
# location is a tuple (list of engine ids, distaxis) indicating that
# obj should be distributed.
engine_ids, distaxis = location
if not isinstance(obj, DistArray):
gather(obj)
if isinstance(obj, Sequence):
obj = np.array(obj)
if obj.ndim < len(bshape):
ix = (np.newaxis,)*(len(bshape)-obj.ndim) + (slice(None),)*obj.ndim
obj = obj[ix]
if (isinstance(obj, DistArray) and distaxis == obj._distaxis and
engine_ids == [ra._ref.id.engine for ra in obj._subarrays]):
#print('no data movement needed!')
return obj
obj = gather(obj)
if obj.shape[distaxis] == 1:
# broadcast this axis across engines
subarrays = [_directed_scatter(obj, None, m) for m in engine_ids]
return DistArray(subarrays, distaxis)
else:
return _directed_scatter(obj, distaxis, destination=engine_ids) |
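For example, when a ufunc mixes a distributed operand with an ordinary array that has length 1 along the distributed axis, the ordinary array is broadcast by scattering a copy to every engine. A hedged sketch (the engine ids and shapes are made up for illustration):
import numpy as np
row = np.ones((1, 100))
# Suppose the other operand is a DistArray of shape (4, 100) distributed along
# axis 0 over engines [1, 2], so the broadcast result shape is (4, 100):
# moved = _ufunc_move_input(row, ([1, 2], 0), (4, 100))
# Because row has length 1 along the distributed axis, a copy is scattered to
# each engine and `moved` comes back as a DistArray, letting the ufunc run
# engine-by-engine with no further data movement.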
def _ufunc_dispatch(ufunc, method, i, inputs, **kwargs):
"""Route ufunc execution intelligently to local host or remote engine(s)
depending on where the inputs are, to minimize the need to move data.
Args:
see numpy documentation for __numpy_ufunc__
"""
#__print_ufunc(ufunc, method, i, inputs, **kwargs)
if 'out' in kwargs and kwargs['out'] is not None:
raise Error('for distributed ufuncs `out=` is not yet implemented')
nin = 2 if ufunc is np.dot else ufunc.nin
if nin == 1 and method == '__call__':
return vectorize(ufunc.__call__)(inputs[0], **kwargs)
elif nin == 2 and method == '__call__':
from distob import engine
here = engine.eid
# Choose best location for the computation, possibly distributed:
locs, weights = zip(*[_engine_affinity(a) for a in inputs])
# for DistArrays, adjust preferred distaxis to account for broadcasting
bshape = _broadcast_shape(*inputs)
locs = list(locs)
for i, loc in enumerate(locs):
if isinstance(loc, _TupleType):
num_new_axes = len(bshape) - inputs[i].ndim
if num_new_axes > 0:
locs[i] = (locs[i][0], locs[i][1] + num_new_axes)
if ufunc is np.dot:
locs = [here if isinstance(m, _TupleType) else m for m in locs]
if locs[0] == locs[1]:
location = locs[0]
else:
# TODO: More accurately penalize the increased data movement if we
# choose to distribute an axis that requires broadcasting.
smallest = 0 if weights[0] <= weights[1] else 1
largest = 1 - smallest
if locs[0] is here or locs[1] is here:
location = here if weights[0] == weights[1] else locs[largest]
else:
# Both inputs are on remote engines. With the current
# implementation, data on one remote engine can only be moved
# to another remote engine via the client. Cost accordingly:
if weights[smallest]*2 < weights[largest] + weights[smallest]:
location = locs[largest]
else:
location = here
# Move both inputs to the chosen location:
inputs = [_ufunc_move_input(a, location, bshape) for a in inputs]
# Execute computation:
if location is here:
return ufunc.__call__(inputs[0], inputs[1], **kwargs)
else:
if isinstance(location, numbers.Integral):
# location is a single remote engine
return call(ufunc.__call__, inputs[0], inputs[1], **kwargs)
else:
# location is a tuple (list of engine ids, distaxis) implying
# that the moved inputs are now distributed arrays (or scalar)
engine_ids, distaxis = location
n = len(engine_ids)
is_dist = tuple(isinstance(a, DistArray) for a in inputs)
assert(is_dist[0] or is_dist[1])
for i in 0, 1:
if is_dist[i]:
ndim = inputs[i].ndim
assert(inputs[i]._distaxis == distaxis)
assert(inputs[i]._n == n)
def _remote_ucall(inputs, **kwargs):
"""(Executed on a remote or local engine) call the ufunc"""
return ufunc.__call__(inputs[0], inputs[1], **kwargs)
results = []
kwargs = kwargs.copy()
kwargs['block'] = False
kwargs['prefer_local'] = False
for j in range(n):
subinputs = tuple(inputs[i]._subarrays[j] if
is_dist[i] else inputs[i] for i in (0, 1))
results.append(call(_remote_ucall, subinputs, **kwargs))
results = [convert_result(ar) for ar in results]
return DistArray(results, distaxis)
elif ufunc.nin > 2:
raise Error(u'Distributing ufuncs with >2 inputs is not yet supported')
else:
raise Error(u'Distributed ufunc.%s() is not yet implemented' % method) |
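For context, this dispatcher is normally reached through NumPy's (since superseded) __numpy_ufunc__ protocol: the proxy classes implement that hook and forward the call here. A minimal sketch of such a hook, written as an assumption about how a proxy class would wire it up rather than as the library's actual code:
class _UfuncRoutingMixin(object):
    """Sketch only: forwards ufunc calls to _ufunc_dispatch via __numpy_ufunc__."""

    def __numpy_ufunc__(self, ufunc, method, i, inputs, **kwargs):
        # NumPy (versions supporting this protocol) calls this instead of
        # computing the ufunc itself, so the dispatcher can pick where to run.
        return _ufunc_dispatch(ufunc, method, i, inputs, **kwargs)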
def transpose(a, axes=None):
"""Returns a view of the array with axes transposed.
For a 1-D array, this has no effect.
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted
Args:
a (array_like): Input array.
axes (list of int, optional): By default, reverse the dimensions,
otherwise permute the axes according to the values given.
"""
if isinstance(a, np.ndarray):
return np.transpose(a, axes)
elif isinstance(a, RemoteArray):
return a.transpose(*axes) if axes is not None else a.transpose()
elif isinstance(a, Remote):
ra = _remote_to_array(a)
return ra.transpose(*axes) if axes is not None else ra.transpose()
elif isinstance(a, DistArray):
if axes is None:
axes = range(a.ndim - 1, -1, -1)
axes = list(axes)
if len(set(axes)) < len(axes):
raise ValueError("repeated axis in transpose")
if sorted(axes) != list(range(a.ndim)):
raise ValueError("axes don't match array")
distaxis = a._distaxis
new_distaxis = axes.index(distaxis)
new_subarrays = [ra.transpose(*axes) for ra in a._subarrays]
return DistArray(new_subarrays, new_distaxis)
else:
return np.transpose(a, axes) |
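A brief usage sketch: plain ndarrays fall through to np.transpose, while for a DistArray only the axis bookkeeping changes and each subarray is transposed on its own engine. The DistArray comment is indicative only:
import numpy as np
a = np.arange(24).reshape(2, 3, 4)
print(transpose(a).shape)             # (4, 3, 2): default reverses the axes
print(transpose(a, (1, 0, 2)).shape)  # (3, 2, 4)
# For a DistArray distributed along axis 0, transpose(d, (1, 0, 2)) returns a
# DistArray whose distributed axis is now 1; no data moves between engines.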
def rollaxis(a, axis, start=0):
"""Roll the specified axis backwards, until it lies in a given position.
Args:
a (array_like): Input array.
axis (int): The axis to roll backwards. The positions of the other axes
do not change relative to one another.
start (int, optional): The axis is rolled until it lies before this
position. The default, 0, results in a "complete" roll.
Returns:
res (ndarray)
"""
if isinstance(a, np.ndarray):
return np.rollaxis(a, axis, start)
if axis not in range(a.ndim):
raise ValueError(
'rollaxis: axis (%d) must be >=0 and < %d' % (axis, a.ndim))
if start not in range(a.ndim + 1):
raise ValueError(
'rollaxis: start (%d) must be >=0 and < %d' % (start, a.ndim+1))
axes = list(range(a.ndim))
axes.remove(axis)
axes.insert(start, axis)
return transpose(a, axes) |
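For non-ndarray inputs, rollaxis builds the permutation explicitly and delegates to the transpose wrapper above: the axis is removed from the identity permutation and reinserted at start. For example:
import numpy as np
a = np.ones((3, 4, 5, 6))
print(rollaxis(a, 3, 1).shape)  # (3, 6, 4, 5): axis 3 moved to position 1
print(rollaxis(a, 2).shape)     # (5, 3, 4, 6): default start=0 is a "complete" roll
# RemoteArray and DistArray inputs take the same code path via transpose(a, axes).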