Python
def run(self, #join tabular results back to the finv
vlay_raw, #finv vlay (to join results to)
df_raw=None,
cid=None, #linking column/field name
#data cleaning
relabel = 'ari', #how to relabel event fields using the ttl values
#None: no relabeling
#aep: use aep values (this is typically the form already)
#ari: convert to ari values
keep_fnl = 'all', #list of field names to keep from the vlay (or 'all' to keep all)
layname = None,
):
"""
todo: clean this up and switch over to joinattributestable algo
"""
#=======================================================================
# defaults
#=======================================================================
log = self.logger.getChild('djoin')
if cid is None: cid = self.cid
if layname is None: layname=self.resname
if layname is None: layname = 'djoin_%s_%s'%(self.tag, vlay_raw.name())
assert isinstance(layname, str), 'got bad type on layname: %s'%type(layname)
if df_raw is None: df_raw=self.data_d[self.fp_attn]
#=======================================================================
# get data
#=======================================================================
lkp_df = self._prep_table(df_raw, relabel, log=log)
vlay_df = self._prep_vlay(vlay_raw, keep_fnl, log=log)
#=======================================================================
# join data
#=======================================================================
res_df = self.fancy_join(vlay_df, lkp_df, logger=log)
#=======================================================================
# generate the new layer--------
#=======================================================================
geo_d = vlay_get_fdata(vlay_raw, geo_obj=True, logger=log)
#reformat column names as strings
df = res_df.copy()
df.columns = res_df.columns.astype(str)
res_vlay = self.vlay_new_df2(df, geo_d=geo_d, crs = vlay_raw.crs(),
layname=layname, logger=log)
"""
view(df)
view(df.isna())
"""
log.info('finished on \'%s\''%res_vlay.name())
return res_vlay
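#Usage sketch (hedged): the worker instance and attribute names below are stand-ins
#for whatever class defines run() above; they are assumptions for illustration only.
#wrkr.data_d[wrkr.fp_attn] = results_df            #tabular results keyed by the cid field
#res_vlay = wrkr.run(finv_vlay, cid='xid', relabel='ari', keep_fnl='all')
#QgsProject.instance().addMapLayer(res_vlay)       #inspect the joined layer in QGIS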
Python
def fancy_join(self,
vlay_df, lkp_df, #note: parameter order matches the call in run() (vector layer data first, then results lookup)
logger=None):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log=logger.getChild('fancy_join')
#=======================================================================
# #check key intersect
#=======================================================================
"""we allow the results lkp_df to be smaller than the vector layer"""
l = set(lkp_df[self.cid]).difference(vlay_df[self.cid])
if not len(l)==0:
bx = ~lkp_df[self.cid].isin(vlay_df[self.cid])
with pd.option_context('display.max_rows', None,
'display.max_columns', None,
'display.width',1000):
log.debug('missing entries %i (of %i)\n%s'%(bx.sum(), len(bx), lkp_df[bx]))
raise Error('%i (of %i) \'%s\' entries in the results not found in the finv_vlay. see logger: \n %s'%(
len(l), len(lkp_df), self.cid, l))
#=======================================================================
# column intersect
#=======================================================================
icols = set(lkp_df.columns).intersection(vlay_df.columns) #overlapping (non-key) columns
icols.remove(self.cid)
if len(icols)>0:
log.warning('got %i overlapping columns...taking data from vlay \n %s'%(len(icols), icols))
#===========================================================================
# join-----------
#===========================================================================
boolidx = vlay_df[self.cid].isin(lkp_df[self.cid].values)
res_df = vlay_df.loc[boolidx, :].merge(lkp_df,
how='inner', #only use intersect keys
on = self.cid,
validate= '1:1', #check merge keys are unique in both datasets
indicator=False, #flag where the rows came from (_merge)
)
assert res_df.columns.is_unique
#reset index
assert res_df['fid'].is_unique
res_df = res_df.set_index('fid', drop=True).sort_index(axis=0)
if not np.array_equal(res_df.index, vlay_df.index):
"""aoi slicing?"""
log.warning('index mismatch')
log.info('merged %s w/ %s to get %s'%(
str(vlay_df.shape), str(lkp_df.shape), str(res_df.shape)))
return res_df
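#A standalone pandas sketch of the join logic above (hypothetical data; 'xid' plays
#the role of self.cid). It mirrors the key check, the boolean slice, and the 1:1 inner merge.
import pandas as pd

vlay_df = pd.DataFrame({'fid': [0, 1, 2], 'xid': [10, 11, 12], 'f0_scale': [1.0, 0.5, 2.0]})
lkp_df = pd.DataFrame({'xid': [10, 11], 'impacts': [5.2, 0.0]})  #results may be a subset

assert len(set(lkp_df['xid']).difference(vlay_df['xid'])) == 0, 'orphan result keys'
boolidx = vlay_df['xid'].isin(lkp_df['xid'].values)
res_df = vlay_df.loc[boolidx, :].merge(lkp_df, how='inner', on='xid', validate='1:1')
res_df = res_df.set_index('fid', drop=True).sort_index(axis=0)
print(res_df.shape)  #(2, 3)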
Python
def typeset_df(self, #typeset and prep the parameter frame using handles
df_raw, #parameters: candidates
logger=None,
):
"""
we transpose the frame to preserve types on columns
"""
if logger is None: logger=self.logger
log=logger.getChild('typeset_df')
#=======================================================================
# precheck
#=======================================================================
assert isinstance(df_raw, pd.DataFrame)
assert 'name' in df_raw.columns, 'must specify a name column'
assert df_raw['name'].iloc[0] == self.name, 'base name does not match'
log.info('on %s'%str(df_raw.shape))
#=======================================================================
# typeset
#=======================================================================
#loop and collect as typeset series
d = dict( )
for colName, col in df_raw.copy().items():
assert hasattr(self, colName), colName
#retrieve default from class
classVal = getattr(self, colName)
#special for booleans
if classVal.__class__.__name__=='bool':
d[colName] = col.str.lower().replace({'true':True,'false':False}).astype(classVal.__class__)
else:
d[colName] = col.astype(classVal.__class__)
df = pd.concat(list(d.values()), axis=1, keys=d.keys())
log.debug('finished w/ %i typeset: \n %s'%(len(df), df.dtypes.to_dict()))
return df
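#Standalone sketch of the per-column typesetting idea, using a plain defaults dict in
#place of the class attributes (names and values are hypothetical): each string column
#is cast to the type of its default, with the special lower()/replace() handling for booleans.
import pandas as pd

defaults = {'name': 'base', 'apply_miti': False, 'prec': 2}
df_raw = pd.DataFrame({'name': ['cand01', 'cand02'], 'apply_miti': ['True', 'false'], 'prec': ['2', '4']})

d = dict()
for colName, col in df_raw.items():
    classVal = defaults[colName]
    if isinstance(classVal, bool):
        d[colName] = col.str.lower().replace({'true': True, 'false': False}).astype(bool)
    else:
        d[colName] = col.astype(type(classVal))
df = pd.concat(list(d.values()), axis=1, keys=d.keys())
print(df.dtypes.to_dict())  #{'name': object, 'apply_miti': bool, 'prec': int64}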
Python
def build_candidates(self, #build all the candidate models
df_raw, #frame with parameters {par:candidate} (always absolute filepaths)
base_cf_fp = None, #base control file
base_cf_fn = None, #base control file name
logger=None,
out_basedir = None, #directory where all the candidate models will be saved
copyDataFiles=True, #whether to copy over all datafiles
absolute_fp=None, #status of the base control file (df_raw is always absolute)
):
"""
WARNING: this reads candidates from a dataframe, not the control file
"""
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log=logger.getChild('bcan')
if base_cf_fp is None: base_cf_fp=self.cf_fp
if out_basedir is None: out_basedir = self.out_dir
if absolute_fp is None: absolute_fp=self.absolute_fp
if base_cf_fn is None:
base_cf_fn = os.path.splitext(os.path.basename(base_cf_fp))[0]
log.info('on %s'%base_cf_fp)
#=======================================================================
# prep the data
#=======================================================================
df1 = self.typeset_df(df_raw, logger=log)
#=======================================================================
# #check base pars
#=======================================================================
"""checking whats on the compile tab (first row)
against what weve loaded from the cf on the setup tab"""
pars_d = df1.iloc[0,:].to_dict()
for k,v in pars_d.items():
assert hasattr(self, k), 'worker missing requested attribute \'%s\''%k
#classVal = getattr(self, k)
#assert v == classVal, 'mismatch on \'%s\': %s != %s'%(k, classVal, v)
#df = df1.iloc[1:,:] #drop the base
df = df1.copy()
#=======================================================================
# remove results from main
#=======================================================================
"""never using results from the main
moved to Load"""
#=======================================================================
# get sections
#=======================================================================
attn_sect_d = self.get_sections(df1.columns.tolist(), logger=log)
#=======================================================================
# loop and create each candidate
#=======================================================================
log.info('creating %i candidate models'%len(df.T))
meta_lib = dict()
#collect init kwargs for candidates
kwargs = {attn:getattr(self,attn) for attn in [
#'absolute_fp', #need to convert everything to absolute
'feedback']}
#loop on rows but preserve types
first=True
for i, (mtag, pars_d) in enumerate(df.to_dict(orient='index').items()):
log = logger.getChild('bcan.%i'%i)
log.debug('on %s'%mtag)
#===================================================================
# #setup the new directory
#===================================================================
out_dir = os.path.join(out_basedir, mtag)
if os.path.exists(out_dir):
assert self.overwrite
else:
os.makedirs(out_dir)
#===================================================================
# prep the control file
#===================================================================
#copy over the base cf_fp
cf_fp = os.path.join(out_dir,'%s_%s.txt'%(base_cf_fn, mtag))
_ = shutil.copyfile(base_cf_fp, cf_fp)
log.info('copied cf to %s'%cf_fp)
#===================================================================
# prep the base control file
#===================================================================
if first:
#handle relatives
if not self.absolute_fp:
#change everything to absolute
self._cfFile_relative(cf_fp=cf_fp, logger=log)
#tell subsequent siblings to use this one
base_cf_fp = cf_fp
first=False
#===================================================================
# prep the parameters
#===================================================================
pars_d1 = dict()
for attn, attv in pars_d.items():
sectName = attn_sect_d[attn]
if not sectName in pars_d1:
pars_d1[sectName]=dict()
pars_d1[sectName][attn] = attv
#===================================================================
# #update the control file w/ the new parameters
#===================================================================
log.debug('building %s'%pars_d['name'])
with CandidateModel(out_dir=out_dir, cf_fp=cf_fp, logger=log, mtag=mtag, name=pars_d['name'], **kwargs) as wrkr:
#load the base control file
"""
wrkr.absolute_fp
wrkr.base_dir
"""
wrkr.init_model()
#update base control file with new values
pars_d2 = wrkr.upd_cfPars(pars_d1)
#copy over all the data files
if copyDataFiles:
pars_d3 = wrkr.copy_datafiles(cfPars_d=pars_d2)
else:
pars_d3 = pars_d2
#special fix for colors
if 'color' in pars_d3['plotting']:
pars_d3['plotting']['color'] = pars_d3['plotting']['color'].replace('#','?')
#convert to strings again
pars_d3 = {sect:{k:str(v) for k,v in att_d.items()} for sect, att_d in pars_d3.items()}
#add notes
txt = '#generated sensitivity analysis candidate %s.%s'%(mtag, self.resname)
#save to file (this should overwrite everything)
wrkr.set_cf_pars({k:tuple([att_d,txt]) for k,att_d in pars_d3.items()})
log.debug('finished on %s'%wrkr.name)
meta_lib[mtag] = {'cf_fp':cf_fp, 'name':pars_d['name'], 'new_pars':len(pars_d)-1, 'pars_d':pars_d3}
#=======================================================================
# wrap
#=======================================================================
kstr = 'finished building %i to \n %s'%(len(meta_lib), out_basedir) + '\n'
for mtag, d in meta_lib.items():
kstr = kstr + ' \'%s\':r\'%s\',\n'%(mtag, d['cf_fp'])
log.info(kstr)
return meta_lib
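#Standalone sketch of the "group flat parameters by section, then write a control file"
#step used above (the section mapping and parameter names are hypothetical; in the code
#above the mapping comes from get_sections() and the writing from set_cf_pars()).
import configparser

attn_sect_d = {'name': 'parameters', 'felv': 'parameters', 'color': 'plotting'}
pars_d = {'name': 'cand01', 'felv': 'ground', 'color': '?e01e24'}

pars_d1 = dict()
for attn, attv in pars_d.items():
    pars_d1.setdefault(attn_sect_d[attn], dict())[attn] = str(attv)

cf = configparser.ConfigParser()
cf.read_dict(pars_d1)
with open('candidate_control.txt', 'w') as f:
    cf.write(f)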
Python
def init_qgis(self, #instantiate qgis
gui = False):
"""
WARNING: need to hold this app somewhere. call in the module you're working in (scripts)
"""
log = self.logger.getChild('init_qgis')
try:
QgsApplication.setPrefixPath(r'C:/OSGeo4W64/apps/qgis-ltr', True)
app = QgsApplication([], gui)
# Update prefix path
#app.setPrefixPath(r"C:\OSGeo4W64\apps\qgis", True)
app.initQgis()
#logging.debug(QgsApplication.showSettings())
""" was throwing unicode error"""
log.info(u' QgsApplication.initQgis. version: %s, release: %s'%(
Qgis.QGIS_VERSION.encode('utf-8'), Qgis.QGIS_RELEASE_NAME.encode('utf-8')))
return app
except Exception as e:
raise Error('QGIS failed to initiate: %s'%e)
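#Standalone sketch of headless QGIS startup/teardown (QGIS python bindings assumed on
#the path; the prefix path is an example OSGeo4W install and will differ per machine).
from qgis.core import QgsApplication

QgsApplication.setPrefixPath(r'C:/OSGeo4W64/apps/qgis-ltr', True)
app = QgsApplication([], False)  #False = headless (no GUI)
app.initQgis()
#... work with the QGIS API here ...
app.exitQgis()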
Python
def load_vlay(self,
fp,
logger=None,
providerLib='ogr',
aoi_vlay = None,
allow_none=True, #control check in saveselectedfeatures
addSpatialIndex=True,
uriParams_d = {'encoding':'System',
'type':'csv',
'maxFields':'10000',
'detectTypes':'yes',
'geomType':'none',
'subsetIndex':'no',
'watchFile':'no'},
):
assert os.path.exists(fp), 'requested file does not exist: %s'%fp
if logger is None: logger = self.logger
log = logger.getChild('load_vlay')
basefn = os.path.splitext(os.path.split(fp)[1])[0]
log.debug('loading from %s'%fp)
if providerLib == 'delimitedtext':
#constructor
uriW = QgsDataSourceUri()
for k,v in uriParams_d.items():
uriW.setParam(k,v)
uri = r'file:///' + fp.replace('\\','/') +'?'+ str(uriW.encodedUri(), 'utf-8')
else:
uri = fp
vlay_raw = QgsVectorLayer(uri,basefn,providerLib)
if providerLib == 'delimitedtext':
return vlay_raw
#=======================================================================
# # checks
#=======================================================================
assert isinstance(vlay_raw, QgsVectorLayer)
#check if this is valid
if not vlay_raw.isValid():
raise Error('loaded vlay \'%s\' is not valid. \n \n did you initialize?'%vlay_raw.name())
#check if it has geometry
if not providerLib == 'delimitedtext':
if vlay_raw.wkbType() == 100:
raise Error('loaded vlay has NoGeometry')
assert isinstance(self.mstore, QgsMapLayerStore)
"""only add intermediate layers to store
self.mstore.addMapLayer(vlay_raw)"""
if not vlay_raw.crs()==self.qproj.crs():
log.warning('crs mismatch: \n %s\n %s'%(
vlay_raw.crs(), self.qproj.crs()))
#=======================================================================
# aoi slice
#=======================================================================
if isinstance(aoi_vlay, QgsVectorLayer):
log.info('slicing by aoi %s'%aoi_vlay.name())
vlay = self.selectbylocation(vlay_raw, aoi_vlay, allow_none=allow_none,
logger=log, result_type='layer')
#check for no selection
if vlay is None:
return None
vlay.setName(vlay_raw.name()) #reset the name
#clear original from memory
self.mstore.addMapLayer(vlay_raw)
self.mstore.removeMapLayers([vlay_raw])
else:
vlay = vlay_raw
#=======================================================================
# clean------
#=======================================================================
#spatial index
if addSpatialIndex and (not vlay.hasSpatialIndex()==QgsFeatureSource.SpatialIndexPresent): #index the (possibly aoi-sliced) layer being returned
self.createspatialindex(vlay, logger=log)
#=======================================================================
# wrap
#=======================================================================
dp = vlay.dataProvider()
log.info('loaded vlay \'%s\' as \'%s\' %s geo with %i feats from file: \n %s'
%(vlay.name(), dp.storageType(), QgsWkbTypes().displayString(vlay.wkbType()), dp.featureCount(), fp))
return vlay
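#Standalone sketch of the delimited-text URI construction used above (file path and
#parameter values are examples; requires a QGIS python environment).
from qgis.core import QgsDataSourceUri, QgsVectorLayer

fp = r'C:\data\results.csv'
uriW = QgsDataSourceUri()
for k, v in {'encoding': 'System', 'type': 'csv', 'detectTypes': 'yes', 'geomType': 'none'}.items():
    uriW.setParam(k, v)
uri = r'file:///' + fp.replace('\\', '/') + '?' + str(uriW.encodedUri(), 'utf-8')
vlay = QgsVectorLayer(uri, 'results', 'delimitedtext')
assert vlay.isValid()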
Python
def vlay_new_df2(self, #build a vlay from a df
df_raw,
geo_d = None, #container of geometry objects {fid: QgsGeometry}
crs=None,
gkey = None, #data field linking with geo_d (if None.. uses df index)
layname='df',
index = False, #whether to include the index as a field
logger=None,
):
"""
performance enhancement over vlay_new_df
simpler, clearer
although less versatile
"""
#=======================================================================
# setup
#=======================================================================
if crs is None: crs = self.qproj.crs()
if logger is None: logger = self.logger
log = logger.getChild('vlay_new_df')
#=======================================================================
# index fix
#=======================================================================
df = df_raw.copy()
if index:
if not df.index.name is None:
coln = df.index.name
df.index.name = None
else:
coln = 'index'
df[coln] = df.index
#=======================================================================
# precheck
#=======================================================================
#make sure none of the field names exceed the driver limitations
max_len = fieldn_max_d[self.driverName]
#check lengths
boolcol = df_raw.columns.str.len() >= max_len
if np.any(boolcol):
log.warning('passed %i columns which exceed the max length=%i for driver \'%s\'.. truncating: \n %s'%(
boolcol.sum(), max_len, self.driverName, df_raw.columns[boolcol].tolist()))
df.columns = df.columns.str.slice(start=0, stop=max_len-1)
#make sure the columns are unique
assert df.columns.is_unique, 'got duplicated column names: \n %s'%(df.columns.tolist())
#check datatypes
assert np.array_equal(df.columns, df.columns.astype(str)), 'got non-string column names'
#check the geometry
if not geo_d is None:
assert isinstance(geo_d, dict)
if not gkey is None:
assert gkey in df_raw.columns
#assert 'int' in df_raw[gkey].dtype.name
#check gkey match
l = set(df_raw[gkey].drop_duplicates()).difference(geo_d.keys())
assert len(l)==0, 'missing %i \'%s\' keys in geo_d: %s'%(len(l), gkey, l)
#against index
else:
#check gkey match
l = set(df_raw.index).difference(geo_d.keys())
assert len(l)==0, 'missing %i (of %i) fid keys in geo_d: %s'%(len(l), len(df_raw), l)
#===========================================================================
# assemble the fields
#===========================================================================
#column name and python type
fields_d = {coln:np_to_pytype(col.dtype) for coln, col in df.items()}
#fields container
qfields = fields_build_new(fields_d = fields_d, logger=log)
#=======================================================================
# assemble the features
#=======================================================================
#convert form of data
feats_d = dict()
for fid, row in df.iterrows():
feat = QgsFeature(qfields, fid)
#loop and add data
for fieldn, value in row.items():
#skip null values
if pd.isnull(value): continue
#get the index for this field
findx = feat.fieldNameIndex(fieldn)
#get the qfield
qfield = feat.fields().at(findx)
#make the type match
ndata = qtype_to_pytype(value, qfield.type(), logger=log)
#set the attribute
if not feat.setAttribute(findx, ndata):
raise Error('failed to setAttribute')
#setgeometry
if not geo_d is None:
if gkey is None:
gobj = geo_d[fid]
else:
gobj = geo_d[row[gkey]]
feat.setGeometry(gobj)
#store it
feats_d[fid]=feat
log.debug('built %i \'%s\' features'%(
len(feats_d),
QgsWkbTypes.geometryDisplayString(feat.geometry().type()),
))
#=======================================================================
# get the geo type
#=======================================================================
if not geo_d is None:
gtype = QgsWkbTypes().displayString(next(iter(geo_d.values())).wkbType())
else:
gtype='None'
#===========================================================================
# build the new layer
#===========================================================================
vlay = vlay_new_mlay(gtype,
crs,
layname,
qfields,
list(feats_d.values()),
logger=log,
)
self.createspatialindex(vlay, logger=log)
#=======================================================================
# post check
#=======================================================================
if not geo_d is None:
if vlay.wkbType() == 100:
raise Error('constructed layer has NoGeometry')
return vlay
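#Sketch of assembling the geo_d container expected by vlay_new_df2 (hypothetical point
#data; requires a QGIS python environment, and a worker instance for the final call).
import pandas as pd
from qgis.core import QgsGeometry, QgsPointXY

df = pd.DataFrame({'xid': [10, 11], 'f0_scale': [1.0, 0.5]}, index=[0, 1])
geo_d = {fid: QgsGeometry.fromPointXY(QgsPointXY(x, y))
         for fid, (x, y) in zip(df.index, [(-123.10, 49.25), (-123.12, 49.27)])}
#res_vlay = wrkr.vlay_new_df2(df, geo_d=geo_d, layname='demo_points')  #hypothetical worker instance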
Python
def cliprasterwithpolygon(self,
rlay_raw,
poly_vlay,
layname = None,
#output = 'TEMPORARY_OUTPUT',
logger = None,
):
"""
clipping a raster layer with a polygon mask using gdalwarp
"""
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
log = logger.getChild('cliprasterwithpolygon')
if layname is None:
layname = '%s_clipd'%rlay_raw.name()
algo_nm = 'gdal:cliprasterbymasklayer'
#=======================================================================
# precheck
#=======================================================================
assert isinstance(rlay_raw, QgsRasterLayer)
assert isinstance(poly_vlay, QgsVectorLayer)
assert 'Poly' in QgsWkbTypes().displayString(poly_vlay.wkbType())
assert rlay_raw.crs() == poly_vlay.crs()
#=======================================================================
# run algo
#=======================================================================
ins_d = { 'ALPHA_BAND' : False,
'CROP_TO_CUTLINE' : True,
'DATA_TYPE' : 0,
'EXTRA' : '',
'INPUT' : rlay_raw,
'KEEP_RESOLUTION' : True,
'MASK' : poly_vlay,
'MULTITHREADING' : False,
'NODATA' : None,
'OPTIONS' : '',
'OUTPUT' : 'TEMPORARY_OUTPUT',
'SET_RESOLUTION' : False,
'SOURCE_CRS' : None,
'TARGET_CRS' : None,
'X_RESOLUTION' : None,
'Y_RESOLUTION' : None,
}
log.debug('executing \'%s\' with ins_d: \n %s \n\n'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
log.debug('finished w/ \n %s'%res_d)
if not os.path.exists(res_d['OUTPUT']):
"""failing intermittently"""
raise Error('failed to get a result')
res_rlay = QgsRasterLayer(res_d['OUTPUT'], layname)
#=======================================================================
# #post check
#=======================================================================
assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
assert res_rlay.isValid()
res_rlay.setName(layname) #reset the name
log.debug('finished w/ %s'%res_rlay.name())
return res_rlay
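#Direct-call sketch of the same algorithm outside the wrapper (QGIS processing assumed
#initialized; file paths are examples). Only the non-default options are passed here.
import processing
from qgis.core import QgsRasterLayer, QgsVectorLayer

rlay = QgsRasterLayer(r'C:\data\wse_100yr.tif', 'wse_100yr')
aoi_vlay = QgsVectorLayer(r'C:\data\aoi.gpkg', 'aoi', 'ogr')
res_d = processing.run('gdal:cliprasterbymasklayer',
                       {'INPUT': rlay, 'MASK': aoi_vlay, 'CROP_TO_CUTLINE': True,
                        'KEEP_RESOLUTION': True, 'OUTPUT': 'TEMPORARY_OUTPUT'})
clipped = QgsRasterLayer(res_d['OUTPUT'], 'wse_100yr_clipd')
assert clipped.isValid()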
Python
def qrastercalculator(self, #QGIS native raster calculator
formula,
ref_layer = None, #reference layer
logger=None,
layname=None,
):
"""executes the algorhithim... better to use the constructor directly
QgsRasterCalculator"""
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
log = logger.getChild('qrastercalculator')
algo_nm = 'qgis:rastercalculator'
if layname is None:
if ref_layer is None:
layname = 'qrastercalculator'
else:
layname = '%s_calc'%ref_layer.name()
#=======================================================================
# execute
#=======================================================================
"""
formula = '\'haz_100yr_cT2@1\'-\'dtm_cT1@1\''
"""
ins_d = { 'CELLSIZE' : 0,
'CRS' : None,
'EXPRESSION' : formula,
'EXTENT' : None,
'LAYERS' : [ref_layer], #reference layer
'OUTPUT' : 'TEMPORARY_OUTPUT' }
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
log.debug('finished w/ \n %s'%res_d)
if not os.path.exists(res_d['RESULT']):
raise Error('failed to get a result')
res_rlay = QgsRasterLayer(res_d['RESULT'], layname)
#=======================================================================
# #post check
#=======================================================================
assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
assert res_rlay.isValid()
res_rlay.setName(layname) #reset the name
log.debug('finished w/ %s'%res_rlay.name())
return res_rlay
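#Usage sketch (hedged): `wrkr` stands for an instance of the class defining
#qrastercalculator(); the layer names inside the expression are examples and must
#match layers available to the processing context.
formula = "'haz_100yr@1' - 'dtm@1'"  #e.g. water surface minus ground elevation -> depth
#res_rlay = wrkr.qrastercalculator(formula, ref_layer=haz_rlay, layname='depth_100yr')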
Python
def polygonfromlayerextent(self,
vlay,
round_to=0, #optionally enlarge the extent to this rounded value
logger=None,
layname=None):
"""
This algorithm takes a map layer and generates a new vector layer with the
minimum bounding box (rectangle polygon with N-S orientation) that covers the input layer.
Optionally, the extent can be enlarged to a rounded value.
"""
#===========================================================================
# setups and defaults
#===========================================================================
if logger is None: logger = self.logger
log = logger.getChild('polygonfromlayerextent')
algo_nm = 'qgis:polygonfromlayerextent'
if layname is None:
layname = '%s_exts'%vlay.name()
#=======================================================================
# precheck
#=======================================================================
#=======================================================================
# # build inputs
#=======================================================================
ins_d = {'INPUT' : vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'ROUND_TO':round_to}
log.debug('\'%s\' on \'%s\' with: \n %s'
%(algo_nm, vlay.name(), ins_d))
#execute
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
assert isinstance(res_vlay, QgsVectorLayer)
#===========================================================================
# wrap
#===========================================================================
res_vlay.setName(layname) #reset the name
return res_vlay
Python
def upd_prog(self, #advanced progress handling
prog_raw, #pass None to reset
method='raw', #how to apply the value: 'raw', 'append', or 'portion' (of the remaining progress)
):
#=======================================================================
# defaults
#=======================================================================
#get the current progress
progress = self.progress()
#===================================================================
# prechecks
#===================================================================
#make sure we have some slots connected
"""not sure how to do this"""
#=======================================================================
# resetting
#=======================================================================
if prog_raw is None:
"""
would be nice to reset the progressBar.. .but that would be complicated
"""
self.setProgress(0)
return
#=======================================================================
# setting
#=======================================================================
if method=='append':
prog = min(progress + prog_raw, 100)
elif method=='raw':
prog = prog_raw
elif method == 'portion':
rem_prog = 100-progress
prog = progress + rem_prog*(prog_raw/100)
assert prog<=100
#===================================================================
# emit signalling
#===================================================================
self.setProgress(prog)
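#Standalone sketch of the 'portion' bookkeeping above: each call consumes a fraction of
#the *remaining* progress, so repeated calls approach but never exceed 100.
progress = 40                      #current bar position
prog_raw = 50                      #consume half of what remains
rem_prog = 100 - progress
prog = progress + rem_prog * (prog_raw / 100)
print(prog)                        #70.0
assert prog <= 100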
Python
def vlay_get_fdf( #pull all the feature data and place into a df
vlay,
fmt='df', #result format key.
#dict: {fid:{fieldname:value}}
#df: index=fids, columns=fieldnames
#limiters
request = None, #request to pull data. for more customized requests.
fieldn_l = None, #or field name list. for generic requests
#modifiers
reindex = None, #optional field name to reindex df by
#expectations
expect_all_real = False, #whether to expect all real results
allow_none = False,
db_f = False,
logger=mod_logger,
feedback=MyFeedBackQ()):
"""
performance improvement
Warning: requests with getFeatures aren't working as expected for memory layers
this could be combined with vlay_get_feats()
also see vlay_get_fdata() (for a single column)
RETURNS
a dictionary in the Qgis attribute dictionary format:
key: generally feat.id()
value: a dictionary of {field name: attribute value}
"""
#===========================================================================
# setups and defaults
#===========================================================================
log = logger.getChild('vlay_get_fdf')
assert isinstance(vlay, QgsVectorLayer)
all_fnl = [fieldn.name() for fieldn in vlay.fields().toList()]
if fieldn_l is None: #use all the fields
fieldn_l = all_fnl
else:
vlay_check(vlay, fieldn_l, logger=logger, db_f=db_f)
if allow_none:
if expect_all_real:
raise Error('cant allow none and expect all reals')
#===========================================================================
# prechecks
#===========================================================================
if not reindex is None:
if not reindex in fieldn_l:
raise Error('requested reindexer \'%s\' is not a field name'%reindex)
if not vlay.dataProvider().featureCount()>0:
raise Error('no features!')
if len(fieldn_l) == 0:
raise Error('no fields!')
if fmt=='dict' and not (len(fieldn_l)==len(all_fnl)):
raise Error('dict results dont respect field slicing')
assert hasattr(feedback, 'setProgress')
#===========================================================================
# build the request
#===========================================================================
feedback.setProgress(2)
if request is None:
"""WARNING: this doesnt seem to be slicing the fields.
see Alg().deletecolumns()
but this will re-key things
request = QgsFeatureRequest().setSubsetOfAttributes(fieldn_l,vlay.fields())"""
request = QgsFeatureRequest()
#never want geometry
request = request.setFlags(QgsFeatureRequest.NoGeometry)
log.debug('extracting data from \'%s\' on fields: %s'%(vlay.name(), fieldn_l))
#===========================================================================
# loop through each feature and extract the data
#===========================================================================
fid_attvs = dict() #{fid : {fieldn:value}}
fcnt = vlay.dataProvider().featureCount()
for indxr, feat in enumerate(vlay.getFeatures(request)):
#zip values
fid_attvs[feat.id()] = feat.attributes()
feedback.setProgress((indxr/fcnt)*90)
#===========================================================================
# post checks
#===========================================================================
if not len(fid_attvs) == vlay.dataProvider().featureCount():
log.debug('data result length does not match feature count')
if not request.filterType()==3: #check if a filter fids was passed
"""todo: add check to see if the fiter request length matches tresult"""
raise Error('no filter and data length mismatch')
#check the field lengths
if not len(all_fnl) == len(feat.attributes()):
raise Error('field length mismatch')
#empty check 1
if len(fid_attvs) == 0:
log.warning('failed to get any data on layer \'%s\' with request'%vlay.name())
if not allow_none:
raise Error('no data found!')
else:
if fmt == 'dict':
return dict()
elif fmt == 'df':
return pd.DataFrame()
else:
raise Error('unexpected fmt type')
#===========================================================================
# result formatting
#===========================================================================
log.debug('got %i data elements for \'%s\''%(
len(fid_attvs), vlay.name()))
if fmt == 'dict':
return fid_attvs
elif fmt=='df':
#build the dict
df_raw = pd.DataFrame.from_dict(fid_attvs, orient='index', columns=all_fnl)
#handle column slicing and Qnulls
"""if the requester worked... we probably wouldnt have to do this"""
df = df_raw.loc[:, tuple(fieldn_l)].replace([NULL], np.nan)
feedback.setProgress(95)
if isinstance(reindex, str):
"""
reindex='zid'
view(df)
"""
#try and add the index (fids) as a data column
try:
df = df.join(pd.Series(df.index,index=df.index, name='fid'))
except Exception:
log.debug('failed to preserve the fids.. column already there?')
#re-index by the passed key... the fids should be preserved in the 'fid' column
df = df.set_index(reindex, drop=True)
log.debug('reindexed data by \'%s\''%reindex)
return df
else:
raise Error('unrecognized fmt kwarg')
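#Usage sketch (hedged): assumes a QGIS python environment with this module imported;
#the layer path and field names are examples.
from qgis.core import QgsVectorLayer

vlay = QgsVectorLayer(r'C:\data\finv.gpkg', 'finv', 'ogr')
df = vlay_get_fdf(vlay, fmt='df', fieldn_l=['xid', 'f0_scale'], reindex='xid')
print(df.shape)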
vlay,
fmt='df', #result fomrat key.
#dict: {fid:{fieldname:value}}
#df: index=fids, columns=fieldnames
#limiters
request = None, #request to pull data. for more customized requestes.
fieldn_l = None, #or field name list. for generic requests
#modifiers
reindex = None, #optinal field name to reindex df by
#expectations
expect_all_real = False, #whether to expect all real results
allow_none = False,
db_f = False,
logger=mod_logger,
feedback=MyFeedBackQ()):
"""
performance improvement
Warning: requests with getFeatures arent working as expected for memory layers
this could be combined with vlay_get_feats()
also see vlay_get_fdata() (for a single column)
RETURNS
a dictionary in the Qgis attribute dictionary format:
key: generally feat.id()
value: a dictionary of {field name: attribute value}
"""
#===========================================================================
# setups and defaults
#===========================================================================
log = logger.getChild('vlay_get_fdf')
assert isinstance(vlay, QgsVectorLayer)
all_fnl = [fieldn.name() for fieldn in vlay.fields().toList()]
if fieldn_l is None: #use all the fields
fieldn_l = all_fnl
else:
vlay_check(vlay, fieldn_l, logger=logger, db_f=db_f)
if allow_none:
if expect_all_real:
raise Error('cant allow none and expect all reals')
#===========================================================================
# prechecks
#===========================================================================
if not reindex is None:
if not reindex in fieldn_l:
raise Error('requested reindexer \'%s\' is not a field name'%reindex)
if not vlay.dataProvider().featureCount()>0:
raise Error('no features!')
if len(fieldn_l) == 0:
raise Error('no fields!')
if fmt=='dict' and not (len(fieldn_l)==len(all_fnl)):
raise Error('dict results dont respect field slicing')
assert hasattr(feedback, 'setProgress')
#===========================================================================
# build the request
#===========================================================================
feedback.setProgress(2)
if request is None:
"""WARNING: this doesnt seem to be slicing the fields.
see Alg().deletecolumns()
but this will re-key things
request = QgsFeatureRequest().setSubsetOfAttributes(fieldn_l,vlay.fields())"""
request = QgsFeatureRequest()
#never want geometry
request = request.setFlags(QgsFeatureRequest.NoGeometry)
log.debug('extracting data from \'%s\' on fields: %s'%(vlay.name(), fieldn_l))
#===========================================================================
# loop through each feature and extract the data
#===========================================================================
fid_attvs = dict() #{fid : {fieldn:value}}
fcnt = vlay.dataProvider().featureCount()
for indxr, feat in enumerate(vlay.getFeatures(request)):
#zip values
fid_attvs[feat.id()] = feat.attributes()
feedback.setProgress((indxr/fcnt)*90)
#===========================================================================
# post checks
#===========================================================================
if not len(fid_attvs) == vlay.dataProvider().featureCount():
log.debug('data result length does not match feature count')
if not request.filterType()==3: #check if a filter fids was passed
"""todo: add check to see if the fiter request length matches tresult"""
raise Error('no filter and data length mismatch')
#check the field lengths
if not len(all_fnl) == len(feat.attributes()):
raise Error('field length mismatch')
#empty check 1
if len(fid_attvs) == 0:
log.warning('failed to get any data on layer \'%s\' with request'%vlay.name())
if not allow_none:
raise Error('no data found!')
else:
if fmt == 'dict':
return dict()
elif fmt == 'df':
return pd.DataFrame()
else:
raise Error('unexpected fmt type')
#===========================================================================
# result formatting
#===========================================================================
log.debug('got %i data elements for \'%s\''%(
len(fid_attvs), vlay.name()))
if fmt == 'dict':
return fid_attvs
elif fmt=='df':
#build the dict
df_raw = pd.DataFrame.from_dict(fid_attvs, orient='index', columns=all_fnl)
#handle column slicing and Qnulls
"""if the requester worked... we probably wouldnt have to do this"""
df = df_raw.loc[:, tuple(fieldn_l)].replace([NULL], np.nan)
feedback.setProgress(95)
if isinstance(reindex, str):
"""
reindex='zid'
view(df)
"""
#try and add the index (fids) as a data column
try:
df = df.join(pd.Series(df.index,index=df.index, name='fid'))
except:
log.debug('failed to preserve the fids.. column already there?')
#re-index by the passed key... the original fids are preserved in the 'fid' column added above
df = df.set_index(reindex, drop=True)
log.debug('reindexed data by \'%s\''%reindex)
return df
else:
raise Error('unrecognized fmt kwarg') |
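A minimal usage sketch for vlay_get_fdf follows. It assumes a live PyQGIS session plus this module's own imports (pandas, numpy, NULL); the file path and field names are hypothetical placeholders, not values from the original project.
from qgis.core import QgsVectorLayer

vlay = QgsVectorLayer(r'C:\data\finv_demo.gpkg', 'finv_demo', 'ogr')  # hypothetical path
assert vlay.isValid(), 'failed to load the demo layer'

# DataFrame indexed by fid, columns limited to two (hypothetical) field names
df = vlay_get_fdf(vlay, fieldn_l=['xid', 'f0_elv'], fmt='df')

# raw {fid: [attribute values]} dictionary over all fields
d = vlay_get_fdf(vlay, fmt='dict')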
Python | def vlay_get_fdata( #get data for a single field from all the features
vlay,
fieldn = None, #get a field name. 'None' returns a dictionary of np.nan
geopropn = None, #get a geometry property
geo_obj = False, #whether to just get the geometry object
request = None, #additional requester (limiting fids). fieldn still required. additional flags added
selected= False, #whether to limit data to just those selected features
fmt = 'dict', #format to return results in
#'singleton': expect and provide a unitary value
rekey = None, #field name to rekey dictionary like results by
expect_all_real = False, #whether to expect all real results
dropna = False, #whether to drop nulls from the results
allow_none = False,
logger = mod_logger, db_f=False):
"""
TODO: combine this with vlay_get_fdatas
consider combining with vlay_get_feats
I'm not sure how this will handle requests w/ expressions
"""
log = logger.getChild('vlay_get_fdata')
if request is None:
request = QgsFeatureRequest()
#===========================================================================
# prechecks
#===========================================================================
if geo_obj:
if fmt == 'df': raise IOError
if not geopropn is None: raise IOError
else:
assert fieldn in [f.name() for f in vlay.fields()], 'requested field not found: %s'%fieldn
if dropna:
if expect_all_real:
raise Error('cant expect_all_reals AND dropna')
if allow_none:
if expect_all_real:
raise Error('cant allow none and expect all reals')
#===========================================================================
# build the request
#===========================================================================
#no geometry
if (geopropn is None) and (not geo_obj):
if fieldn is None:
raise Error('no field name provided')
request = request.setFlags(QgsFeatureRequest.NoGeometry)
request = request.setSubsetOfAttributes([fieldn],vlay.fields())
else:
request = request.setNoAttributes() #dont get any attributes
#===========================================================================
# selection limited
#===========================================================================
if selected:
"""
todo: check if there is already a fid filter placed on the requester
"""
log.debug('limiting data pull to %i selected features on \'%s\''%(
vlay.selectedFeatureCount(), vlay.name()))
sfids = vlay.selectedFeatureIds()
request = request.setFilterFids(sfids)
#===========================================================================
# loop through and collect the data
#===========================================================================
#if db_f: req_log(request, logger=log)
d = dict() #empty container for results
for feat in vlay.getFeatures(request):
#=======================================================================
# get geometry
#=======================================================================
if geo_obj:
d[feat.id()] = feat.geometry()
#=======================================================================
# get a geometry property
#=======================================================================
elif not geopropn is None:
geo = feat.geometry()
func = getattr(geo, geopropn) #get the method
d[feat.id()] = func() #call the method and store
#=======================================================================
# field request
#=======================================================================
else:
#empty shortcut
if qisnull(feat.attribute(fieldn)):
d[feat.id()] = np.nan
else: #pull real data
d[feat.id()] = feat.attribute(fieldn)
log.debug('retrieved %i attributes from features on \'%s\''%(
len(d), vlay.name()))
#===========================================================================
# null handling
#===========================================================================
if selected:
if not len(d) == vlay.selectedFeatureCount():
raise Error('failed to get data matching %i selected features'%(
vlay.selectedFeatureCount()))
if expect_all_real:
boolar = pd.isnull(np.array(list(d.values())))
if np.any(boolar):
raise Error('got %i nulls'%boolar.sum())
if dropna:
"""faster to use dfs?"""
log.debug('dropping nulls from %i'%len(d))
d2 = dict()
for k, v in d.items():
if np.isnan(v):
continue
d2[k] = v
d = d2 #reset
#===========================================================================
# post checks
#===========================================================================
if len(d) == 0:
log.warning('got no results! from \'%s\''%(
vlay.name()))
if not allow_none:
raise Error('allow_none=FALSE and no results')
"""
view(vlay)
"""
#===========================================================================
# rekey
#===========================================================================
if isinstance(rekey, str):
assert fmt=='dict'
d, _ = vlay_key_convert(vlay, d, rekey, id1_type='fid', logger=log)
#===========================================================================
# results
#===========================================================================
if fmt == 'dict':
return d
elif fmt == 'df':
return pd.DataFrame(pd.Series(d, name=fieldn))
elif fmt == 'singleton':
if not len(d)==1:
raise Error('expected singleton')
return next(iter(d.values()))
elif fmt == 'ser':
return pd.Series(d, name=fieldn)
else:
raise IOError | def vlay_get_fdata( #get data for a single field from all the features
vlay,
fieldn = None, #get a field name. 'None' returns a dictionary of np.nan
geopropn = None, #get a geometry property
geo_obj = False, #whether to just get the geometry object
request = None, #additional requester (limiting fids). fieldn still required. additional flags added
selected= False, #whether to limit data to just those selected features
fmt = 'dict', #format to return results in
#'singleton': expect and provide a unitary value
rekey = None, #field name to rekey dictionary like results by
expect_all_real = False, #whether to expect all real results
dropna = False, #whether to drop nulls from the results
allow_none = False,
logger = mod_logger, db_f=False):
"""
TODO: combine this with vlay_get_fdatas
consider combining with vlay_get_feats
I'm not sure how this will handle requests w/ expressions
"""
log = logger.getChild('vlay_get_fdata')
if request is None:
request = QgsFeatureRequest()
#===========================================================================
# prechecks
#===========================================================================
if geo_obj:
if fmt == 'df': raise IOError
if not geopropn is None: raise IOError
else:
assert fieldn in [f.name() for f in vlay.fields()], 'requested field not found: %s'%fieldn
if dropna:
if expect_all_real:
raise Error('cant expect_all_reals AND dropna')
if allow_none:
if expect_all_real:
raise Error('cant allow none and expect all reals')
#===========================================================================
# build the request
#===========================================================================
#no geometry
if (geopropn is None) and (not geo_obj):
if fieldn is None:
raise Error('no field name provided')
request = request.setFlags(QgsFeatureRequest.NoGeometry)
request = request.setSubsetOfAttributes([fieldn],vlay.fields())
else:
request = request.setNoAttributes() #dont get any attributes
#===========================================================================
# selection limited
#===========================================================================
if selected:
"""
todo: check if there is already a fid filter placed on the requester
"""
log.debug('limiting data pull to %i selected features on \'%s\''%(
vlay.selectedFeatureCount(), vlay.name()))
sfids = vlay.selectedFeatureIds()
request = request.setFilterFids(sfids)
#===========================================================================
# loop through and collect the data
#===========================================================================
#if db_f: req_log(request, logger=log)
d = dict() #empty container for results
for feat in vlay.getFeatures(request):
#=======================================================================
# get geometry
#=======================================================================
if geo_obj:
d[feat.id()] = feat.geometry()
#=======================================================================
# get a geometry property
#=======================================================================
elif not geopropn is None:
geo = feat.geometry()
func = getattr(geo, geopropn) #get the method
d[feat.id()] = func() #call the method and store
#=======================================================================
# field request
#=======================================================================
else:
#empty shortcut
if qisnull(feat.attribute(fieldn)):
d[feat.id()] = np.nan
else: #pull real data
d[feat.id()] = feat.attribute(fieldn)
log.debug('retrieved %i attributes from features on \'%s\''%(
len(d), vlay.name()))
#===========================================================================
# null handling
#===========================================================================
if selected:
if not len(d) == vlay.selectedFeatureCount():
raise Error('failed to get data matching %i selected features'%(
vlay.selectedFeatureCount()))
if expect_all_real:
boolar = pd.isnull(np.array(list(d.values())))
if np.any(boolar):
raise Error('got %i nulls'%boolar.sum())
if dropna:
"""faster to use dfs?"""
log.debug('dropping nulls from %i'%len(d))
d2 = dict()
for k, v in d.items():
if np.isnan(v):
continue
d2[k] = v
d = d2 #reset
#===========================================================================
# post checks
#===========================================================================
if len(d) == 0:
log.warning('got no results! from \'%s\''%(
vlay.name()))
if not allow_none:
raise Error('allow_none=FALSE and no results')
"""
view(vlay)
"""
#===========================================================================
# rekey
#===========================================================================
if isinstance(rekey, str):
assert fmt=='dict'
d, _ = vlay_key_convert(vlay, d, rekey, id1_type='fid', logger=log)
#===========================================================================
# results
#===========================================================================
if fmt == 'dict':
return d
elif fmt == 'df':
return pd.DataFrame(pd.Series(d, name=fieldn))
elif fmt == 'singleton':
if not len(d)==1:
raise Error('expected singleton')
return next(iter(d.values()))
elif fmt == 'ser':
return pd.Series(d, name=fieldn)
else:
raise IOError |
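A companion sketch for vlay_get_fdata, reusing the (hypothetical) layer from the previous example; the field name and the polygon-area call are assumptions about that layer, not facts from the source.
# single (hypothetical) field as {fid: value}
elv_d = vlay_get_fdata(vlay, fieldn='f0_elv', fmt='dict')

# geometry objects keyed by fid (the same pattern to_finv() uses below)
geo_d = vlay_get_fdata(vlay, geo_obj=True)

# a geometry property by method name, e.g. area for a polygon layer
area_d = vlay_get_fdata(vlay, geopropn='area')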
Python | def ptype_to_qtype(py_type, logger=mod_logger): #get the qtype corresponding to the passed pytype
"""useful for buildign Qt objects
really, this is a reverse
py_type=str
"""
if not inspect.isclass(py_type):
logger.error('got unexpected type \'%s\''%type(py_type))
raise Error('bad type')
#build a QVariant object from this python type class, then return its type
try:
qv = QVariant(py_type())
except:
logger.error('failed to build QVariant from \'%s\''%type(py_type))
raise IOError
"""
#get the type
QMetaType.typeName(qv.type())
"""
return qv.type() | def ptype_to_qtype(py_type, logger=mod_logger): #get the qtype corresponding to the passed pytype
"""useful for buildign Qt objects
really, this is a reverse
py_type=str
"""
if not inspect.isclass(py_type):
logger.error('got unexpected type \'%s\''%type(py_type))
raise Error('bad type')
#build a QVariant object from this python type class, then return its type
try:
qv = QVariant(py_type())
except:
logger.error('failed to build QVariant from \'%s\''%type(py_type))
raise IOError
"""
#get the type
QMetaType.typeName(qv.type())
"""
return qv.type() |
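A short sketch of ptype_to_qtype in isolation. It assumes a PyQt5 environment, and the commented names are the expected QMetaType names rather than output captured from the original project.
from PyQt5.QtCore import QMetaType

print(QMetaType.typeName(ptype_to_qtype(str)))    # expected: 'QString'
print(QMetaType.typeName(ptype_to_qtype(int)))    # expected: 'int'
print(QMetaType.typeName(ptype_to_qtype(float)))  # expected: 'double'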
Python | def addAll(self, #add all connections
qini_fp = None, #users settings path
newCons_d = None, #connections to load
):
#=======================================================================
# defaults
#=======================================================================
log = self.logger.getChild('addAll')
if newCons_d is None: newCons_d = self.newCons_d
if qini_fp is None: qini_fp = self.qini_fp
log.debug('addAll on %i'%len(newCons_d))
#=======================================================================
# initialize settings
#=======================================================================
assert os.path.exists(qini_fp), 'bad settings filepath: %s'%qini_fp
usets = QgsSettings(qini_fp, QSettings.IniFormat)
#navigate to group1
"""all connectins are in the qgis group"""
usets.beginGroup('qgis')
#=======================================================================
# loop and add each connection
#=======================================================================
for cname, newPars_d in copy.copy(newCons_d).items():
#navigate to this group within the settings
usets.beginGroup(newPars_d['group'])
"""TODO: add checks:
warn if this group already exists
check if connection is valid
"""
log.debug('setting %i parameters to group \'%s\' \n %s'%(
len(newPars_d), usets.group(), newPars_d))
#loop and add each setting to this group
for k, v in newPars_d.items():
if k=='group': continue
usets.setValue(k, v)
#return to the parent group
usets.endGroup()
usets.sync() #write unsaved changes to file
log.info('added %i connections: \n %s'%( len(newCons_d), list(newCons_d.keys())))
#=======================================================================
# check result
#=======================================================================
result, chk_d = self.checkSettingsGroup(newCons_d, logger=log)
assert result, 'failed to set some values \n %s'%chk_d
return newCons_d | def addAll(self, #add all connections
qini_fp = None, #users settings path
newCons_d = None, #connections to load
):
#=======================================================================
# defaults
#=======================================================================
log = self.logger.getChild('addAll')
if newCons_d is None: newCons_d = self.newCons_d
if qini_fp is None: qini_fp = self.qini_fp
log.debug('addAll on %i'%len(newCons_d))
#=======================================================================
# initialize settings
#=======================================================================
assert os.path.exists(qini_fp), 'bad settings filepath: %s'%qini_fp
usets = QgsSettings(qini_fp, QSettings.IniFormat)
#navigate to group1
"""all connectins are in the qgis group"""
usets.beginGroup('qgis')
#=======================================================================
# loop and add each connection
#=======================================================================
for cname, newPars_d in copy.copy(newCons_d).items():
#navigate to this group within the settings
usets.beginGroup(newPars_d['group'])
"""TODO: add checks:
warn if this group already exists
check if connection is valid
"""
log.debug('setting %i parameters to group \'%s\' \n %s'%(
len(newPars_d), usets.group(), newPars_d))
#loop and add each setting to this group
for k, v in newPars_d.items():
if k=='group': continue
usets.setValue(k, v)
#return to the parent group
usets.endGroup()
usets.sync() #write unsaved changes to file
log.info('added %i connections: \n %s'%( len(newCons_d), list(newCons_d.keys())))
#=======================================================================
# check result
#=======================================================================
result, chk_d = self.checkSettingsGroup(newCons_d, logger=log)
assert result, 'failed to set some values \n %s'%chk_d
return newCons_d |
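The core of addAll is the QgsSettings beginGroup/setValue/endGroup/sync pattern; the standalone sketch below mirrors it outside the class. The ini path, group name and key are illustrative assumptions, not the plugin's real connection parameters.
from PyQt5.QtCore import QSettings
from qgis.core import QgsSettings

usets = QgsSettings(r'C:\temp\QGIS3_demo.ini', QSettings.IniFormat)  # hypothetical file
usets.beginGroup('qgis')
usets.beginGroup('connections-wfs/demo')          # illustrative sub-group
usets.setValue('url', 'https://example.com/wfs')  # illustrative key/value
usets.endGroup()
usets.endGroup()
usets.sync()  # write unsaved changes to file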
Python | def displayDetails(self): #display details on the selected library
#log = self.logger.getChild('displayDetails')
#=======================================================================
# retrieve selection
#=======================================================================
#check we have a selection
if len(self.tableView.selectionModel().selectedRows())==0:
return
#get the selection index
"""should only allow 1 row.. but taking the first regardless"""
sindex = self.tableView.selectionModel().selectedRows()[0]
row = sindex.row()
#get this value
libName = self.dfModel.data(self.dfModel.index(row, 0))
#log.debug('user selected \'%s\''%libName)
self.libName = libName #set for retrieving curve details
#=======================================================================
# build data for this
#=======================================================================
df = self.vdetails_d[libName]
self.dfModel2 = pandasModel(df)
#=======================================================================
# send to the widget
#=======================================================================
self.tableView_right.setModel(self.dfModel2) | def displayDetails(self): #display details on the selected library
#log = self.logger.getChild('displayDetails')
#=======================================================================
# retrieve selection
#=======================================================================
#check we have a selection
if len(self.tableView.selectionModel().selectedRows())==0:
return
#get the selection index
"""should only allow 1 row.. but taking the first regardless"""
sindex = self.tableView.selectionModel().selectedRows()[0]
row = sindex.row()
#get this value
libName = self.dfModel.data(self.dfModel.index(row, 0))
#log.debug('user selected \'%s\''%libName)
self.libName = libName #set for retrieving curve details
#=======================================================================
# build data for this
#=======================================================================
df = self.vdetails_d[libName]
self.dfModel2 = pandasModel(df)
#=======================================================================
# send to the widget
#=======================================================================
self.tableView_right.setModel(self.dfModel2) |
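displayDetails relies on the pandas-to-Qt pattern sketched below: wrap a DataFrame in the project's pandasModel and hand it to a QTableView. pandasModel is defined elsewhere in this plugin, and the DataFrame contents and widget here are illustrative only.
import pandas as pd
from PyQt5.QtWidgets import QApplication, QTableView

app = QApplication([])   # a QApplication must exist before widgets are created
view = QTableView()
df = pd.DataFrame({'libName': ['demo_lib'], 'curves': [3]})  # illustrative data
view.setModel(pandasModel(df))   # same pattern as displayDetails() above
view.show()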
Python | def displayFiles(self): #show the xls files on the treeview for the selected library
"""
called when a library is selected
"""
#check we have a selection
if len(self.tableView.selectionModel().selectedRows())==0:
return
log = self.logger.getChild('displayFiles')
#=======================================================================
# retrieve selection
#=======================================================================
#get the selection index
"""should only allow 1 row.. but taking the first regardless"""
sindex = self.tableView.selectionModel().selectedRows()[0]
row = sindex.row()
#get this value
libName = self.dfModel.data(self.dfModel.index(row, 0))
#log.debug('user selected \'%s\''%libName)
#=======================================================================
# data setup
#=======================================================================
focus_dir = self.vdata_d[libName]['basedir']
#focus_dir = r'C:\LS\03_TOOLS\CanFlood\_git\canflood\_pars\vfunc'
#=======================================================================
# #build the model
#=======================================================================
assert os.path.exists(focus_dir)
fsModel = QFileSystemModel()
fsModel.setRootPath(focus_dir)
fsModel.setNameFilters(['*.xls'])
self.fsModel = fsModel
#=======================================================================
# #tree view
#=======================================================================
self.treeView.setModel(fsModel)
self.treeView.setRootIndex(fsModel.index(focus_dir))
log.debug('connected treeView to QFileSystemModel w/: \n %s'%focus_dir)
#adjust columns
header = self.treeView.header()
header.setSectionResizeMode(0, QHeaderView.ResizeToContents)
header.setStretchLastSection(False)
#self.treeView.resizeColumnToContents(0)
#=======================================================================
# connect it
#=======================================================================
self.treeView.selectionModel().selectionChanged.connect(self.dislpayCsDetails)
"""
if not self.dfModel3 is None:
self.dfModel3.clear()
#self.tableView_bottomRight.clearSpans() #clear the table view until next trigger
"""
try: #cleanup the model
self.tableView_bottomRight.setModel(pandasModel(pd.DataFrame())) #set a dummy model
del self.dfModel3
except:pass | def displayFiles(self): #show the xls files on the treeview for the selected library
"""
called when a library is selected
"""
#check we have a selection
if len(self.tableView.selectionModel().selectedRows())==0:
return
log = self.logger.getChild('displayFiles')
#=======================================================================
# retrieve selection
#=======================================================================
#get the selection index
"""should only allow 1 row.. but taking the first regardless"""
sindex = self.tableView.selectionModel().selectedRows()[0]
row = sindex.row()
#get this value
libName = self.dfModel.data(self.dfModel.index(row, 0))
#log.debug('user selected \'%s\''%libName)
#=======================================================================
# data setup
#=======================================================================
focus_dir = self.vdata_d[libName]['basedir']
#focus_dir = r'C:\LS\03_TOOLS\CanFlood\_git\canflood\_pars\vfunc'
#=======================================================================
# #build the model
#=======================================================================
assert os.path.exists(focus_dir)
fsModel = QFileSystemModel()
fsModel.setRootPath(focus_dir)
fsModel.setNameFilters(['*.xls'])
self.fsModel = fsModel
#=======================================================================
# #tree view
#=======================================================================
self.treeView.setModel(fsModel)
self.treeView.setRootIndex(fsModel.index(focus_dir))
log.debug('connected treeView to QFileSystemModel w/: \n %s'%focus_dir)
#adjust columns
header = self.treeView.header()
header.setSectionResizeMode(0, QHeaderView.ResizeToContents)
header.setStretchLastSection(False)
#self.treeView.resizeColumnToContents(0)
#=======================================================================
# connect it
#=======================================================================
self.treeView.selectionModel().selectionChanged.connect(self.dislpayCsDetails)
"""
if not self.dfModel3 is None:
self.dfModel3.clear()
#self.tableView_bottomRight.clearSpans() #clear the table view until next trigger
"""
try: #cleanup the model
self.tableView_bottomRight.setModel(pandasModel(pd.DataFrame())) #set a dummy model
del self.dfModel3
except:pass |
Python | def dislpayCsDetails(self): #display the selected curve set (xls) details
"""called when a curve set is selected"""
#=======================================================================
# #get the selection
#=======================================================================
fileName, filePath = self._get_cset_selection()
#=======================================================================
# build data for this
#=======================================================================
#=======================================================================
# assert self.libName in self.vdata_d, self.libName
# assert fileName in self.vdata_d[self.libName]['curves_d'], 'requested filename not found: %s'%filePath
#
# data = self.vdata_d[self.libName]['curves_d'][fileName]
#=======================================================================
data = self._load_cs(filePath)
df = pd.Series(data, name='values'
).to_frame().reset_index().rename(columns={'index':'var'})
self.dfModel3 = pandasModel(df)
#=======================================================================
# send to the widget
#=======================================================================
self.tableView_bottomRight.setModel(self.dfModel3)
#adjust columns
header = self.tableView_bottomRight.horizontalHeader()
for lindex in [0]: #resize specific columns to contents
header.setSectionResizeMode(lindex, QHeaderView.ResizeToContents)
header.setStretchLastSection(True) | def dislpayCsDetails(self): #display the selected curve set (xls) details
"""called when a curve set is selected"""
#=======================================================================
# #get the selection
#=======================================================================
fileName, filePath = self._get_cset_selection()
#=======================================================================
# build data for this
#=======================================================================
#=======================================================================
# assert self.libName in self.vdata_d, self.libName
# assert fileName in self.vdata_d[self.libName]['curves_d'], 'requested filename not found: %s'%filePath
#
# data = self.vdata_d[self.libName]['curves_d'][fileName]
#=======================================================================
data = self._load_cs(filePath)
df = pd.Series(data, name='values'
).to_frame().reset_index().rename(columns={'index':'var'})
self.dfModel3 = pandasModel(df)
#=======================================================================
# send to the widget
#=======================================================================
self.tableView_bottomRight.setModel(self.dfModel3)
#adjust columns
header = self.tableView_bottomRight.horizontalHeader()
for lindex in [0]: #resize specific columns to contents
header.setSectionResizeMode(lindex, QHeaderView.ResizeToContents)
header.setStretchLastSection(True) |
Python | def to_finv(self, #clean a raw vlay an add some finv colums
in_vlay,
drop_colns=['ogc_fid', 'fid'], #optional columns to drop from df
new_data = {},
newLayname = None,
logger=None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log = logger.getChild('to_finv')
if newLayname is None: newLayname = 'finv_%s'%in_vlay.name()
#=======================================================================
# precheck
#=======================================================================
assert isinstance(in_vlay, QgsVectorLayer)
dp = in_vlay.dataProvider()
log.info('on %s w/ %i feats and %i new columns'%(in_vlay.name(), dp.featureCount(), len(new_data)))
self.feedback.upd_prog(20)
#=======================================================================
# extract data
#=======================================================================
df_raw = vlay_get_fdf(in_vlay, logger=log)
geo_d = vlay_get_fdata(in_vlay, geo_obj=True, logger=log)
self.feedback.upd_prog(50)
#=======================================================================
# clean
#=======================================================================
#drop specified columns
df0 = df_raw.drop(drop_colns,axis=1, errors='ignore')
#convert empty strings to null
df1 = df0.replace(to_replace='', value=np.nan)
log.info('replaced %i (of %i) null values'%(df1.isna().sum().sum(), df1.size))
#drop empty fields
df2 = df1.dropna(axis=1, how='all')
log.info('dropped %i empty columns'%(len(df1.columns) - len(df2.columns)))
self.feedback.upd_prog(60)
#=======================================================================
# add fields
#=======================================================================
#build the new data
log.info('adding field data:\n %s'%new_data)
#join the two
res_df = df2.join(pd.DataFrame(index=df_raw.index, data=new_data))
self.feedback.upd_prog(70)
#=======================================================================
# check data
#=======================================================================
"""" no? not for this intermediate function?
self.check_finv()
"""
#=======================================================================
# reconstruct layer
#=======================================================================
finv_vlay = self.vlay_new_df2(res_df, geo_d=geo_d, crs=in_vlay.crs(),
logger=log,
layname = newLayname)
#=======================================================================
# wrap
#=======================================================================
fcnt = finv_vlay.dataProvider().featureCount()
assert fcnt == dp.featureCount()
log.info('finished w/ \'%s\' w/ %i feats'%(finv_vlay.name(), fcnt))
self.feedback.upd_prog(99)
return finv_vlay | def to_finv(self, #clean a raw vlay an add some finv colums
in_vlay,
drop_colns=['ogc_fid', 'fid'], #optional columns to drop from df
new_data = {},
newLayname = None,
logger=None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log = logger.getChild('to_finv')
if newLayname is None: newLayname = 'finv_%s'%in_vlay.name()
#=======================================================================
# precheck
#=======================================================================
assert isinstance(in_vlay, QgsVectorLayer)
dp = in_vlay.dataProvider()
log.info('on %s w/ %i feats and %i new columns'%(in_vlay.name(), dp.featureCount(), len(new_data)))
self.feedback.upd_prog(20)
#=======================================================================
# extract data
#=======================================================================
df_raw = vlay_get_fdf(in_vlay, logger=log)
geo_d = vlay_get_fdata(in_vlay, geo_obj=True, logger=log)
self.feedback.upd_prog(50)
#=======================================================================
# clean
#=======================================================================
#drop specified columns
df0 = df_raw.drop(drop_colns,axis=1, errors='ignore')
#convert empty strings to null
df1 = df0.replace(to_replace='', value=np.nan)
log.info('replaced %i (of %i) null values'%(df1.isna().sum().sum(), df1.size))
#drop empty fields
df2 = df1.dropna(axis=1, how='all')
log.info('dropped %i empty columns'%(len(df1.columns) - len(df2.columns)))
self.feedback.upd_prog(60)
#=======================================================================
# add fields
#=======================================================================
#build the new data
log.info('adding field data:\n %s'%new_data)
#join the two
res_df = df2.join(pd.DataFrame(index=df_raw.index, data=new_data))
self.feedback.upd_prog(70)
#=======================================================================
# check data
#=======================================================================
"""" no? not for this intermediate function?
self.check_finv()
"""
#=======================================================================
# reconstruct layer
#=======================================================================
finv_vlay = self.vlay_new_df2(res_df, geo_d=geo_d, crs=in_vlay.crs(),
logger=log,
layname = newLayname)
#=======================================================================
# wrap
#=======================================================================
fcnt = finv_vlay.dataProvider().featureCount()
assert fcnt == dp.featureCount()
log.info('finished w/ \'%s\' w/ %i feats'%(finv_vlay.name(), fcnt))
self.feedback.upd_prog(99)
return finv_vlay |
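A hedged call sketch for to_finv: 'worker' stands for whichever class instance exposes this method, and the new field names/values are illustrative finv columns, not names taken from the original model.
finv_vlay = worker.to_finv(
    in_vlay,                                         # any loaded QgsVectorLayer
    new_data={'f0_scale': 1.0, 'f0_tag': 'res_1s'},  # hypothetical finv columns
    newLayname='finv_demo')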
Python | def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI.
called when user unchecks the plugin
"""
log=self.logger.getChild('unload')
#=======================================================================
# unload toolbars
#=======================================================================
self.iface.mainWindow().removeToolBar( self.toolbar )
del self.toolbar
#=======================================================================
# unload menu
#=======================================================================
d = self.actions_d['menu']
"""not sure if this is needed"""
for attn, action in d.items():
self.iface.removePluginMenu(self.menu_name, action)
log.debug('unloaded %i from the menu: %s'%(len(d), list(d.keys())))
#=======================================================================
# unload expression functions
#=======================================================================
from misc.expressionFunctions import all_funcs_l
for func in all_funcs_l:
QgsExpression.unregisterFunction(func.name())
log.debug('unloaded %i expression functions'%len(all_funcs_l)) | def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI.
called when user unchecks the plugin
"""
log=self.logger.getChild('unload')
#=======================================================================
# unload toolbars
#=======================================================================
self.iface.mainWindow().removeToolBar( self.toolbar )
del self.toolbar
#=======================================================================
# unload menu
#=======================================================================
d = self.actions_d['menu']
"""not sure if this is needed"""
for attn, action in d.items():
self.iface.removePluginMenu(self.menu_name, action)
log.debug('unloaded %i from the menu: %s'%(len(d), list(d.keys())))
#=======================================================================
# unload expression functions
#=======================================================================
from misc.expressionFunctions import all_funcs_l
for func in all_funcs_l:
QgsExpression.unregisterFunction(func.name())
log.debug('unloaded %i expression functions'%len(all_funcs_l)) |
Python | def spectrum_to_xyz(spectrum: Callable) -> ndarray:
"""
Calculate the CIE X, Y, and Z coordinates corresponding to a light source
with spectral distribution given by the function "spectrum", which is called
with a series of wavelengths between 380 and 780 nm and returns the emittance
at each wavelength in arbitrary units. The chromaticity coordinates of the
spectrum are returned, respecting the identity x+y+z=1.
:param spectrum: function returning an emittance value at a given wavelength (nm)
:return: xyz value
"""
xyz = spectrum(WAVELENGTHS_380_780) @ CIE_XYZ_380_780
xyz /= sum(xyz)
return xyz | def spectrum_to_xyz(spectrum: Callable) -> ndarray:
"""
Calculate the CIE X, Y, and Z coordinates corresponding to a light source
with spectral distribution given by the function "spectrum", which is called
with a series of wavelengths between 380 and 780 nm and returns the emittance
at each wavelength in arbitrary units. The chromaticity coordinates of the
spectrum are returned, respecting the identity x+y+z=1.
:param spectrum: function returning an emittance value at a given wavelength (nm)
:return: xyz value
"""
xyz = spectrum(WAVELENGTHS_380_780) @ CIE_XYZ_380_780
xyz /= sum(xyz)
return xyz |
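spectrum_to_xyz only needs a callable over the module's wavelength grid; the sketch below feeds it a flat (equal-energy) emitter, assuming the WAVELENGTHS_380_780 and CIE_XYZ_380_780 constants are in scope as above.
import numpy as np

flat = lambda wl: np.ones_like(wl, dtype=float)  # equal energy at every wavelength
x, y, z = spectrum_to_xyz(flat)
print(x, y, z, x + y + z)  # chromaticities near the equal-energy white point, summing to 1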
Python | def gamma_correct(self, rgb) -> ndarray:
"""
Transform linear RGB values to nonlinear RGB values.
Rec. 709 is ITU-R Recommendation BT. 709 (1990) ``Basic Parameter Values for
the HDTV Standard for the Studio and for International Programme Exchange'',
formerly CCIR Rec. 709. For details see:
* http://www.poynton.com/ColorFAQ.html
* http://www.poynton.com/GammaFAQ.html
:param rgb: Linear RGB values to transform (gets modified!)
:return: The transformed nonlinear RGB values
"""
if self.value.gamma == GAMMA_REC709:
# Rec.709 gamma correction
small = rgb < GAMMA_REC709_CC
rgb[small] *= GAMMA_REC709_FACTOR
rgb[~small] = (1.099 * np.power(rgb[~small], 0.45)) - 0.099
else:
# Nonlinear colour = (Linear colour) ^ (1 / gamma)
np.power(rgb, 1.0 / self.value.gamma, out=rgb)
return rgb | def gamma_correct(self, rgb) -> ndarray:
"""
Transform linear RGB values to nonlinear RGB values.
Rec. 709 is ITU-R Recommendation BT. 709 (1990) ``Basic Parameter Values for
the HDTV Standard for the Studio and for International Programme Exchange'',
formerly CCIR Rec. 709. For details see:
* http://www.poynton.com/ColorFAQ.html
* http://www.poynton.com/GammaFAQ.html
:param rgb: Linear RGB values to transform (gets modified!)
:return: The transformed nonlinear RGB values
"""
if self.value.gamma == GAMMA_REC709:
# Rec.709 gamma correction
small = rgb < GAMMA_REC709_CC
rgb[small] *= GAMMA_REC709_FACTOR
rgb[~small] = (1.099 * np.power(rgb[~small], 0.45)) - 0.099
else:
# Nonlinear colour = (Linear colour) ^ (1 / gamma)
np.power(rgb, 1.0 / self.value.gamma, out=rgb)
return rgb |
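A standalone numeric sketch of the Rec.709 branch above; the 0.018 cutoff and 4.5 factor are the usual Rec.709 constants and are assumptions here, not values read from this file's GAMMA_REC709_CC / GAMMA_REC709_FACTOR.
import numpy as np

rgb = np.array([0.0, 0.01, 0.5, 1.0])
small = rgb < 0.018                     # assumed GAMMA_REC709_CC
rgb[small] *= 4.5                       # assumed GAMMA_REC709_FACTOR
rgb[~small] = 1.099 * np.power(rgb[~small], 0.45) - 0.099
print(rgb)                              # 1.0 maps back to 1.0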
Python | def inside_gamut(rgb: ndarray) -> bool:
"""
Test whether a requested colour is within the gamut achievable with the primaries
of the current colour system. This amounts simply to testing whether all the
primary weights are non-negative.
:param rgb: color value to test
:return: True when inside the gamut
"""
return all(rgb >= 0) | def inside_gamut(rgb: ndarray) -> bool:
"""
Test whether a requested colour is within the gamut achievable with the primaries
of the current colour system. This amounts simply to testing whether all the
primary weights are non-negative.
:param rgb: color value to test
:return: True when inside the gamut
"""
return all(rgb >= 0) |
Python | def spectrum(self, wl: Union[float, ndarray]) -> Union[float, ndarray]:
"""
Calculate, by Planck's radiation law, the emittance of a black body
of temperature temp at the given wavelength
:param wl: required wavelength (or wavelengths) in nm
:return: emittance(s)
"""
wlm = wl * 1e-9 # Wavelength to meters
return 3.74183e-16 * wlm ** -5. / (np.exp(0.014388 / (wlm * self.temp)) - 1.) | def spectrum(self, wl: Union[float, ndarray]) -> Union[float, ndarray]:
"""
Calculate, by Planck's radiation law, the emittance of a black body
of temperature temp at the given wavelength
:param wl: required wavelength (or wavelengths) in nm
:return: emittance(s)
"""
wlm = wl * 1e-9 # Wavelength to meters
return 3.74183e-16 * wlm ** -5. / (np.exp(0.014388 / (wlm * self.temp)) - 1.) |
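The same Planck expression can also be evaluated outside the class: the sketch below builds a 6500 K emitter as a plain callable and feeds it to spectrum_to_xyz() from earlier in this file (the temperature is an arbitrary example).
import numpy as np

planck_6500 = lambda wl: 3.74183e-16 * (wl * 1e-9) ** -5. / (np.exp(0.014388 / ((wl * 1e-9) * 6500.)) - 1.)
print(spectrum_to_xyz(planck_6500))  # chromaticity of a 6500 K black body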
Python | def constrain_rgb(rgb: ndarray) -> bool:
"""
If the requested RGB shade contains a negative weight for one of the primaries,
it lies outside the colour gamut accessible from the given triple of primaries.
Desaturate it by adding white, equal quantities of R, G, and B, enough to make
RGB all positive.
:param rgb: the color to constrain
:return: True if the components were modified
"""
w = - min(0, *rgb) # Amount of white needed
if w > 0:
rgb += w # Add just enough white to make r, g, b all positive
return True # Colour modified to fit RGB gamut
return False # Colour within RGB gamut | def constrain_rgb(rgb: ndarray) -> bool:
"""
If the requested RGB shade contains a negative weight for one of the primaries,
it lies outside the colour gamut accessible from the given triple of primaries.
Desaturate it by adding white, equal quantities of R, G, and B, enough to make
RGB all positive.
:param rgb: the color to constrain
:return: True if the components were modified
"""
w = - min(0, *rgb) # Amount of white needed
if w > 0:
rgb += w # Add just enough white to make r, g, b all positive
return True # Colour modified to fit RGB gamut
return False # Colour within RGB gamut |
Python | def norm_rgb(rgb: ndarray) -> ndarray:
"""
Scale linear RGB values so that the largest component equals 1.
This is a plain normalization step and does not apply gamma correction;
for the Rec. 709 gamma transform see gamma_correct(). For background see:
* http://www.poynton.com/ColorFAQ.html
* http://www.poynton.com/GammaFAQ.html
:param rgb: the color to normalize (gets modified)
:return: the modified RGB values
"""
greatest = max(rgb)
if greatest > 0:
rgb /= greatest
return rgb | def norm_rgb(rgb: ndarray) -> ndarray:
"""
Scale linear RGB values so that the largest component equals 1.
This is a plain normalization step and does not apply gamma correction;
for the Rec. 709 gamma transform see gamma_correct(). For background see:
* http://www.poynton.com/ColorFAQ.html
* http://www.poynton.com/GammaFAQ.html
:param rgb: the color to normalize (gets modified)
:return: the modified RGB values
"""
greatest = max(rgb)
if greatest > 0:
rgb /= greatest
return rgb |
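inside_gamut, constrain_rgb and norm_rgb are typically chained; the sketch below walks one out-of-gamut colour through them (the starting values are arbitrary).
import numpy as np

rgb = np.array([0.8, -0.2, 0.3])
print(inside_gamut(rgb))   # False: one primary has a negative weight
constrain_rgb(rgb)         # adds just enough white, in place -> [1.0, 0.0, 0.5]
norm_rgb(rgb)              # rescales in place so max(rgb) == 1
print(rgb)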
Python | def _detect_leading_silence(sound, silence_threshold=20.0, chunk_size=1, relative=True):
'''
sound is a pydub.AudioSegment
silence_threshold in dB
chunk_size in ms
iterate over chunks until you find the first one with sound
'''
trim_ms = 0 # ms
if relative:
dBFS = sound.dBFS
else:
dBFS = 0
assert chunk_size > 0 # to avoid infinite loop
while sound[trim_ms:trim_ms+chunk_size].dBFS < dBFS-silence_threshold and trim_ms < len(sound):
trim_ms += chunk_size
return trim_ms | def _detect_leading_silence(sound, silence_threshold=20.0, chunk_size=1, relative=True):
'''
sound is a pydub.AudioSegment
silence_threshold in dB
chunk_size in ms
iterate over chunks until you find the first one with sound
'''
trim_ms = 0 # ms
if relative:
dBFS = sound.dBFS
else:
dBFS = 0
assert chunk_size > 0 # to avoid infinite loop
while sound[trim_ms:trim_ms+chunk_size].dBFS < dBFS-silence_threshold and trim_ms < len(sound):
trim_ms += chunk_size
return trim_ms |
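A hedged trimming recipe built on _detect_leading_silence; it assumes pydub (and ffmpeg) are installed, and the wav file names are placeholders.
from pydub import AudioSegment

sound = AudioSegment.from_file('demo.wav', format='wav')      # hypothetical input
start_trim = _detect_leading_silence(sound)
end_trim = _detect_leading_silence(sound.reverse())
trimmed = sound[start_trim:len(sound) - end_trim]
trimmed.export('demo_trimmed.wav', format='wav')              # hypothetical output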
Python | def read_text(fn):
'''
read phone alignments from file of the format:
start end phone
'''
with open(fn) as f:
file_contents = f.read()
phones = file_contents.strip().split()
return phones | def read_text(fn):
'''
read phone alignments from file of the format:
start end phone
'''
with open(fn) as f:
file_contents = f.read()
phones = file_contents.strip().split()
return phones |
Python | def create_hparams(hparams_string=None, verbose=False):
"""Create model hyperparameters. Parse nondefault from given string."""
hparams = tf.contrib.training.HParams(
################################
# Experiment Parameters #
################################
epochs=200,
iters_per_checkpoint=1000,
seed=1234,
distributed_run=False,
dist_backend="nccl",
dist_url="tcp://localhost:54321",
cudnn_enabled=True,
cudnn_benchmark=False,
################################
# Data Parameters #
################################
training_list='/home/users/huiqing_lin/scratch/DS_10283_2651/VCTK-Corpus/train.csv',
validation_list='/home/users/huiqing_lin/scratch/DS_10283_2651/VCTK-Corpus/val.csv',
mel_mean_std='/home/users/huiqing_lin/scratch/DS_10283_2651/VCTK-Corpus/mel_mean_std.npy',
################################
# Data Parameters #
################################
n_mel_channels=80,
n_spc_channels=1025,
n_symbols=41, #
n_speakers=108, #
predict_spectrogram=False,
################################
# Model Parameters #
################################
symbols_embedding_dim=512,
# Text Encoder parameters
encoder_kernel_size=5,
encoder_n_convolutions=3,
encoder_embedding_dim=512,
text_encoder_dropout=0.5,
# Audio Encoder parameters
spemb_input=False,
n_frames_per_step_encoder=2,
audio_encoder_hidden_dim=512,
AE_attention_dim=128,
AE_attention_location_n_filters=32,
AE_attention_location_kernel_size=51,
beam_width=10,
# hidden activation
# relu linear tanh
hidden_activation='tanh',
#Speaker Encoder parameters
speaker_encoder_hidden_dim=256,
speaker_encoder_dropout=0.2,
speaker_embedding_dim=128,
#Speaker Classifier parameters
SC_hidden_dim=512,
SC_n_convolutions=3,
SC_kernel_size=1,
# Decoder parameters
feed_back_last=True,
n_frames_per_step_decoder=2,
decoder_rnn_dim=512,
prenet_dim=[256,256],
max_decoder_steps=1000,
stop_threshold=0.5,
# Attention parameters
attention_rnn_dim=512,
attention_dim=128,
# Location Layer parameters
attention_location_n_filters=32,
attention_location_kernel_size=17,
# PostNet parameters
postnet_n_convolutions=5,
postnet_dim=512,
postnet_kernel_size=5,
postnet_dropout=0.5,
################################
# Optimization Hyperparameters #
################################
use_saved_learning_rate=False,
learning_rate=1e-3,
weight_decay=1e-6,
grad_clip_thresh=5.0,
batch_size=32,
contrastive_loss_w=30.0,
speaker_encoder_loss_w=1.0,
text_classifier_loss_w=1.0,
speaker_adversial_loss_w=20.,
speaker_classifier_loss_w=0.1,
ce_loss=False
)
if hparams_string:
tf.logging.info('Parsing command line hparams: %s', hparams_string)
hparams.parse(hparams_string)
if verbose:
tf.logging.info('Final parsed hparams: %s', list(hparams.values()))
return hparams | def create_hparams(hparams_string=None, verbose=False):
"""Create model hyperparameters. Parse nondefault from given string."""
hparams = tf.contrib.training.HParams(
################################
# Experiment Parameters #
################################
epochs=200,
iters_per_checkpoint=1000,
seed=1234,
distributed_run=False,
dist_backend="nccl",
dist_url="tcp://localhost:54321",
cudnn_enabled=True,
cudnn_benchmark=False,
################################
# Data Parameters #
################################
training_list='/home/users/huiqing_lin/scratch/DS_10283_2651/VCTK-Corpus/train.csv',
validation_list='/home/users/huiqing_lin/scratch/DS_10283_2651/VCTK-Corpus/val.csv',
mel_mean_std='/home/users/huiqing_lin/scratch/DS_10283_2651/VCTK-Corpus/mel_mean_std.npy',
################################
# Data Parameters #
################################
n_mel_channels=80,
n_spc_channels=1025,
n_symbols=41, #
n_speakers=108, #
predict_spectrogram=False,
################################
# Model Parameters #
################################
symbols_embedding_dim=512,
# Text Encoder parameters
encoder_kernel_size=5,
encoder_n_convolutions=3,
encoder_embedding_dim=512,
text_encoder_dropout=0.5,
# Audio Encoder parameters
spemb_input=False,
n_frames_per_step_encoder=2,
audio_encoder_hidden_dim=512,
AE_attention_dim=128,
AE_attention_location_n_filters=32,
AE_attention_location_kernel_size=51,
beam_width=10,
# hidden activation
# relu linear tanh
hidden_activation='tanh',
#Speaker Encoder parameters
speaker_encoder_hidden_dim=256,
speaker_encoder_dropout=0.2,
speaker_embedding_dim=128,
#Speaker Classifier parameters
SC_hidden_dim=512,
SC_n_convolutions=3,
SC_kernel_size=1,
# Decoder parameters
feed_back_last=True,
n_frames_per_step_decoder=2,
decoder_rnn_dim=512,
prenet_dim=[256,256],
max_decoder_steps=1000,
stop_threshold=0.5,
# Attention parameters
attention_rnn_dim=512,
attention_dim=128,
# Location Layer parameters
attention_location_n_filters=32,
attention_location_kernel_size=17,
# PostNet parameters
postnet_n_convolutions=5,
postnet_dim=512,
postnet_kernel_size=5,
postnet_dropout=0.5,
################################
# Optimization Hyperparameters #
################################
use_saved_learning_rate=False,
learning_rate=1e-3,
weight_decay=1e-6,
grad_clip_thresh=5.0,
batch_size=32,
contrastive_loss_w=30.0,
speaker_encoder_loss_w=1.0,
text_classifier_loss_w=1.0,
speaker_adversial_loss_w=20.,
speaker_classifier_loss_w=0.1,
ce_loss=False
)
if hparams_string:
tf.logging.info('Parsing command line hparams: %s', hparams_string)
hparams.parse(hparams_string)
if verbose:
tf.logging.info('Final parsed hparams: %s', list(hparams.values()))
return hparams |
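create_hparams can be called with a comma-separated override string, as sketched below; this assumes the TF1-era tf.contrib.training.HParams API used above is available.
hparams = create_hparams('batch_size=16,learning_rate=0.0005,epochs=50')
print(hparams.batch_size, hparams.learning_rate, hparams.epochs)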
Python | def L2Loss():
"""Returns a layer that computes an L2-like loss for one batch."""
def f(model_output, targets, weights): # pylint: disable=invalid-name
"""Returns weighted sum-of-squared-errors for `model_output` vs. `targets`.
Args:
model_output: Output from one batch, typically a 2- or 3-d array of
float-valued elements.
targets: Tensor of same shape as `model_output` containing element-wise
target values.
weights: Tensor of same shape as `model_output` and `targets`, containing
element-wise weight values.
"""
shapes.assert_same_shape(model_output, targets)
shapes.assert_same_shape(targets, weights)
weighted_sse = weights * (model_output - targets)**2
return jnp.sum(weighted_sse) / jnp.sum(weights)
return base.Fn('L2Loss', f) | def L2Loss():
"""Returns a layer that computes an L2-like loss for one batch."""
def f(model_output, targets, weights): # pylint: disable=invalid-name
"""Returns weighted sum-of-squared-errors for `model_output` vs. `targets`.
Args:
model_output: Output from one batch, typically a 2- or 3-d array of
float-valued elements.
targets: Tensor of same shape as `model_output` containing element-wise
target values.
weights: Tensor of same shape as `model_output` and `targets`, containing
element-wise weight values.
"""
shapes.assert_same_shape(model_output, targets)
shapes.assert_same_shape(targets, weights)
weighted_sse = weights * (model_output - targets)**2
return jnp.sum(weighted_sse) / jnp.sum(weights)
return base.Fn('L2Loss', f) |
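A plain-numpy sketch of the weighted sum-of-squared-errors computed by f() above (the trax layer machinery is left out); the arrays are arbitrary example values.
import numpy as np

y_hat = np.array([[0.5, 1.0], [2.0, 0.0]])
y = np.array([[1.0, 1.0], [1.0, 0.0]])
w = np.array([[1.0, 1.0], [0.0, 1.0]])            # the zero weight masks the large error
print(np.sum(w * (y_hat - y) ** 2) / np.sum(w))   # 0.25 / 3 ~= 0.0833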
Python | def SmoothL1Loss():
"""Returns a layer that computes total smooth L1 loss for one batch."""
def smoothl1loss(model_output, targets, weights): # pylint: disable=invalid-name
r"""Returns weighted smooth L1 norm of `model_output - targets`.
The smooth L1 loss, also known as the Huber loss, is defined as:
.. math::
z_i =
\begin{cases}
0.5 (x_i - y_i)^2, & \text{if } |x_i - y_i| < 1 \\
|x_i - y_i| - 0.5, & \text{otherwise }
\end{cases}
Args:
model_output: Output from one batch, treated as an unanalyzed tensor.
targets: Tensor of same shape as `model_output` containing element-wise
target values.
weights: Tensor of same shape as `model_output` and `targets`, containing
element-wise weight values.
"""
shapes.assert_same_shape(model_output, targets)
shapes.assert_same_shape(targets, weights)
l1_dist = jnp.abs(model_output - targets)
smooth_dist = jnp.where(l1_dist < 1,
0.5 * l1_dist**2,
l1_dist - 0.5)
shapes.assert_same_shape(smooth_dist, weights)
weighted_smooth_dist = weights * smooth_dist
return jnp.sum(weighted_smooth_dist) / jnp.sum(weights)
return base.Fn('SmoothL1Loss', smoothl1loss) | def SmoothL1Loss():
"""Returns a layer that computes total smooth L1 loss for one batch."""
def smoothl1loss(model_output, targets, weights): # pylint: disable=invalid-name
r"""Returns weighted smooth L1 norm of `model_output - targets`.
The smooth L1 loss, also known as the Huber loss, is defined as:
.. math::
z_i =
\begin{cases}
0.5 (x_i - y_i)^2, & \text{if } |x_i - y_i| < 1 \\
|x_i - y_i| - 0.5, & \text{otherwise }
\end{cases}
Args:
model_output: Output from one batch, treated as an unanalyzed tensor.
targets: Tensor of same shape as `model_output` containing element-wise
target values.
weights: Tensor of same shape as `model_output` and `targets`, containing
element-wise weight values.
"""
shapes.assert_same_shape(model_output, targets)
shapes.assert_same_shape(targets, weights)
l1_dist = jnp.abs(model_output - targets)
smooth_dist = jnp.where(l1_dist < 1,
0.5 * l1_dist**2,
l1_dist - 0.5)
shapes.assert_same_shape(smooth_dist, weights)
weighted_smooth_dist = weights * smooth_dist
return jnp.sum(weighted_smooth_dist) / jnp.sum(weights)
return base.Fn('SmoothL1Loss', smoothl1loss) |
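The same kind of numpy sketch for the smooth-L1 (Huber) branch above, with arbitrary example values and unit weights.
import numpy as np

d = np.abs(np.array([0.2, 3.0]) - np.array([0.0, 0.0]))
smooth = np.where(d < 1, 0.5 * d ** 2, d - 0.5)
print(smooth.mean())   # (0.02 + 2.5) / 2 = 1.26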
Python | def _CrossEntropy():
"""Returns a layer that computes prediction-target cross entropies."""
def f(model_output, target_category): # pylint: disable=invalid-name
# TODO(pkozakowski): This assertion breaks some tests. Fix and uncomment.
# shapes.assert_shape_equals(target_category, model_output.shape[:-1])
target_distribution = one_hot(target_category, model_output.shape[-1])
return -1.0 * jnp.sum(model_output * target_distribution, axis=-1)
return base.Fn('_CrossEntropy', f) | def _CrossEntropy():
"""Returns a layer that computes prediction-target cross entropies."""
def f(model_output, target_category): # pylint: disable=invalid-name
# TODO(pkozakowski): This assertion breaks some tests. Fix and uncomment.
# shapes.assert_shape_equals(target_category, model_output.shape[:-1])
target_distribution = one_hot(target_category, model_output.shape[-1])
return -1.0 * jnp.sum(model_output * target_distribution, axis=-1)
return base.Fn('_CrossEntropy', f) |
Python | def main(output_file, shebang, vararg_pattern, script_arg):
"""bindit_partial constructs a shell script wrapper for bindit (or your container
runner directly) that can be used as a command line interface for the container. It
works a bit like functools.partial in the standard library - you can offload some
default parameters (e.g. for volume binds mounts) to the script in order to obtain a
cleaner API for the container.
For main documentation, see bindit.
"""
script_arg = list(script_arg)
# detect dryrun mode, and remove that arg. Re-insert it later. (ie, you can generate
# a dryrun app if that's your thing)
dry_index = None
try:
dry_index = script_arg.index("-d")
script_arg = (*script_arg[:dry_index], *script_arg[dry_index + 1:])
except ValueError:
pass
except:
raise
try:
dry_index = script_arg.index("--dryrun")
script_arg = (*script_arg[:dry_index], *script_arg[dry_index + 1:])
except ValueError:
pass
except:
raise
# script_arg[0] does not have to be "bindit" - you could use this to create the
# binds when building the app, and then run e.g. docker directly (might be
# attractive e.g. on HPC if you don't want bindit on the path everywhere). But in
# this case you of course lose the ability to bind new input paths on the fly when
# you run the app.
start_ind = 0
line = ""
if script_arg[0] == "bindit":
start_ind = 1
line = "bindit "
ret = bindit.shell.run(
"bindit", "--dryrun", *script_arg[start_ind:], interactive=False
)
if dry_index:
line += "--dryrun "
# last line (in case you are using verbose bindit args)
line += ret.stdout.split("\n")[-2]
all_lines = [shebang + "\n", line + " " + vararg_pattern + "\n"]
if output_file:
with open(output_file, "w") as file_handle:
file_handle.writelines(all_lines)
else:
sys.stdout.writelines(all_lines)
    return |
Python | def remove_redundant_binds(binds):
"""Remove entries in the dict binds that are sub-directories of another key.
Operates in-place.
"""
sources = set(binds.keys())
for candidate in sources:
remaining = sources ^ set([candidate])
# if a parent of candidate is already bound, we can safely remove it
if any([test in candidate.parents for test in remaining]):
del binds[candidate]
    return |
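# Example (not part of the original source): the redundancy rule above in action.
# /data/images is dropped because its parent /data is already bound; the paths
# are illustrative only.
import pathlib

binds = {
    pathlib.Path("/data"): "/bindit/data",
    pathlib.Path("/data/images"): "/bindit/data/images",
    pathlib.Path("/scratch"): "/bindit/scratch",
}
remove_redundant_binds(binds)
print(sorted(str(p) for p in binds))  # ['/data', '/scratch']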
Python | def bind_dict_to_arg(mapper, new_binds):
"""Return a generator that converts new_binds to valid container-runner bind
arguments.
Args:
mapper (callable): Function that returns a key, val argument pair when called
with mapper(source, dest). For example, see docker.volume_bind_args
new_binds (dict): binds specified in source:destination format
Returns:
generator: returns a [key, val] argument pair for each key in new_binds
"""
# concatenated list of tuples - surprisingly ugly for python but efficient
return itertools.chain.from_iterable(
(mapper(source, dest) for source, dest in new_binds.items())
    ) |
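# Example (not part of the original source): flattening a bind dict into CLI
# arguments with a docker-style mapper. The mapper below is a toy stand-in for
# bindit.docker.volume_bind_args.
def _mapper(source, dest):
    return "-v", f"{source}:{dest}"

new_binds = {"/data": "/bindit/data", "/scratch": "/bindit/scratch"}
print(list(bind_dict_to_arg(_mapper, new_binds)))
# ['-v', '/data:/bindit/data', '-v', '/scratch:/bindit/scratch']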
Python | def parse_container_args(
args_iter, bind_parser=None, valid_args=None, valid_letters=None
):
"""Parse arguments to container runner (e.g., docker run). Typically used as the
first pass of a CLI application (e.g., bindit.docker.docker).
Args:
args_iter (iterator): arg_pairs iterator of arguments
bind_parser (dict): keys as bind mount flags and values as handles to functions
that parse such flags into a {source: dest} dict. See e.g.
bindit.docker.BIND_PARSER
        valid_args (dict): keys as valid container runner arguments and values as the
            expected type of the argument (or None for boolean flags). See e.g.
            bindit.docker.ARGS
valid_letters (set): single-letter boolean flags. Used to detect arbitrary
combinations of letters (e.g., docker run -it)
Returns:
tuple: (list: detected args to the container runner (DOES NOT include any new
binds at this stage), dict: defines user-provided bind mounts
(manual_binds[source] = dest), str: detected container image)
"""
container_args = []
manual_binds = {}
gotkv = False
for key, val in args_iter:
if key in bind_parser:
# here's a user-defined volume bind. Let's make sure we don't mess with it
# if it appears in the container image arguments
user_bind = bind_parser[key](val)
manual_binds.update(user_bind)
LOGGER.debug(f"added user-defined bind to manual_binds: {user_bind}")
if key in valid_args:
if valid_args[key]:
                # this is a key-value pair - so we expect the next argument to be the value
gotkv = True
container_args += [key, val]
LOGGER.debug(f"new key-val arg: {key}={val}")
else:
# just a boolean flag - so we don't expect a value argument next
gotkv = False
container_args += [key]
LOGGER.debug(f"new flag: {key}")
elif gotkv:
# this is the value side of a k-v pair - so we expect a key or flag next
gotkv = False
continue
elif valid_letters and not (set(key) - valid_letters):
# multi-letter boolean flag ('docker run -it' and such)
gotkv = False
container_args += [key]
LOGGER.debug(f"multi-letter flag: {key}")
else:
# something that's not a key or a value. So it's the container name by
# process of elimination
container_name = key
LOGGER.debug(f"identified container as: {key}")
break
    return container_args, manual_binds, container_name |
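# Example (not part of the original source): a rough sketch of how this first
# pass splits a docker-style argument list. `pairwise` is a simplified stand-in
# for bindit.arg_pairs (assumed to yield each argument together with the one
# that follows it), and the bind parser / argument tables are toy values.
def pairwise(args):
    args = list(args) + [None]
    return zip(args, args[1:])

argv = ["--rm", "-v", "/data:/data", "ubuntu", "echo", "hi"]
container_args, manual_binds, image = parse_container_args(
    pairwise(argv),
    bind_parser={"-v": lambda v: dict([tuple(v.split(":")[:2])])},
    valid_args={"--rm": "", "-v": "string"},  # empty value marks a boolean flag
    valid_letters={"-"},
)
# container_args -> ['--rm', '-v', '/data:/data']
# manual_binds   -> {'/data': '/data'}
# image          -> 'ubuntu'; the remaining args are handled by parse_image_args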
Python | def arg_to_file_paths(arg):
"""Generator that returns valid file paths in the input arg, splitting according to
shell characters (with shlex.split) and on ARG_SPLIT_PATTERN. Paths are valid if
they exist, are absolute (if ABS_ONLY), and do not have any IGNORE_PATH as
parents.
"""
for candidate in shlex.split(arg):
for this_split in re.split(ARG_SPLIT_PATTERN, candidate):
if not this_split:
# skip empty str since these get mapped as valid '.' paths
continue
this_path = pathlib.Path(this_split)
abs_ok = this_path.is_absolute() or not ABS_ONLY
# check that this_path is not in an ignored path or its sub-directories
resolved_path = this_path.resolve()
ignore_ok = all(
[
not this_ignore == resolved_path
and this_ignore not in resolved_path.parents
for this_ignore in IGNORE_PATH
]
)
# any non-existent path is fine as long as it's absolute
# but relative paths must exist to control false positives
exist_ok = this_path.is_absolute() or resolved_path.exists()
if exist_ok:
LOGGER.debug(f"detected path {this_path}")
LOGGER.debug(f"absolute path pass={abs_ok}")
LOGGER.debug(f"ignore path pass={ignore_ok}")
if exist_ok and abs_ok and ignore_ok:
                yield this_path |
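# Example (not part of the original source): a self-contained sketch of the same
# idea - split an argument on shell syntax plus a separator pattern, then keep
# the pieces that look like absolute paths. ARG_SPLIT is a stand-in for the
# module's ARG_SPLIT_PATTERN.
import pathlib
import re
import shlex

ARG_SPLIT = r"[=,:]"

def candidate_paths(arg):
    for token in shlex.split(arg):
        for piece in re.split(ARG_SPLIT, token):
            if piece and pathlib.Path(piece).is_absolute():
                yield pathlib.Path(piece)

print(list(candidate_paths("--input=/data/raw.csv --out /results")))
# [PosixPath('/data/raw.csv'), PosixPath('/results')] on a POSIX host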
Python | def parse_image_args(args_iter, manual_binds):
"""Parse arguments to the container image, rebasing binds as necessary to make paths
available inside the container. Typically used as the second pass of a CLI
application (following parse_container_args, see e.g., bindit.docker.docker).
Args:
args_iter (iterator): arg_pairs iterator of arguments (generally the same you
would use in parse_container_args to make sure you're in the right place)
manual_binds (dict): defines user-provided bind mounts
(manual_binds[source] = dest)
Returns:
tuple: (list: args to the image (DOES include rebasing of any args that are
deemed file paths according to new_binds), dict: defines new bind mounts
(new_binds[source] = dest))
"""
image_args = []
new_binds = {}
# So we continue working on the same iterator... but now we don't care about
# key/value - we just want the keys (and because we added a final None, the final _
# is always irrelevant)
for in_arg, _ in args_iter:
if in_arg is None:
# special case - container with no image_args
continue
# handle potentially multiple paths in this in_arg
for this_path in arg_to_file_paths(in_arg):
# we have a path that needs to be remapped
full_path = this_path.resolve()
this_dir = full_path.parent
# can only bind directories
if full_path.is_dir():
this_dir = full_path
# detect manual binds that have a shared base
try:
# pick the first manually-specified bind that matches
manual_parent = next(
this_manual_bind
for this_manual_bind in manual_binds.keys()
if this_manual_bind == this_dir
or this_manual_bind in this_dir.parents
)
# use the manual_bind to map (inserting any additional sub-directories
# as necessary)
new_base = manual_binds[manual_parent] / this_dir.relative_to(
manual_parent
)
LOGGER.debug(f"rebasing on manual bind: {new_base}")
except StopIteration:
LOGGER.debug(f"none of these manual binds match: {manual_binds.keys()}")
# no manual binds match, so the remaining possibility is that it's a new
# bind
if this_dir not in new_binds:
new_binds[this_dir] = pathlib.PosixPath(
"/bindit"
) / this_dir.relative_to(this_dir.anchor)
LOGGER.debug(f"creating new bind: {new_binds[this_dir]}")
# NB indent - the bind might already exist
new_base = new_binds[this_dir]
except:
# something else went wrong with that tricky generator expression
raise
# and we now need to remap the original in_arg accordingly
new_path = new_base / full_path.name
if full_path.is_dir():
# avoid repeating the directory name twice (the one edge case where the
# old os.path.split made more sense than pathlib)
new_path = new_base
LOGGER.debug(f"rebasing in_arg path: {this_path}:{new_path}")
in_arg = in_arg.replace(str(this_path), str(new_path))
# NB indent - in all cases in_arg needs to be added to image_args
image_args.append(in_arg)
# avoid binding the same path twice (ie, parent and sub-directory)
remove_redundant_binds(new_binds)
    return image_args, new_binds |
Python | def run(*arg, interactive=False):
"""subprocess.run wrapper to handle exceptions, writing to stdout/stderr or not."""
stdout = subprocess.PIPE
stderr = subprocess.PIPE
if interactive:
stdout = None
stderr = None
try:
ret = subprocess.run(
arg, stdout=stdout, stderr=stderr, check=True, shell=False, encoding="utf-8"
)
except subprocess.CalledProcessError as ret:
print(f"command line exception with args: {arg}")
if not interactive:
sys.stdout.write(ret.stdout)
sys.stderr.write(ret.stderr)
sys.exit(ret.returncode)
except BaseException:
raise
    return ret |
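# Example (not part of the original source): typical use of the wrapper above.
# With interactive=False (the default) the child's output is captured on the
# returned CompletedProcess.
ret = run("echo", "hello from bindit")
print(ret.returncode)      # 0
print(ret.stdout.strip())  # hello from bindit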
Python | def is_running(self):
        """Return True if a container with this name exists (the query uses --all, so stopped containers match too)."""
ret = bindit.shell.run(
"docker", "container", "ls", "-q", "--all", "-f", f"name={self.name}"
)
if ret.stdout:
return True
        return False |
Python | def has_file(self, destpath):
"""return true if destpath is present."""
ret = self.exec("ls", "-1", destpath.parent)
files = ret.stdout.split("\n")
        return destpath.name in files |
Python | def main(loglevel, dryrun, absonly, ignorepath):
"""bindit is a wrapper for container runners that makes it easy to handle file input
and output for containerized command-line applications. It works by detecting file
paths in the container image arguments, and rebasing these as necessary onto new
bind mounts.
"""
bindit.LOGGER.setLevel(loglevel)
bindit.DRY_RUN = dryrun
bindit.ABS_ONLY = absonly
bindit.IGNORE_PATH += [pathlib.Path(p) for p in ignorepath]
    return |
Python | def infer_docker_cli():
"""infer valid docker run arguments by parsing the output from docker run --help.
Returns a dict of key-value pairs and a set of single-letter flags (a quirk of the
docker API is that multiple letters can be combined under a single hyphen, e.g.
-it, but only if these are short-hand versions of boolean flags, so e.g. -v can't be
used in this way). Provides inputs for bindit.parse_container_args."""
try:
ret = bindit.shell.run("docker", "run", "--help")
except FileNotFoundError:
bindit.LOGGER.warning(
"WARNING: docker not on path, functionality will be limited."
)
return {}, set()
except:
raise
rows = ret.stdout.split("\n")
valid_args = {}
# these can be arbitrarily combined (e.g. -it) so need to be parsed separately
letters = ["-"]
for thisrow in rows:
# this works because there is only one arg per row
try:
            # + 1 to skip over the leading space
indstart = thisrow.index(" --") + 1
# we got one
indend = thisrow[indstart:].index(" ") + indstart
newflag = thisrow[indstart:indend]
# empty if it's a boolean flag, type if it's a key-value pair
value = thisrow[indend + 1:].split(" ")[0]
bindit.LOGGER.debug(f"new flag {newflag} {value}")
valid_args[newflag] = value
# there might also be a short-hand version, which is always single-hyphen,
# single character
if thisrow[2] == "-":
bindit.LOGGER.debug(f"\twith short-hand letter version {thisrow[2:4]}")
valid_args[thisrow[2:4]] = value
letters.append(thisrow[3])
except ValueError:
# bad index means no hit
pass
except BaseException:
raise
    return valid_args, set(letters) |
Python | def volume_bind_args(source, dest):
"""return tuple specifying a source:dest volume bind mount in docker format."""
    # docker run struggles to follow symlinks on mac, see
# https://github.com/docker/for-mac/issues/1298
# (and tempfile generates symlinked /var paths...)
source = pathlib.Path(source).resolve()
    return "-v", f"{source}:{dest}" |
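# Example (not part of the original source): the "-v" pair produced for a bind;
# the source is resolved to an absolute real path first. Paths are illustrative.
print(volume_bind_args("/data/images", "/bindit/data/images"))
# ('-v', '/data/images:/bindit/data/images')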
Python | def parse_bind_mount(bind_arg):
"""unpack bind-mount bind_arg (e.g., src=/foo,dst=/bar) to dict where the key is a
    resolved pathlib.Path and the value is an unresolved (in-container)
pathlib.PosixPath."""
mount_dict = dict([kv.split("=") for kv in bind_arg.split(",")])
source_key = next(k for k in mount_dict if k in ["source", "src"])
dest_key = next(k for k in mount_dict if k in ["destination", "dst", "target"])
return {
pathlib.Path(mount_dict[source_key]).resolve(): pathlib.PosixPath(
mount_dict[dest_key]
)
    } |
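# Example (not part of the original source): parsing a --mount style value.
print(parse_bind_mount("type=bind,src=/data,dst=/bindit/data"))
# {PosixPath('/data'): PosixPath('/bindit/data')} on a POSIX host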
Python | def parse_bind_volume(bind_arg):
"""unpack volume bind bind_arg (e.g., /foo:/bar) to dict where the key is a
    resolved pathlib.Path and the value is an unresolved (in-container)
pathlib.PosixPath."""
# can be up to three, but we only want the first two
bind_arg = bind_arg.split(":")
src, dst = bind_arg[:2]
assert len(bind_arg) < 4, "unexpected number of bind_arg"
    return {pathlib.Path(src).resolve(): pathlib.PosixPath(dst)} |
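# Example (not part of the original source): parsing the short -v syntax; a
# trailing mode such as ":ro" is tolerated but ignored here.
print(parse_bind_volume("/data:/bindit/data:ro"))
# {PosixPath('/data'): PosixPath('/bindit/data')} on a POSIX host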
Python | def run(run_args):
"""click.command that casts run_args to lists and handles parsing of the arguments,
adding volume binds as necessary and running the container (if not DRY_RUN)."""
args_iter = bindit.arg_pairs(run_args)
# handle arguments to the container runner
container_args, manual_binds, container_name = bindit.parse_container_args(
args_iter, bind_parser=BIND_PARSER, valid_args=ARGS, valid_letters=LETTERS
)
# handle arguments to the image, including any rebasing of paths
image_args, new_binds = bindit.parse_image_args(args_iter, manual_binds)
# construct new binds in docker format
bind_args = list(bindit.bind_dict_to_arg(volume_bind_args, new_binds))
# generate the final command by inserting the new binds
final_command = (
["docker", "run"] + container_args + bind_args + [container_name] + image_args
)
# write out to stdout with appropriate escapes
sys.stdout.write(bindit.shell.join_and_quote(final_command) + "\n")
if bindit.DRY_RUN:
return 0
# run the beast
ret = bindit.shell.run(*final_command, interactive=True)
    return ret.returncode |
Python | def bar(self, x, y, z, size=10e3, color=None, bottom=0.):
"""
Plot cesiumpy.Cylinder like bar plot
Parameters
----------
x : list
List of longitudes
y : list
List of latitudes
z : list
List of bar heights
size : list or float, default 10e3
Radius of cylinder
color : list or Color
Cylinder color
bottom : list or float, default 0
Bottom heights
"""
x = com.validate_listlike(x, key='x')
# for list validation (not allow scalar)
y = com.validate_listlike(y, key='y')
# for length validation
y = self._fill_by(y, len(x), key='y')
# z must be a list
z = com.validate_listlike(z, key='z')
        z = self._fill_by(z, len(x), key='z')
size = self._fill_by(size, len(x), key='size', default=10e3)
color = self._fill_by(color, len(x), key='color')
bottom = self._fill_by(bottom, len(x), key='bottom', default=0.)
it = zip(x, y, z, size, color, bottom)
for i, (_x, _y, _z, _size, _color, _bottom) in enumerate(it):
p = cesiumpy.Cylinder(position=(_x, _y, _bottom + _z / 2.),
length=_z,
topRadius=_size, bottomRadius=_size,
material=_color)
self.widget.entities.add(p)
        return self.widget |
Python | def scatter(self, x, y, z=None, size=None, color=None):
"""
Plot cesiumpy.Point like scatter plot
Parameters
----------
x : list
List of longitudes
y : list
List of latitudes
z : list or float
Height
size : list or float
Pixel size
color : list or Color
Point color
"""
x = com.validate_listlike(x, key='x')
# for list validation (not allow scalar)
y = com.validate_listlike(y, key='y')
# for length validation
y = self._fill_by(y, len(x), key='y')
z = self._fill_by(z, len(x), key='z', default=0)
size = self._fill_by(size, len(x), key='size')
color = self._fill_by(color, len(x), key='color')
for i, (_x, _y, _z, _size, _color) in enumerate(zip(x, y, z, size, color)):
p = cesiumpy.Point(position=(_x, _y, _z), pixelSize=_size,
color=_color)
self.widget.entities.add(p)
        return self.widget |
Python | def camel_case_to_snake_case(name: str) -> str:
'''
Convert camelCase string to snake_case.
'''
pattern_1: Pattern = re.compile(r'(.)([A-Z][a-z]+)')
pattern_2: Pattern = re.compile(r'([a-z0-9])([A-Z])')
name = pattern_1.sub(r'\1_\2', name)
    return pattern_2.sub(r'\1_\2', name).lower() |
Python | def snake_case_to_camel_case(name: str) -> str:
'''
Convert snake_case string to camelCase.
'''
parts: List[str] = name.split('_')
    return parts[0] + ''.join(x.capitalize() if x else '_' for x in parts[1:]) |
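# Example (not part of the original source): round-tripping a property name with
# the two helpers above.
print(camel_case_to_snake_case("pixelSizeInMeters"))     # pixel_size_in_meters
print(snake_case_to_camel_case("pixel_size_in_meters"))  # pixelSizeInMeters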
Python | def generate_script(self, widget = None):
'''
        Return a list of scripts built from the entities;
        each script may itself be a list of commands.
'''
results = []
for item in self._items:
script = '{varname}.{propertyname}.add({item});'.format(
varname = (widget or self.widget)._varname,
propertyname = self._propertyname,
item = item.generate_script(widget = (widget or self.widget))
)
results.append(script)
        return results |
Python | def to_entity(shape):
"""
Convert shapely.geometry to corresponding entities.
    The result may be a list if the geometry consists of multiple instances.
"""
if isinstance(shape, ShapelyMultiPoint):
return [cesiumpy.Point(position=e) for e in shape]
elif isinstance(shape, ShapelyPoint):
return cesiumpy.Point(position=shape)
elif isinstance(shape, ShapelyMultiLineString):
return [cesiumpy.Polyline(positions=e) for e in shape]
elif isinstance(shape, (ShapelyLineString, ShapelyLinearRing)):
return cesiumpy.Polyline(positions=shape)
elif isinstance(shape, ShapelyMultiPolygon):
return [cesiumpy.Polygon(hierarchy=e) for e in shape]
elif isinstance(shape, ShapelyPolygon):
return cesiumpy.Polygon(hierarchy=shape)
msg = 'Unable to convert to cesiumpy entity: {shape}'.format(shape=shape)
    raise ValueError(msg) |
Python | def fromColor(self, color, size=48):
"""
Create pin specifying color and size
Parameters
----------
color : Color
A Property specifying the Color of the pin.
size : int, default 48
A Property specifying the size of the pin.
"""
        return Pin(color=color, size=size) |
Python | def fromText(self, text, color=None, size=48):
"""
Create pin specifying text, color and size
Parameters
----------
text : str
A Property specifying the text of the pin.
color : Color
A Property specifying the Color of the pin.
size : int, default 48
A Property specifying the size of the pin.
"""
# validate text is not None
if text is None:
self.text.error(self.text, text)
        return Pin(color=color, size=size, text=text) |
Python | def eastNorthUpToFixedFrame(cls, origin):
"""
Computes a 4x4 transformation matrix from a reference frame with an
east-north-up axes centered at the provided origin to the provided
ellipsoid's fixed reference frame. The local axes are defined as:
- The x axis points in the local east direction.
- The y axis points in the local north direction.
- The z axis points in the direction of the ellipsoid surface normal
which passes through the position.
Parameters
----------
origin : Cartesian3
The center point of the local reference frame.
"""
        return Transforms(origin, transform='eastNorthUpToFixedFrame') |
Python | def northEastDownToFixedFrame(cls, origin):
"""
Computes a 4x4 transformation matrix from a reference frame with an
north-east-down axes centered at the provided origin to the provided
ellipsoid's fixed reference frame. The local axes are defined as:
- The x axis points in the local north direction.
- The y axis points in the local east direction.
- The z axis points in the opposite direction of the ellipsoid surface
normal which passes through the position.
Parameters
----------
origin : Cartesian3
The center point of the local reference frame.
"""
        return Transforms(origin, transform='northEastDownToFixedFrame') |
Python | def northUpEastToFixedFrame(cls, origin):
"""
Computes a 4x4 transformation matrix from a reference frame with an
north-up-east axes centered at the provided origin to the provided
ellipsoid's fixed reference frame. The local axes are defined as:
- The x axis points in the local north direction.
- The y axis points in the direction of the ellipsoid surface normal
which passes through the position.
- The z axis points in the local east direction.
Parameters
----------
origin : Cartesian3
The center point of the local reference frame.
"""
        return Transforms(origin, transform='northUpEastToFixedFrame') |
Python | def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
print conf
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
        return cf |
Python | def _conferenceRegistration(self, request, reg):
"""Register or unregister user for selected conference."""
retval = None
prof = self._getProfileFromUser() # get user Profile
# check if conf exists given websafeConfKey
# get conference; check that it exists
wsck = request.websafeConferenceKey
conf = ndb.Key(urlsafe=wsck).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
# register
if reg:
# check if user already registered otherwise add
if wsck in prof.conferenceKeysToAttend:
raise ConflictException(
"You have already registered for this conference")
# check if seats avail
if conf.seatsAvailable <= 0:
raise ConflictException(
"There are no seats available.")
# register user, take away one seat
prof.conferenceKeysToAttend.append(wsck)
conf.seatsAvailable -= 1
retval = True
# unregister
else:
# check if user already registered
if wsck in prof.conferenceKeysToAttend:
# unregister user, add back one seat
prof.conferenceKeysToAttend.remove(wsck)
conf.seatsAvailable += 1
retval = True
else:
retval = False
# write things back to the datastore & return
prof.put()
conf.put()
        return BooleanMessage(data=retval) |
Python | def _createSpeakerObject(self, data):
"""Create Speaker object, return Speaker key"""
# allocate new Speaker ID
speaker_id = Speaker.allocate_ids(size=1)[0]
        # make Speaker key from ID
speaker_key = ndb.Key(Speaker, speaker_id)
# Create Speaker and return Speaker key
speaker = Speaker(name=data,
key=speaker_key)
speaker.put()
        return speaker_key |
Python | def _copySpeakerToForm(self, speaker):
"""Copy relevant fields from Speaker to SpeakerForm."""
sf = SpeakerForm()
for field in sf.all_fields():
if hasattr(speaker, field.name):
setattr(sf, field.name, getattr(speaker, field.name))
# convert key to urlsafe
elif field.name == "websafeSpeakerKey":
setattr(sf, field.name, speaker.key.urlsafe())
sf.check_initialized()
        return sf |
Python | def querySpeakers(self, request):
""" Query for speakers. Used to get urlsafe Speaker keys,
which can then be used to query conferences by speaker
"""
speakers = Speaker.query().order(Speaker.name)
# return individual SpeakerForm object per Speaker
return SpeakerForms(
items=[self._copySpeakerToForm(speaker) \
for speaker in speakers]
        ) |
Python | def _createSessionObject(self, request):
"""Create or update Session object, returning SessionForm."""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
        # copy SessionForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
# get conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can add sessions.')
if not request.name:
raise endpoints.BadRequestException("Session 'name' field required")
# convert dates and times from strings to Date objects;
if data['date']:
data['date'] = datetime.strptime(data['date'][:10], "%Y-%m-%d").date()
if data['startTime']:
data['startTime'] = datetime.strptime(data['startTime'][:5], "%H:%M").time()
if data['speaker']:
speaker = Speaker.query()
speaker = speaker.filter(Speaker.name == data['speaker']).get()
# Does this speaker already exist?
if speaker:
# store existing Speaker key as speaker
data['speaker'] = speaker.key
else:
# create new Speaker and store key
data['speaker'] = self._createSpeakerObject(data['speaker'])
speaker = data['speaker'].get()
# featured speaker task
taskqueue.add(
params={'websafeConferenceKey': request.websafeConferenceKey,
'websafeSpeakerKey': speaker.key.urlsafe()},
url='/tasks/update_featured_speaker',
method='GET'
)
# allocate new Session ID with Conference key as parent
s_id = Session.allocate_ids(size=1, parent=conf.key)[0]
# make Session key from ID
s_key = ndb.Key(Session, s_id, parent=conf.key)
# now I should be able to use s_key.parent() to access the parent Conference as well
data['key'] = s_key
del data['websafeConferenceKey']
del data['websafeKey']
# create Session & return SessionForm
session_key = Session(**data).put()
        return self._copySessionToForm(session_key.get()) |
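# Example (not part of the original source): the string formats the conversions
# above expect for the incoming form fields (dates trimmed to 10 characters,
# start times to 5).
from datetime import datetime

print(datetime.strptime("2024-06-01T00:00"[:10], "%Y-%m-%d").date())  # 2024-06-01
print(datetime.strptime("09:30:00"[:5], "%H:%M").time())              # 09:30:00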
Python | def _copySessionToForm(self, session):
"""Copy relevant fields from Session to SessionForm."""
sf = SessionForm()
for field in sf.all_fields():
if hasattr(session, field.name):
# convert date and time to strings;
if field.name.endswith('date'):
setattr(sf, field.name, str(getattr(session, field.name)))
elif field.name.endswith('startTime'):
setattr(sf, field.name, str(getattr(session, field.name)))
# convert Speaker url safe key to speaker name
elif field.name.endswith('speaker'):
speaker_key = getattr(session, field.name)
if speaker_key is not None:
speaker = speaker_key.get()
setattr(sf, field.name, speaker.name)
else:
setattr(sf, field.name, None)
# just copy others
else:
setattr(sf, field.name, getattr(session, field.name))
# convert key to urlsafe
elif field.name == "websafeConferenceKey":
setattr(sf, field.name, session.key.parent().urlsafe())
elif field.name == "websafeKey":
setattr(sf, field.name, session.key.urlsafe())
sf.check_initialized()
        return sf |
Python | def _doWishlist(self, request, add):
        """Add a session to, or remove it from, the user's wishlist."""
prof = self._getProfileFromUser() # get user Profile
# check if session exists given websafeSessionKey
# get session; check that it exists
wssk = request.websafeSessionKey
session_key = ndb.Key(urlsafe=wssk)
session = session_key.get()
if not session:
raise endpoints.NotFoundException(
'No Session found with key: %s' % wssk)
entityType = session_key.kind()
if (entityType != 'Session'):
raise ConflictException(
"Can only add Session objects to wishlist")
if add:
# check if user already saved this session to wishlist
if wssk in prof.sessionKeysWishlist:
raise ConflictException(
"This session is already in your wishlist")
# save this session to user wishlist
prof.sessionKeysWishlist.append(wssk)
else:
if wssk not in prof.sessionKeysWishlist:
raise ConflictException(
"This session is not in your wishlist")
# remove from wishlist
prof.sessionKeysWishlist.remove(wssk)
# write things back to the datastore & return
prof.put()
        return BooleanMessage(data=True) |
Python | def _getSessionsInWishlist(self, request):
        """Given a Conference, return the sessions in the user's wishlist that belong to it."""
prof = self._getProfileFromUser() # get user Profile
# Get conference object
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException('No conference found with key: %s' % request.websafeConferenceKey)
# Get list of session keys from user profile
session_keys = [ndb.Key(urlsafe=wssk) for wssk in prof.sessionKeysWishlist]
# get all Sessions in wishlist
sessions = ndb.get_multi(session_keys)
        # keep only Sessions whose parent is this conference; filtering into a new
        # list avoids removing items from the list while iterating over it
        sessions = [session for session in sessions if session.key.parent() == conf.key]
# return set of Session objects per Session
return SessionForms(
items=[self._copySessionToForm(session) for session in sessions]
        ) |
Python | def _cacheFeaturedSpeaker(websafeConferenceKey, websafeSpeakerKey):
"""Create Featured Speaker & assign to memcache; used by
SetFeaturedSpeaker() in main.py.
"""
speaker_key = ndb.Key(urlsafe=websafeSpeakerKey)
conf_key = ndb.Key(urlsafe=websafeConferenceKey)
sessions = Session.query(ancestor=conf_key)
sessions = sessions.filter(Session.speaker == speaker_key)
numberOfSessions = sessions.count()
if (numberOfSessions > 1):
sessions = sessions.fetch()
            string = "Don't miss out! %s is speaking at the following sessions: %s" % (
speaker_key.get().name,
', '.join(session.name for session in sessions))
            memcache.set(MEMCACHE_FEATURED_SPEAKER_KEY, string) |
Python | def _collect_field_args(expr: Expression) -> Tuple[bool, Dict[str, Expression]]:
"""Returns a tuple where the first value represents whether or not
the expression is a call to dataclass.field and the second is a
dictionary of the keyword arguments that field() was called with.
"""
if is_dataclasses_field_or_strawberry_field(expr):
expr = cast(CallExpr, expr)
# field() only takes keyword arguments.
args = {}
for name, arg in zip(expr.arg_names, expr.args):
assert name is not None
args[name] = arg
return True, args
return False, {}
Python | def transform(self) -> None:
"""Apply all the necessary transformations to the underlying
dataclass so as to ensure it is fully type checked according
to the rules in PEP 557.
"""
ctx = self._ctx
info = self._ctx.cls.info
attributes = self.collect_attributes()
if attributes is None:
# Some definitions are not ready, defer() should be already called.
return
for attr in attributes:
if attr.type is None:
ctx.api.defer()
return
decorator_arguments = {
"init": _get_decorator_bool_argument(self._ctx, "init", True),
"eq": _get_decorator_bool_argument(self._ctx, "eq", True),
"order": _get_decorator_bool_argument(self._ctx, "order", False),
"frozen": _get_decorator_bool_argument(self._ctx, "frozen", False),
}
# If there are no attributes, it may be that the semantic analyzer has not
# processed them yet. In order to work around this, we can simply skip
# generating __init__ if there are no attributes, because if the user
# truly did not define any, then the object default __init__ with an
# empty signature will be present anyway.
if (
decorator_arguments["init"]
and (
"__init__" not in info.names or info.names["__init__"].plugin_generated
)
and attributes
):
add_method(
ctx,
"__init__",
args=[attr.to_argument() for attr in attributes if attr.is_in_init],
return_type=NoneType(),
)
if (
decorator_arguments["eq"]
and info.get("__eq__") is None
or decorator_arguments["order"]
):
# Type variable for self types in generated methods.
obj_type = ctx.api.named_type("__builtins__.object")
self_tvar_expr = TypeVarExpr(
SELF_TVAR_NAME, info.fullname + "." + SELF_TVAR_NAME, [], obj_type
)
info.names[SELF_TVAR_NAME] = SymbolTableNode(MDEF, self_tvar_expr)
# Add <, >, <=, >=, but only if the class has an eq method.
if decorator_arguments["order"]:
if not decorator_arguments["eq"]:
ctx.api.fail("eq must be True if order is True", ctx.cls)
for method_name in ["__lt__", "__gt__", "__le__", "__ge__"]:
# Like for __eq__ and __ne__, we want "other" to match
# the self type.
obj_type = ctx.api.named_type("__builtins__.object")
order_tvar_def = TypeVarDef(
SELF_TVAR_NAME,
info.fullname + "." + SELF_TVAR_NAME,
-1,
[],
obj_type,
)
order_other_type = TypeVarType(order_tvar_def)
order_return_type = ctx.api.named_type("__builtins__.bool")
order_args = [
Argument(
Var("other", order_other_type), order_other_type, None, ARG_POS
)
]
existing_method = info.get(method_name)
if existing_method is not None and not existing_method.plugin_generated:
assert existing_method.node
ctx.api.fail(
"You may not have a custom %s method when order=True"
% method_name,
existing_method.node,
)
add_method(
ctx,
method_name,
args=order_args,
return_type=order_return_type,
self_type=order_other_type,
tvar_def=order_tvar_def,
)
if decorator_arguments["frozen"]:
self._freeze(attributes)
self.reset_init_only_vars(info, attributes)
info.metadata["dataclass"] = {
"attributes": [attr.serialize() for attr in attributes],
"frozen": decorator_arguments["frozen"],
}
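The eq/order check above mirrors a rule the standard-library dataclass decorator also enforces at runtime; this small, independent snippet (not part of the plugin) shows the same constraint:

# Illustrative only: plain dataclasses reject order=True without eq=True,
# which is the same condition the plugin reports via ctx.api.fail().
from dataclasses import dataclass

try:
    @dataclass(order=True, eq=False)
    class Point:
        x: int
except ValueError as exc:
    print(exc)  # eq must be true if order is true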
Python | def reset_init_only_vars(
self, info: TypeInfo, attributes: List[DataclassAttribute]
) -> None:
"""Remove init-only vars from the class and reset init var declarations."""
for attr in attributes:
if attr.is_init_var:
if attr.name in info.names:
del info.names[attr.name]
else:
# Nodes of superclass InitVars not used in __init__
# cannot be reached.
assert attr.is_init_var
for stmt in info.defn.defs.body:
if isinstance(stmt, AssignmentStmt) and stmt.unanalyzed_type:
lvalue = stmt.lvalues[0]
if isinstance(lvalue, NameExpr) and lvalue.name == attr.name:
# Reset node so that another semantic analysis pass will
# recreate a symbol node for this attribute.
lvalue.node = None
Python | def collect_attributes(self) -> Optional[List[DataclassAttribute]]:
"""Collect all attributes declared in the dataclass and its parents.
All assignments of the form
a: SomeType
b: SomeOtherType = ...
are collected.
"""
# First, collect attributes belonging to the current class.
ctx = self._ctx
cls = self._ctx.cls
attrs: List[DataclassAttribute] = []
known_attrs: Set[str] = set()
for stmt in cls.defs.body:
# Any assignment that doesn't use the new type declaration
# syntax can be ignored out of hand.
if not (isinstance(stmt, AssignmentStmt) and stmt.new_syntax):
continue
# a: int, b: str = 1, 'foo' is not supported syntax so we
# don't have to worry about it.
lhs = stmt.lvalues[0]
if not isinstance(lhs, NameExpr):
continue
sym = cls.info.names.get(lhs.name)
if sym is None:
# This name is likely blocked by a star import. We don't need
# to defer because defer() is already called by mark_incomplete().
continue
node = sym.node
if isinstance(node, PlaceholderNode):
# This node is not ready yet.
return None
assert isinstance(node, Var)
# x: ClassVar[int] is ignored by dataclasses.
if node.is_classvar:
continue
# x: InitVar[int] is turned into x: int and is removed from the class.
is_init_var = False
node_type = get_proper_type(node.type)
if (
isinstance(node_type, Instance)
and node_type.type.fullname == "dataclasses.InitVar"
):
is_init_var = True
node.type = node_type.args[0]
has_field_call, field_args = _collect_field_args(stmt.rvalue)
is_in_init_param = field_args.get("init")
if is_in_init_param is None:
is_in_init = True
else:
is_in_init = bool(ctx.api.parse_bool(is_in_init_param))
# fields with a resolver are never put in the __init__ method
if "resolver" in field_args:
is_in_init = False
has_default = False
# Ensure that something like x: int = field() is rejected
# after an attribute with a default.
if has_field_call:
has_default = "default" in field_args or "default_factory" in field_args
# All other assignments are already type checked.
elif not isinstance(stmt.rvalue, TempNode):
has_default = True
if not has_default:
# Make all non-default attributes implicit because they are de-facto set
# on self in the generated __init__(), not in the class body.
sym.implicit = True
known_attrs.add(lhs.name)
params = dict(
name=lhs.name,
is_in_init=is_in_init,
is_init_var=is_init_var,
has_default=has_default,
line=stmt.line,
column=stmt.column,
type=sym.type,
)
# add support for mypy >= 0.800 without breaking backwards compatibility
# https://github.com/python/mypy/pull/9380/file
# https://github.com/strawberry-graphql/strawberry/issues/678
try:
attribute = DataclassAttribute(**params) # type: ignore
except TypeError:
params["info"] = cls.info
attribute = DataclassAttribute(**params) # type: ignore
attrs.append(attribute)
# Next, collect attributes belonging to any class in the MRO
# as long as those attributes weren't already collected. This
# makes it possible to overwrite attributes in subclasses.
# copy() because we potentially modify all_attrs below and if
# this code requires debugging we'll have unmodified attrs laying around.
all_attrs = attrs.copy()
for info in cls.info.mro[1:-1]:
if "dataclass" not in info.metadata:
continue
super_attrs = []
# Each class depends on the set of attributes in its dataclass ancestors.
ctx.api.add_plugin_dependency(make_wildcard_trigger(info.fullname))
for data in info.metadata["dataclass"]["attributes"]:
name = data["name"] # type: str
if name not in known_attrs:
attr = DataclassAttribute.deserialize(info, data, ctx.api)
known_attrs.add(name)
super_attrs.append(attr)
elif all_attrs:
# How early in the attribute list an attribute appears is
# determined by the reverse MRO, not simply MRO.
# See https://docs.python.org/3/library/dataclasses.html#inheritance
# for details.
for attr in all_attrs:
if attr.name == name:
all_attrs.remove(attr)
super_attrs.append(attr)
break
all_attrs = super_attrs + all_attrs
# Ensure that arguments without a default don't follow
# arguments that have a default.
found_default = False
for attr in all_attrs:
# If we find any attribute that is_in_init but that
# doesn't have a default after one that does have one,
# then that's an error.
if found_default and attr.is_in_init and not attr.has_default:
# If the issue comes from merging different classes, report it
# at the class definition point.
context = (
Context(line=attr.line, column=attr.column)
if attr in attrs
else ctx.cls
)
ctx.api.fail(
"Attributes without a default cannot follow attributes with one",
context,
)
found_default = found_default or (attr.has_default and attr.is_in_init)
return all_attrs
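The final ordering check corresponds to how plain dataclasses behave at class-creation time; a short, self-contained illustration (independent of the plugin code):

# Illustrative only: a non-default attribute after a default one is rejected,
# which is the rule the "Attributes without a default cannot follow attributes
# with one" error above enforces.
from dataclasses import dataclass

try:
    @dataclass
    class Config:
        retries: int = 3
        host: str  # non-default after a default
except TypeError as exc:
    print(exc)  # non-default argument 'host' follows default argument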
Python | def _freeze(self, attributes: List[DataclassAttribute]) -> None:
"""Converts all attributes to @property methods in order to
emulate frozen classes.
"""
info = self._ctx.cls.info
for attr in attributes:
sym_node = info.names.get(attr.name)
if sym_node is not None:
var = sym_node.node
assert isinstance(var, Var)
var.is_property = True
else:
var = attr.to_var()
var.info = info
var.is_property = True
var._fullname = info.fullname + "." + var.name
info.names[var.name] = SymbolTableNode(MDEF, var)
Python | def enum(
_cls: EnumMeta = None, *, name=None, description=None
) -> Union[EnumMeta, Callable[[EnumMeta], EnumMeta]]:
"""Registers the enum in the GraphQL type system.
If name is passed, the name of the GraphQL type will be
the value passed as name instead of the Enum class name.
"""
def wrap(cls: EnumMeta) -> EnumMeta:
return _process_enum(cls, name, description)
if not _cls:
return wrap
return wrap(_cls)
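The `if not _cls` dance is the usual "decorator with optional arguments" pattern: the decorator can be used bare or with keyword arguments. A rough, self-contained sketch of the same pattern, with a stand-in `_process_enum` (the real one is not shown here, so the `_graphql_name` attribute is purely hypothetical):

from enum import Enum, EnumMeta

def _process_enum(cls, name=None, description=None):
    cls._graphql_name = name or cls.__name__  # hypothetical stand-in behaviour
    return cls

def enum(_cls: EnumMeta = None, *, name=None, description=None):
    def wrap(cls):
        return _process_enum(cls, name, description)
    if not _cls:
        return wrap          # used as @enum(name="...")
    return wrap(_cls)        # used as bare @enum

@enum
class Flavour(Enum):
    VANILLA = "vanilla"

@enum(name="IceCreamFlavour")
class Flavour2(Enum):
    CHOCOLATE = "chocolate"

print(Flavour._graphql_name, Flavour2._graphql_name)  # Flavour IceCreamFlavour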
Python | def pretty_print_graphql_operation(
operation_name: Optional[str], query: str, variables: Optional[Dict["str", Any]]
):
"""Pretty print a GraphQL operation using pygments.
Won't print the introspection operation, to avoid noise in the output."""
if operation_name == "IntrospectionQuery":
return
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f"[{now}]: {operation_name or 'No operation name'}")
print(highlight(query, GraphQLLexer(), Terminal256Formatter()))
if variables:
variables_json = json.dumps(variables, indent=4, cls=StrawberryJSONEncoder)
print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))
Python | def convert_request_to_files_dict(request: Request) -> dict:
"""
request.files has the following format, even if only a single file is uploaded:
{
'textFile': [
sanic.request.File(
type='text/plain',
body=b'strawberry',
name='textFile.txt'
)
]
}
Note that the dictionary entries are lists.
"""
request_files: dict = request.files
files_dict: Dict[str, Union[BytesIO, List[BytesIO]]] = {}
for field_name, file_list in request_files.items():
assert len(file_list) == 1
files_dict[field_name] = BytesIO(file_list[0].body)
return files_dict
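A rough, self-contained sketch of the same reshaping, using a namedtuple as a stand-in for sanic.request.File so it runs without an actual Sanic request:

from collections import namedtuple
from io import BytesIO

File = namedtuple("File", ["type", "body", "name"])  # stand-in for sanic.request.File
request_files = {"textFile": [File("text/plain", b"strawberry", "textFile.txt")]}

files_dict = {
    field_name: BytesIO(file_list[0].body)
    for field_name, file_list in request_files.items()
}
print(files_dict["textFile"].read())  # b'strawberry'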
Python | def value(node: ValueNode) -> Any:
"""Return useful value from any node."""
if hasattr(node, "fields"):
return {
field.name.value: value(field.value)
for field in node.fields # type: ignore
}
if hasattr(node, "values"):
return list(map(value, node.values)) # type: ignore
if hasattr(node, "name"):
return node.name.value # type: ignore
return getattr(node, "value", None)
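A hypothetical usage sketch, assuming graphql-core 3 is installed; the helper is repeated in condensed form so the snippet runs on its own, and shows an argument's AST value being turned into plain Python containers:

from typing import Any
from graphql import parse

def value(node) -> Any:
    # condensed copy of the helper above
    if hasattr(node, "fields"):
        return {f.name.value: value(f.value) for f in node.fields}
    if hasattr(node, "values"):
        return list(map(value, node.values))
    if hasattr(node, "name"):
        return node.name.value
    return getattr(node, "value", None)

doc = parse('{ user(filter: {tags: ["a", "b"], name: "ada"}) { id } }')
field = doc.definitions[0].selection_set.selections[0]
print(value(field.arguments[0].value))  # {'tags': ['a', 'b'], 'name': 'ada'}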
Python | def selection(node: Node) -> Selection:
"""Return typed `Selection` based on node type."""
if hasattr(node, "alias"):
return SelectedField(node) # type: ignore
if hasattr(node, "selection_set"):
return InlineFragment(node) # type: ignore
return FragmentSpread(node)
Python | def _is_union(cls, annotation: Any) -> bool:
"""Returns True if annotation is a Union"""
annotation_origin = getattr(annotation, "__origin__", None)
return annotation_origin is typing.Union
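A quick illustration of the check: Optional[X] is just Union[X, None], so both report typing.Union as their __origin__, while other generics do not:

import typing

print(getattr(typing.Union[int, str], "__origin__", None) is typing.Union)  # True
print(getattr(typing.Optional[int], "__origin__", None) is typing.Union)    # True
print(getattr(typing.List[int], "__origin__", None) is typing.Union)        # False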
Python | def __createObjects(self):
""" Create main objects and link them to scene.
"""
pmxModel = self.__model
obj_name = self.__safe_name(bpy.path.display_name(pmxModel.filepath), max_length=54)
self.__rig = mmd_model.Model.create(pmxModel.name, pmxModel.name_e, self.__scale, obj_name)
root = self.__rig.rootObject()
mmd_root = root.mmd_root
self.__root = root
root['import_folder'] = os.path.dirname(pmxModel.filepath)
txt = bpy.data.texts.new(obj_name)
txt.from_string(pmxModel.comment.replace('\r', ''))
mmd_root.comment_text = txt.name
txt = bpy.data.texts.new(obj_name+'_e')
txt.from_string(pmxModel.comment_e.replace('\r', ''))
mmd_root.comment_e_text = txt.name
self.__armObj = self.__rig.armature()
self.__armObj.hide = True
self.__armObj.select = False
Python | def __createEditBones(self, obj, pmx_bones):
""" create EditBones from pmx file data.
@return the list of bone names which can be accessed by the bone index of pmx data.
"""
editBoneTable = []
nameTable = []
specialTipBones = []
dependency_cycle_ik_bones = []
#for i, p_bone in enumerate(pmx_bones):
# if p_bone.isIK:
# if p_bone.target != -1:
# t = pmx_bones[p_bone.target]
# if p_bone.parent == t.parent:
# dependency_cycle_ik_bones.append(i)
from math import isfinite
def _VectorXZY(v):
return Vector(v).xzy if all(isfinite(n) for n in v) else Vector((0,0,0))
with bpyutils.edit_object(obj) as data:
for i in pmx_bones:
bone = data.edit_bones.new(name=i.name)
loc = _VectorXZY(i.location) * self.__scale
bone.head = loc
editBoneTable.append(bone)
nameTable.append(bone.name)
for i, (b_bone, m_bone) in enumerate(zip(editBoneTable, pmx_bones)):
if m_bone.parent != -1:
if i not in dependency_cycle_ik_bones:
b_bone.parent = editBoneTable[m_bone.parent]
else:
b_bone.parent = editBoneTable[m_bone.parent].parent
for b_bone, m_bone in zip(editBoneTable, pmx_bones):
if isinstance(m_bone.displayConnection, int):
if m_bone.displayConnection != -1:
b_bone.tail = editBoneTable[m_bone.displayConnection].head
else:
b_bone.tail = b_bone.head
else:
loc = _VectorXZY(m_bone.displayConnection) * self.__scale
b_bone.tail = b_bone.head + loc
for b_bone, m_bone in zip(editBoneTable, pmx_bones):
if m_bone.isIK and m_bone.target != -1:
logging.debug(' - checking IK links of %s', b_bone.name)
b_target = editBoneTable[m_bone.target]
for i in range(len(m_bone.ik_links)):
b_bone_link = editBoneTable[m_bone.ik_links[i].target]
if self.__fix_IK_links or b_bone_link.length < 0.001:
b_bone_tail = b_target if i == 0 else editBoneTable[m_bone.ik_links[i-1].target]
loc = b_bone_tail.head - b_bone_link.head
if loc.length < 0.001:
logging.warning(' ** unsolved IK link %s **', b_bone_link.name)
elif b_bone_tail.parent != b_bone_link:
logging.warning(' ** skipped IK link %s **', b_bone_link.name)
elif (b_bone_link.tail - b_bone_tail.head).length > 1e-4:
logging.debug(' * fix IK link %s', b_bone_link.name)
b_bone_link.tail = b_bone_link.head + loc
for b_bone, m_bone in zip(editBoneTable, pmx_bones):
# Set a minimal length for bones that are too short, because Blender deletes zero-length bones.
if b_bone.length < 0.001:
if not self.__apply_bone_fixed_axis and m_bone.axis is not None:
fixed_axis = Vector(m_bone.axis)
if fixed_axis.length:
b_bone.tail = b_bone.head + fixed_axis.xzy.normalized() * self.__scale
else:
b_bone.tail = b_bone.head + Vector((0, 0, 1)) * self.__scale
else:
b_bone.tail = b_bone.head + Vector((0, 0, 1)) * self.__scale
if m_bone.displayConnection != -1 and m_bone.displayConnection != [0.0, 0.0, 0.0]:
logging.debug(' * special tip bone %s, display %s', b_bone.name, str(m_bone.displayConnection))
specialTipBones.append(b_bone.name)
for b_bone, m_bone in zip(editBoneTable, pmx_bones):
if m_bone.localCoordinate is not None:
FnBone.update_bone_roll(b_bone, m_bone.localCoordinate.x_axis, m_bone.localCoordinate.z_axis)
elif FnBone.has_auto_local_axis(m_bone.name):
FnBone.update_auto_bone_roll(b_bone)
for b_bone, m_bone in zip(editBoneTable, pmx_bones):
if isinstance(m_bone.displayConnection, int) and m_bone.displayConnection >= 0:
t = editBoneTable[m_bone.displayConnection]
if t.parent is None or t.parent != b_bone:
logging.warning(' * disconnected: %s (%d)<> %s', b_bone.name, len(b_bone.children), t.name)
continue
if pmx_bones[m_bone.displayConnection].isMovable:
logging.warning(' * disconnected: %s (%d)-> %s', b_bone.name, len(b_bone.children), t.name)
continue
if (b_bone.tail - t.head).length > 1e-4:
logging.warning(' * disconnected: %s (%d)=> %s', b_bone.name, len(b_bone.children), t.name)
continue
t.use_connect = True
return nameTable, specialTipBones
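The `_VectorXZY` helper swaps the Y and Z components (PMX data is Y-up while Blender is Z-up) and guards against non-finite values. A dependency-free sketch of the same idea, using plain tuples instead of mathutils.Vector:

from math import isfinite

def vector_xzy(v):
    # swap Y and Z, falling back to the origin for NaN/inf input
    return (v[0], v[2], v[1]) if all(isfinite(n) for n in v) else (0.0, 0.0, 0.0)

print(vector_xzy((1.0, 2.0, 3.0)))           # (1.0, 3.0, 2.0)
print(vector_xzy((float("nan"), 0.0, 0.0)))  # (0.0, 0.0, 0.0)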
Python | def __applyIk(self, index, pmx_bone, pose_bones):
""" create a IK bone constraint
If the IK bone and the target bone is separated, a dummy IK target bone is created as a child of the IK bone.
@param index the bone index
@param pmx_bone pmx.Bone
@param pose_bones the list of PoseBones sorted by the bone index
"""
# for tracking the mmd ik target, a simple explanation:
# + Root
# | + link1
# | + link0 (ik_constraint_bone) <- ik constraint, chain_count=2
# | + IK target (ik_target) <- constraint 'mmd_ik_target_override', subtarget=link0
# + IK bone (ik_bone)
#
# it is possible that the link0 is the IK target,
# so ik constraint will be on link1, chain_count=1
# the IK target isn't affected by IK bone
ik_bone = pose_bones[index]
ik_target = pose_bones[pmx_bone.target]
ik_constraint_bone = ik_target.parent
is_valid_ik = False
if len(pmx_bone.ik_links) > 0:
ik_constraint_bone_real = pose_bones[pmx_bone.ik_links[0].target]
if ik_constraint_bone_real == ik_target:
if len(pmx_bone.ik_links) > 1:
ik_constraint_bone_real = pose_bones[pmx_bone.ik_links[1].target]
del pmx_bone.ik_links[0]
logging.warning(' * fix IK settings of IK bone (%s)', ik_bone.name)
is_valid_ik = (ik_constraint_bone == ik_constraint_bone_real)
if not is_valid_ik:
ik_constraint_bone = ik_constraint_bone_real
logging.warning(' * IK bone (%s) warning: IK target (%s) is not a child of IK link 0 (%s)',
ik_bone.name, ik_target.name, ik_constraint_bone.name)
elif any(pose_bones[i.target].parent != pose_bones[j.target] for i, j in zip(pmx_bone.ik_links, pmx_bone.ik_links[1:])):
logging.warning(' * Invalid IK bone (%s): IK chain does not follow parent-child relationship', ik_bone.name)
return
if ik_constraint_bone is None or len(pmx_bone.ik_links) < 1:
logging.warning(' * Invalid IK bone (%s)', ik_bone.name)
return
c = ik_target.constraints.new(type='DAMPED_TRACK')
c.name = 'mmd_ik_target_override'
c.mute = True
c.influence = 0
c.target = self.__armObj
c.subtarget = ik_constraint_bone.name
if not is_valid_ik or next((c for c in ik_constraint_bone.constraints if c.type == 'IK' and c.is_valid), None):
c.name = 'mmd_ik_target_custom'
c.subtarget = ik_bone.name # point to IK control bone
ik_bone.mmd_bone.ik_rotation_constraint = pmx_bone.rotationConstraint
use_custom_ik = True
else:
ik_constraint_bone.mmd_bone.ik_rotation_constraint = pmx_bone.rotationConstraint
use_custom_ik = False
ikConst = self.__rig.create_ik_constraint(ik_constraint_bone, ik_bone)
ikConst.iterations = pmx_bone.loopCount
ikConst.chain_count = len(pmx_bone.ik_links)
if not is_valid_ik:
ikConst.pole_target = self.__armObj # make it an incomplete/invalid setting
for idx, i in enumerate(pmx_bone.ik_links):
if use_custom_ik or i.target in self.__blender_ik_links:
c = ik_bone.constraints.new(type='LIMIT_ROTATION')
c.mute = True
c.influence = 0
c.name = 'mmd_ik_limit_custom%d'%idx
use_limits = c.use_limit_x = c.use_limit_y = c.use_limit_z = (i.maximumAngle is not None)
if use_limits:
minimum, maximum = self.convertIKLimitAngles(i.minimumAngle, i.maximumAngle, pose_bones[i.target].bone.matrix_local)
c.max_x, c.max_y, c.max_z = maximum
c.min_x, c.min_y, c.min_z = minimum
continue
self.__blender_ik_links.add(i.target)
if i.maximumAngle is not None:
bone = pose_bones[i.target]
minimum, maximum = self.convertIKLimitAngles(i.minimumAngle, i.maximumAngle, bone.bone.matrix_local)
bone.use_ik_limit_x = True
bone.use_ik_limit_y = True
bone.use_ik_limit_z = True
bone.ik_max_x, bone.ik_max_y, bone.ik_max_z = maximum
bone.ik_min_x, bone.ik_min_y, bone.ik_min_z = minimum
c = bone.constraints.new(type='LIMIT_ROTATION')
c.mute = not is_valid_ik
c.name = 'mmd_ik_limit_override'
c.owner_space = 'POSE' # WORLD/POSE/LOCAL
c.max_x, c.max_y, c.max_z = maximum
c.min_x, c.min_y, c.min_z = minimum
c.use_limit_x = bone.ik_max_x != c.max_x or bone.ik_min_x != c.min_x
c.use_limit_y = bone.ik_max_y != c.max_y or bone.ik_min_y != c.min_y
c.use_limit_z = bone.ik_max_z != c.max_z or bone.ik_min_z != c.min_z
Python | def __exportIK(self, bone_map):
""" Export IK constraints
@param bone_map the dictionary to map Blender bone names to bone indices of the pmx.model instance.
"""
pmx_bones = self.__model.bones
arm = self.__armature
ik_loop_factor = max(arm.get('mmd_ik_loop_factor', 1), 1)
pose_bones = arm.pose.bones
ik_target_custom_map = {getattr(b.constraints.get('mmd_ik_target_custom', None), 'subtarget', None):b for b in pose_bones if not b.is_mmd_shadow_bone}
def __ik_target_bone_get(ik_constraint_bone, ik_bone):
if ik_bone.name in ik_target_custom_map:
logging.debug(' (use "mmd_ik_target_custom")')
return ik_target_custom_map[ik_bone.name] # for supporting the ik target which is not a child of ik_constraint_bone
return self.__get_ik_target_bone(ik_constraint_bone) # this only searches the children of ik_constraint_bone
for bone in pose_bones:
if bone.is_mmd_shadow_bone:
continue
for c in bone.constraints:
if c.type == 'IK' and not c.mute:
logging.debug(' Found IK constraint on %s', bone.name)
ik_pose_bone = self.__get_ik_control_bone(c)
if ik_pose_bone is None:
logging.warning(' * Invalid IK constraint "%s" on bone %s', c.name, bone.name)
continue
ik_bone_index = bone_map.get(ik_pose_bone.name, -1)
if ik_bone_index < 0:
logging.warning(' * IK bone "%s" not found !!!', ik_pose_bone.name)
continue
pmx_ik_bone = pmx_bones[ik_bone_index]
if pmx_ik_bone.isIK:
logging.warning(' * IK bone "%s" is used by another IK setting !!!', ik_pose_bone.name)
continue
ik_chain0 = bone if c.use_tail else bone.parent
ik_target_bone = __ik_target_bone_get(bone, ik_pose_bone) if c.use_tail else bone
if ik_target_bone is None:
logging.warning(' * IK bone: %s, IK Target not found !!!', ik_pose_bone.name)
continue
logging.debug(' - IK bone: %s, IK Target: %s', ik_pose_bone.name, ik_target_bone.name)
pmx_ik_bone.isIK = True
pmx_ik_bone.loopCount = max(int(c.iterations/ik_loop_factor), 1)
if ik_pose_bone.name in ik_target_custom_map:
pmx_ik_bone.rotationConstraint = ik_pose_bone.mmd_bone.ik_rotation_constraint
else:
pmx_ik_bone.rotationConstraint = bone.mmd_bone.ik_rotation_constraint
pmx_ik_bone.target = bone_map[ik_target_bone.name]
pmx_ik_bone.ik_links = self.__exportIKLinks(ik_chain0, c.chain_count, bone_map, [], ik_pose_bone)
Python | def __get_ik_target_bone(self, target_bone):
""" Get mmd ik target bone.
Args:
target_bone: A blender PoseBone
Returns:
A bpy.types.PoseBone object that is closest to the tail position of target_bone.
Returns None if target_bone has no valid child bones.
"""
valid_children = [c for c in target_bone.children if not c.is_mmd_shadow_bone]
# search 'mmd_ik_target_override' first
for c in valid_children:
ik_target_override = c.constraints.get('mmd_ik_target_override', None)
if ik_target_override and ik_target_override.subtarget == target_bone.name:
logging.debug(' (use "mmd_ik_target_override")')
return c
r = None
min_length = None
for c in valid_children:
if c.bone.use_connect:
return c
length = (c.head - target_bone.tail).length
if min_length is None or length < min_length:
min_length = length
r = c
return r
Python | def delete_comment(cls,comment_id):
'''
Function that deletes a single comment from the comments table and the database
Args:
comment_id : specific comment id
'''
comment = Comment.query.filter_by(id=comment_id).delete()
db.session.commit()
Python | def checkout_branch(self, git_ref: str):
"""
Perform a `git checkout`
:param git_ref: ref to check out
"""
if git_ref in self.local_project.git_repo.heads:
head = self.local_project.git_repo.heads[git_ref]
else:
raise PackitException(f"Branch {git_ref} does not exist")
head.checkout()
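A self-contained sketch of the same GitPython calls, assuming GitPython is installed; it creates a throwaway repository so the checkout can actually run:

import tempfile
from git import Repo

path = tempfile.mkdtemp()
repo = Repo.init(path)
repo.index.commit("initial commit")  # a branch needs at least one commit
repo.create_head("devel")

if "devel" in repo.heads:
    repo.heads["devel"].checkout()
print(repo.active_branch.name)  # devel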
Python | def push(self, refspec: str, remote_name: str = "origin", force: bool = False):
""" push selected refspec to a git remote """
logger.info(f"pushing changes to remote {remote_name} using refspec {refspec}")
push_infos_list: Iterable[PushInfo] = self.local_project.push(
refspec, remote_name=remote_name, force=force
)
for pi in push_infos_list:
logger.info(f"push summary: {pi.summary}")
push_failed = [
bool(x & pi.flags)
for x in (
PushInfo.ERROR,
PushInfo.REMOTE_FAILURE,
PushInfo.REMOTE_REJECTED,
PushInfo.NO_MATCH, # this looks like it's not used in gitpython
PushInfo.REJECTED,
PushInfo.UP_TO_DATE, # is this an error?
)
]
if any(push_failed):
logger.debug(f"push_info flags: {pi.flags}")
raise PackitException(
f"We were unable to push to dist-git: {pi.summary}."
)
Python | def parse_loaded_config(loaded_config: dict) -> PackageConfig:
"""Tries to parse the config to PackageConfig."""
logger.debug(f"Package config:\n{json.dumps(loaded_config, indent=4)}")
try:
package_config = PackageConfig.get_from_dict(
raw_dict=loaded_config, validate=True
)
return package_config
except Exception as ex:
logger.error(f"Cannot parse package config. {ex}.")
raise Exception(f"Cannot parse package config: {ex}.") | def parse_loaded_config(loaded_config: dict) -> PackageConfig:
"""Tries to parse the config to PackageConfig."""
logger.debug(f"Package config:\n{json.dumps(loaded_config, indent=4)}")
try:
package_config = PackageConfig.get_from_dict(
raw_dict=loaded_config, validate=True
)
return package_config
except Exception as ex:
logger.error(f"Cannot parse package config. {ex}.")
raise Exception(f"Cannot parse package config: {ex}.") |
Python | def create_srpm(
self, output_file: str = None, upstream_ref: str = None, srpm_dir: str = None
) -> Path:
"""
Create srpm from the upstream repo
:param upstream_ref: git ref to upstream commit
:param output_file: path + filename where the srpm should be written, defaults to cwd
:param srpm_dir: path to the directory where the srpm is meant to be placed
:return: a path to the srpm
"""
self.up.run_action(actions=ActionName.post_upstream_clone)
current_git_describe_version = self.up.get_current_version()
upstream_ref = upstream_ref or self.package_config.upstream_ref
commit = self.up.local_project.git_repo.active_branch.commit.hexsha[:8]
if self.up.running_in_service():
relative_to = Path(self.config.command_handler_work_dir)
else:
relative_to = Path.cwd()
if upstream_ref:
# source-git code: fetch the tarball and don't check out the upstream ref
self.up.fetch_upstream_archive()
source_dir = self.up.absolute_specfile_dir.relative_to(relative_to)
if self.up.with_action(action=ActionName.create_patches):
patches = self.up.create_patches(
upstream=upstream_ref,
destination=str(self.up.absolute_specfile_dir),
)
self.up.add_patches_to_specfile(patches)
old_release = self.up.specfile.get_release_number()
try:
old_release_int = int(old_release)
new_release = old_release_int + 1
except ValueError:
new_release = old_release
release_to_update = f"{new_release}.g{commit}"
msg = f"Downstream changes ({commit})"
self.up.set_spec_version(
release=release_to_update, changelog_entry=f"- {msg}"
)
else:
archive = self.up.create_archive(version=current_git_describe_version)
env = {
"PACKIT_PROJECT_VERSION": current_git_describe_version,
"PACKIT_PROJECT_COMMIT": commit,
"PACKIT_PROJECT_ARCHIVE": archive,
}
if self.up.with_action(action=ActionName.fix_spec, env=env):
self.up.fix_spec(
archive=archive, version=current_git_describe_version, commit=commit
)
if self.up.local_project.working_dir.startswith(str(relative_to)):
source_dir = Path(self.up.local_project.working_dir).relative_to(
relative_to
)
else:
source_dir = Path(self.up.local_project.working_dir)
srpm_path = self.up.create_srpm(
srpm_path=output_file, srpm_dir=srpm_dir, source_dir=source_dir
)
if not srpm_path.exists():
raise PackitException(
f"SRPM was created successfully, but can't be found at {srpm_path}"
)
return srpm_path
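The release handling above increments numeric releases, keeps anything else as-is, and appends a `.g<commit>` suffix for the source-git build; a small standalone restatement of just that logic:

def bump_release(old_release: str, commit: str) -> str:
    # numeric releases are bumped, non-numeric ones (e.g. "1.fc33") are kept
    try:
        new_release = int(old_release) + 1
    except ValueError:
        new_release = old_release
    return f"{new_release}.g{commit}"

print(bump_release("1", "abcd1234"))       # 2.gabcd1234
print(bump_release("1.fc33", "abcd1234"))  # 1.fc33.gabcd1234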
Python | def _copr_web_build_url(build: Munch):
""" Construct web frontend url because build.repo_url is not much user-friendly."""
return (
"https://copr.fedorainfracloud.org/coprs/"
f"{build.ownername}/{build.projectname}/build/{build.id}/"
)
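A hypothetical call, assuming the munch package is available; the Munch below only mimics the fields the helper reads from a real Copr build object (owner, project and build id are made up):

from munch import Munch

build = Munch(ownername="packit", projectname="packit-releases", id=1245)
print(
    "https://copr.fedorainfracloud.org/coprs/"
    f"{build.ownername}/{build.projectname}/build/{build.id}/"
)
# https://copr.fedorainfracloud.org/coprs/packit/packit-releases/build/1245/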
Python | def run_copr_build(
self,
project: str,
chroots: List[str],
owner: str = None,
description: str = None,
instructions: str = None,
) -> Tuple[int, str]:
"""
Submit a build to copr build system using an SRPM using the current checkout.
:param project: name of the copr project to build
inside (defaults to something long and ugly)
:param chroots: a list of COPR chroots (targets) e.g. fedora-rawhide-x86_64
:param owner: defaults to username from copr config file
:param description: description of the project
:param instructions: installation instructions for the project
:return: id of the created build and url to the build web page
"""
# get info
configured_owner = self.copr.config.get("username")
owner = owner or configured_owner
try:
copr_proj = self.copr.project_proxy.get(owner, project)
# make sure our project has chroots set correctly
if set(copr_proj.chroot_repos.keys()) != set(chroots):
logger.info(f"Updating targets on project {owner}/{project}")
logger.debug(f"old = {set(copr_proj.chroot_repos.keys())}")
logger.debug(f"new = {set(chroots)}")
self.copr.project_proxy.edit(
owner,
project,
chroots=chroots,
description=description,
instructions=instructions,
)
except CoprNoResultException:
if owner == configured_owner:
logger.info(f"Copr project {owner}/{project} not found. Creating new.")
self.copr.project_proxy.add(
ownername=owner,
projectname=project,
chroots=chroots,
description=(
description
or "Continuous builds initiated by packit service.\n"
"For more info check out https://packit.dev/"
),
contact="https://github.com/packit-service/packit/issues",
# don't show project on Copr homepage
unlisted_on_hp=True,
# delete project after the specified period of time
delete_after_days=60,
instructions=instructions
or "You can check out the upstream project"
f"{self.upstream_local_project.git_url} to find out how to consume these"
"builds. This copr project is created and handled by the packit project"
"(https://packit.dev/).",
)
else:
raise PackitInvalidConfigException(
f"Copr project {owner}/{project} not found."
)
srpm_path = self.create_srpm(srpm_dir=self.up.local_project.working_dir)
logger.debug(f"owner={owner}, project={project}, path={srpm_path}")
build = self.copr.build_proxy.create_from_file(owner, project, srpm_path)
        return build.id, self._copr_web_build_url(build)
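For orientation, a hedged sketch of the underlying copr.v3 calls that run_copr_build() wraps, assuming a standard ~/.config/copr credentials file; the project name, chroot, and SRPM path are illustrative only.

from copr.v3 import Client
from copr.v3.exceptions import CoprNoResultException

copr = Client.create_from_config_file()  # reads ~/.config/copr
owner = copr.config.get("username")
try:
    copr.project_proxy.get(owner, "my-nightly-builds")
except CoprNoResultException:
    copr.project_proxy.add(
        ownername=owner,
        projectname="my-nightly-builds",
        chroots=["fedora-rawhide-x86_64"],
        unlisted_on_hp=True,
    )
build = copr.build_proxy.create_from_file(owner, "my-nightly-builds", "./hello-1.0-1.src.rpm")
print(build.id)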
Python | def resolve_incident(self, incident_key,
description=None, details=None):
""" Causes the referenced incident to enter resolved state.
Send a resolve event when the problem that caused the initial
trigger has been fixed.
"""
return self.create_event(description, "resolve",
                                 details, incident_key)
Python | def acknowledge_incident(self, incident_key,
description=None, details=None):
""" Causes the referenced incident to enter the acknowledged state.
Send an acknowledge event when someone is presently working on the
incident.
"""
return self.create_event(description, "acknowledge",
                                 details, incident_key)
Python | def trigger_incident(self, description, incident_key=None, details=None,
client=None, client_url=None, contexts=None):
""" Report a new or ongoing problem. When PagerDuty receives a trigger,
it will either open a new incident, or add a new log entry to an
existing incident.
"""
return self.create_event(description, "trigger",
details, incident_key,
                                 client=client, client_url=client_url, contexts=contexts)
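A hedged end-to-end sketch of the three event helpers above; `events` stands in for an instance of the surrounding client class, whose constructor is not shown here, and the key, details, and URLs are invented for illustration.

events = ...  # assumed instance of the surrounding client class (constructor not shown)
incident_key = "disk-full-db01"  # hypothetical deduplication key

events.trigger_incident(
    "Disk usage above 95% on db01",
    incident_key=incident_key,
    details={"host": "db01", "mount": "/var"},
    client="monitoring-bot",
    client_url="https://monitoring.example.com/alerts/42",
)
events.acknowledge_incident(incident_key, description="On-call is investigating")
events.resolve_incident(incident_key, description="Old logs rotated, disk freed")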
Python | def plist_detail(request):
""" Grid view of projectionists and their completed PITs """
context = {}
levels = PITLevel.objects.exclude(name_short__in=['PP', 'L']) \
.order_by('ordering')
users = Projectionist.objects \
.select_related('user')
licensed = Q(pitinstances__pit_level__name_short__in=['L'])
alumni = Q(user__groups__name="Alumni")
context['current_users'] = users.exclude(alumni)
context['licensed_users'] = users.filter(licensed).exclude(alumni)
context['alumni_users'] = users.filter(licensed).filter(alumni)
context['levels'] = levels
context['h2'] = "Projectionist List Detailed"
    return render(request, 'projectionlist_detail.html', context)
Python | def projection_update(request, id):
""" Update a projectionist's license and / or PIT records """
projectionist = get_object_or_404(Projectionist, pk=id)
context = {'msg': "Updating Projectionist %s" % projectionist}
if request.method == "POST":
form = ProjectionistUpdateForm(request.POST, instance=projectionist, prefix="main")
formset = PITFormset(request.POST, instance=projectionist, prefix="nested")
if form.is_valid() and formset.is_valid():
form.save()
formset.save()
return HttpResponseRedirect(reverse("projection:grid"))
else:
context['form'] = form
context['formset'] = formset
else:
form = ProjectionistUpdateForm(instance=projectionist, prefix="main")
formset = PITFormset(instance=projectionist, prefix="nested")
context['form'] = form
context['formset'] = formset
context['pk'] = id
    return render(request, 'form_crispy_projection.html', context)
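A hedged sketch of how the PITFormset used above could be declared; the real definition lives elsewhere in the app and is not shown here, so the child model, import path, and field list are assumptions (only the Projectionist parent and the pit_level relation are visible in the views).

from django.forms import inlineformset_factory

from .models import Projectionist, PitInstance  # PitInstance is an assumed model name

PITFormset = inlineformset_factory(
    Projectionist,          # parent model, taken from the views above
    PitInstance,            # assumed child model behind the "pitinstances" relation
    fields=('pit_level',),  # only the relation visible in the queries; the real form may include more
    extra=1,
)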
Python | def bulk_projection(request):
"""
Add new projection events in bulk. This is an internal form and is often used when there are multiple showings
over the course of a weekend.
"""
context = {}
if not request.GET: # Step 1: get contact info and date range
return render(request, "form_crispy.html", {
'form': BulkCreateForm(),
'msg': "Bulk Movie Addition"
})
basicinfoform = BulkCreateForm(request.GET)
if not basicinfoform.is_valid(): # Bad user! Give me the contact/basics!!!
return render(request, "form_crispy.html", {
'form': basicinfoform,
'msg': "Bulk Movie Addition (Errors)"
})
# Past this point, we have enough info for the full form
# create our new form and pass it back
date_start = basicinfoform.cleaned_data.get('date_first')
date_end = basicinfoform.cleaned_data.get('date_second')
# prepopulate things
weeks = [{"date": date} for date in get_saturdays_for_range(date_start, date_end)]
formset = formset_factory(DateEntryFormSetBase, extra=0)
# depending on the return, do other things.
if request.POST:
# here we have our params and data
filled = formset(request.POST, initial=weeks)
if filled.is_valid():
out = []
for form in filled:
out.extend(form.save_objects(
user=request.user,
contact=basicinfoform.cleaned_data.get('contact'),
org=basicinfoform.cleaned_data.get('billing'),
ip=request.META['REMOTE_ADDR']
))
            # after that's done
context['result'] = out
return render(request, "form_crispy_bulk_projection_done.html", context)
else:
context['form'] = filled
return render(request, "form_crispy.html", context)
else:
# pass back the empty form
context['msg'] = "Bulk Movie Addition - Choose Movie Details"
context['form'] = formset(initial=weeks)
context['helper'] = DateEntryFormSetBase().helper
context['helper'].add_input(Submit('submit', 'Submit'))
return render(request, "form_crispy.html", context) | def bulk_projection(request):
"""
Add new projection events in bulk. This is an internal form and is often used when there are multiple showings
over the course of a weekend.
"""
context = {}
if not request.GET: # Step 1: get contact info and date range
return render(request, "form_crispy.html", {
'form': BulkCreateForm(),
'msg': "Bulk Movie Addition"
})
basicinfoform = BulkCreateForm(request.GET)
if not basicinfoform.is_valid(): # Bad user! Give me the contact/basics!!!
return render(request, "form_crispy.html", {
'form': basicinfoform,
'msg': "Bulk Movie Addition (Errors)"
})
# Past this point, we have enough info for the full form
# create our new form and pass it back
date_start = basicinfoform.cleaned_data.get('date_first')
date_end = basicinfoform.cleaned_data.get('date_second')
# prepopulate things
weeks = [{"date": date} for date in get_saturdays_for_range(date_start, date_end)]
formset = formset_factory(DateEntryFormSetBase, extra=0)
# depending on the return, do other things.
if request.POST:
# here we have our params and data
filled = formset(request.POST, initial=weeks)
if filled.is_valid():
out = []
for form in filled:
out.extend(form.save_objects(
user=request.user,
contact=basicinfoform.cleaned_data.get('contact'),
org=basicinfoform.cleaned_data.get('billing'),
ip=request.META['REMOTE_ADDR']
))
# after thats done
context['result'] = out
return render(request, "form_crispy_bulk_projection_done.html", context)
else:
context['form'] = filled
return render(request, "form_crispy.html", context)
else:
# pass back the empty form
context['msg'] = "Bulk Movie Addition - Choose Movie Details"
context['form'] = formset(initial=weeks)
context['helper'] = DateEntryFormSetBase().helper
context['helper'].add_input(Submit('submit', 'Submit'))
return render(request, "form_crispy.html", context) |
Python | def send_request_notification(form, update=False):
""" Send the head projectionist a PIT request notification """
name = form.instance.projectionist.user.get_full_name()
pit = form.instance.level.name_long
requested_date = form.instance.scheduled_for
if requested_date is None:
requested_date = "None"
else:
requested_date = requested_date.strftime('%b %d, %Y, %I:%M %p')
message_context = {'CUSTOM_URL': True}
message = "<strong>Projectionist:</strong> " + name + "<br><strong>PIT Level:</strong> " + \
pit + "<br><strong>Requested Date:</strong> " + requested_date + \
"<br><br><a href='https://lnl.wpi.edu" + reverse("projection:pit-schedule") + "'>Review</a>"
if update:
email = GenericEmailGenerator(subject="PIT Request Updated", to_emails=settings.EMAIL_TARGET_HP, body=message,
context=message_context)
else:
email = GenericEmailGenerator(subject="New PIT Request", to_emails=settings.EMAIL_TARGET_HP, body=message,
context=message_context)
    email.send()
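For reference, a tiny sketch of what the strftime pattern used above produces; the date is arbitrary.

import datetime

dt = datetime.datetime(2023, 3, 4, 19, 30)
print(dt.strftime('%b %d, %Y, %I:%M %p'))  # Mar 04, 2023, 07:30 PM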
Python | def pit_request_update(request, id):
""" Edit a PIT request (accessible by trainee) """
pit_request = get_object_or_404(PitRequest, pk=id)
context = {'title': "Update PIT Request"}
if request.method == "POST":
form = PITRequestForm(request.POST, instance=pit_request, prefix="main")
if form.is_valid():
form.save()
send_request_notification(form, True)
if request.user.has_perm('projection.edit_pits', pit_request):
return HttpResponseRedirect(reverse("projection:pit-schedule"))
else:
return HttpResponseRedirect(reverse("projection:grid"))
else:
context['form'] = form
else:
form = PITRequestForm(instance=pit_request, prefix="main")
context['form'] = form
context['pk'] = id
    return render(request, 'projection_pit_request.html', context)
Python | def manage_pit_request(request, id):
""" Edit or approve a PIT request (not accessible by trainee) """
pit_request = get_object_or_404(PitRequest, pk=id)
context = {'title': "Manage PIT Request"}
if request.method == "POST":
form = PITRequestAdminForm(request.POST, instance=pit_request, prefix="main")
if form.is_valid():
form.save()
user = form.instance.projectionist.user.email
pit = form.instance.level.name_long
requested_date = form.instance.scheduled_for.strftime('%b %d, %Y at %I:%M %p')
message_context = {'CUSTOM_URL': True}
message = "Your PIT request has been approved! You're now scheduled to get " + pit + " on <strong>" + \
requested_date + "</strong>. In the event that you need to reschedule or cancel this " \
"appointment, please use the links below.<br><br><a href='https://lnl.wpi.edu"\
+ reverse("projection:edit-request", args=[id]) + \
"'>Reschedule</a><br><a href='https://lnl.wpi.edu" + \
reverse("projection:cancel-request", args=[id]) + "'>Cancel</a>"
email = GenericEmailGenerator(to_emails=user, subject="PIT Scheduled", body=message,
context=message_context, reply_to=[settings.EMAIL_TARGET_HP])
if form.instance.approved is True:
email.send()
return HttpResponseRedirect(reverse("projection:pit-schedule"))
else:
context['form'] = form
else:
form = PITRequestAdminForm(instance=pit_request, prefix="main")
context['form'] = form
context['pk'] = id
    return render(request, 'projection_pit_request.html', context)
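To tie the views together, a hedged URLconf sketch that would satisfy the reverse() names used above; only the route names come from the code, while the URL patterns, module layout, and the two views that never appear here (pit_schedule, pit_request_cancel) are assumptions.

from django.urls import path

from . import views

app_name = "projection"

urlpatterns = [
    path("grid/", views.plist_detail, name="grid"),                                         # assumed mapping
    path("update/<int:id>/", views.projection_update, name="update"),
    path("pit/schedule/", views.pit_schedule, name="pit-schedule"),                          # view not shown above
    path("pit/request/<int:id>/", views.pit_request_update, name="edit-request"),
    path("pit/request/<int:id>/cancel/", views.pit_request_cancel, name="cancel-request"),  # view not shown above
    path("pit/request/<int:id>/manage/", views.manage_pit_request, name="manage-request"),  # assumed name
]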