Columns: rem (string, length 0 to 322k), add (string, length 0 to 2.05M), context (string, length 8 to 228k). Each row gives a removed code span (rem), its replacement (add), and the surrounding source (context).
parser.add_option("-p", "--nostore", dest="store_msg",
parser.add_option("-s", "--nostore", dest="store_msg",
def _read_amqp_config(galaxy_config): """Read connection information on the RabbitMQ server from Galaxy config. """ config = ConfigParser.ConfigParser() config.read(galaxy_config) amqp_config = {} for option in config.options("galaxy_amqp"): amqp_config[option] = config.get("galaxy_amqp", option) return amqp_config
else: return i
def search_connections (c): i=0 #seraching for a specific box in memory for connections in memory_connections: if c==connections: return i i+=1 #return -1 if not if i==len(memory_connections): return -1 else: return i
sleep(0.1)
sleep(0.01)
def create(self): b1 = search_box(self.box_orig) b2 = search_box(self.box_dest)
print t1 print print "------" print print t2
def create(self): b1 = search_box(self.box_orig) b2 = search_box(self.box_dest)
sleep(0.1)
sleep(0.01)
def delete(self): b1 = search_box(self.box_orig) b2 = search_box(self.box_dest) if (b1 > -1) & (b2 > -1): #get the state before removing the connection self.snd.send_pd("pd-new menusave ; ") sleep(0.1) t1 = self.snd.get_file() #try to remove the connection command = Connection.canvas + "disconnect " + str(b1) + " " + str(self.outlet) + " " + str(b2) + " " + str(self.inlet) + " ; " Connection.snd.send_pd(command) #get the state after removing the connection self.snd.send_pd("pd-new menusave ; ") sleep(0.1) t2 = self.snd.get_file() print t1 print print "------" print print t2 #verifies if changed if t1 != t2: i=search_connections(self) #print i memory_box.pop(i) print "funfou!" else: print "nao funfou!"
print t1 print print "------" print print t2
def delete(self): b1 = search_box(self.box_orig) b2 = search_box(self.box_dest) if (b1 > -1) & (b2 > -1): #get the state before removing the connection self.snd.send_pd("pd-new menusave ; ") sleep(0.1) t1 = self.snd.get_file() #try to remove the connection command = Connection.canvas + "disconnect " + str(b1) + " " + str(self.outlet) + " " + str(b2) + " " + str(self.inlet) + " ; " Connection.snd.send_pd(command) #get the state after removing the connection self.snd.send_pd("pd-new menusave ; ") sleep(0.1) t2 = self.snd.get_file() print t1 print print "------" print print t2 #verifies if changed if t1 != t2: i=search_connections(self) #print i memory_box.pop(i) print "funfou!" else: print "nao funfou!"
memory_box.pop(i)
memory_connections.pop(i)
def delete(self): b1 = search_box(self.box_orig) b2 = search_box(self.box_dest) if (b1 > -1) & (b2 > -1): #get the state before removing the connection self.snd.send_pd("pd-new menusave ; ") sleep(0.1) t1 = self.snd.get_file() #try to remove the connection command = Connection.canvas + "disconnect " + str(b1) + " " + str(self.outlet) + " " + str(b2) + " " + str(self.inlet) + " ; " Connection.snd.send_pd(command) #get the state after removing the connection self.snd.send_pd("pd-new menusave ; ") sleep(0.1) t2 = self.snd.get_file() print t1 print print "------" print print t2 #verifies if changed if t1 != t2: i=search_connections(self) #print i memory_box.pop(i) print "funfou!" else: print "nao funfou!"
command = "symbolatom " + str(s.x) + " " + str(s.y) + " 10 0 0 0 - - pyata ;"
command = "obj " + str(s.x) + " " + str(s.y) + " sym ;"
def debug_symbol(): pd = Communication(False) pd.init_pd() s = Symbol(10, 10, 0) command = "symbolatom " + str(s.x) + " " + str(s.y) + " 10 0 0 0 - - pyata ;" print command pd.send_pd(command) sleep(2) command = s.set("mesa") print command pd.send_pd(command) sleep(2) pd.finish_pd()
def debug_number(): pd = Communication(False) pd.init_pd() n = Number(10, 10, 0) command = "floatatom " + str(n.x) + " " + str(n.y) + " 5 0 0 0 - - pyata ;" print command pd.send_pd(command) sleep(2) command = n.increment() print command pd.send_pd(command) sleep(2) print n.get_value() command = n.increment() print command pd.send_pd(command) sleep(2) print n.get_value() command = n.decrement() print command pd.send_pd(command) sleep(2) print n.get_value() command = n.set(20) print command pd.send_pd(command) sleep(2) print n.get_value() pd.finish_pd()
def debug_message(): pd = Communication(False) pd.init_pd() m = Message(10, 10, "alo", 0) command = "msg " + str(m.x) + " " + str(m.y) + " " + m.text + " ; " print command pd.send_pd(command) sleep(2) command = m.click() print command pd.send_pd(command) sleep(2) command = "editmode 1 ;" print command pd.send_pd(command) sleep(2) command = m.edit("mimimi") print command pd.send_pd(command) sleep(2) command = "editmode 0 ;" print command pd.send_pd(command) sleep(2) command = m.click() print command pd.send_pd(command) sleep(2) pd.finish_pd()
if user:
if Transaction().user:
def check_root(self, ids): "Check Root" account_obj = self.pool.get('analytic_account.account')
self.assertRaises(Exception, test_view('analytic_account'))
test_view('analytic_account')
def test0005views(self): ''' Test views. ''' self.assertRaises(Exception, test_view('analytic_account'))
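Aside on the removed assertion: self.assertRaises(Exception, test_view('analytic_account')) calls test_view immediately and hands its return value to assertRaises, so no exception is actually asserted; the change above simply drops the wrapper, though the deferred-call form is the usual repair. A generic Python 2 sketch of the pitfall:

import unittest

class Demo(unittest.TestCase):
    def test_eager_call(self):
        def boom():
            raise ValueError('boom')
        # wrong: boom() runs before assertRaises ever sees it,
        # so the ValueError escapes the assertion entirely
        #self.assertRaises(ValueError, boom())
        # right: pass the callable; assertRaises invokes it itself
        self.assertRaises(ValueError, boom)

unittest.main()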
print '''Usage: %s [-s||-f|-h]
print '''Usage: %s [-s|-f|-h]
def notify_dbus(args): args = args.split(':') return [NOTIFIER_DBUS, '-i', 'gtk-dialog-info', '-t', '5000', ':'.join(args[1:]), args[0]]
''' % sys.argv[0]
''' % (sys.argv[0], sys.argv[0])
def notify_dbus(args): args = args.split(':') return [NOTIFIER_DBUS, '-i', 'gtk-dialog-info', '-t', '5000', ':'.join(args[1:]), args[0]]
Futhermore it is possible to multiply the counter with an :class:`int` as
Furthermore it is possible to multiply the counter with an :class:`int` as
def __repr__(self): content = repr(self.items()) if self else '' return '%s(%s)' % (self.__class__.__name__, content)
occurence of the given `object` or entirely if it is not found.
occurrence of the given `object` or entirely if it is not found.
def remove(self, object): """ Looks for the given `object` in the list and removes the first occurrence.
Sets the given `obj` as result, set `sucess` to ``False`` if `obj`
Sets the given `obj` as result, set `success` to ``False`` if `obj`
def set(self, obj, success=True): """ Sets the given `obj` as result, set `sucess` to ``False`` if `obj` is an exception. """ self.value = obj self.success = success if self.callback and success: self.callback(obj) if self.errback and not success: self.errback(obj) with self.condition: self.ready = True self.condition.notify()
class MultiDictTextMixin(object):
class MultiDictTestMixin(object):
def clear(self): d = self.dict_class() with Assert.raises(TypeError): d.clear()
class TestMultiDict(TestBase, MultiDictTextMixin, DictTestMixin):
class TestMultiDict(TestBase, MultiDictTestMixin, DictTestMixin):
def popitemlist(self): d = self.dict_class({'foo': 'bar'}) Assert(d.popitemlist()) == ('foo', ['bar']) with Assert.raises(KeyError): d.popitemlist() d = self.dict_class({'foo': ['bar', 'baz']}) Assert(d.popitemlist()) == ('foo', ['bar', 'baz']) with Assert.raises(KeyError): d.popitemlist()
return (self[k] for k in self)
return (dict.__getitem__(self, k) for k in self)
def itervalues(self): """ Returns an iterator over the values of all items in insertion order. """ return (self[k] for k in self)
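The itervalues change above bypasses the subclass's own __getitem__. A toy sketch (not the project's real MultiDict, whose lookup presumably returns only the first value stored per key) of why that matters:

class ToyMultiDict(dict):
    # toy stand-in: item lookup returns only the first stored value
    def __getitem__(self, key):
        return dict.__getitem__(self, key)[0]

d = ToyMultiDict()
dict.__setitem__(d, 'foo', ['bar', 'baz'])
print d['foo']                    # 'bar': the overridden lookup drops 'baz'
print dict.__getitem__(d, 'foo')  # ['bar', 'baz']: the raw storage is intact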
sysconf_name = 'SC_NPROCESSORS_ONLN' if sysconf_name in os.sysconf_names: return os.sysconf('SC_NPROCESSORS_ONLN')
try: cpu_count = os.sysconf('SC_NPROCESSORS_ONLN') if cpu_count >= 1: return cpu_count except AttributeError, ValueError:
def get_cpu_count(default=None): sysconf_name = 'SC_NPROCESSORS_ONLN' if sysconf_name in os.sysconf_names: return os.sysconf('SC_NPROCESSORS_ONLN') if default is not None: return default raise NotImplementedError()
Return `r` length subsequences of elements from the `iterable` allowing
Return `r` length sub-sequences of elements from the `iterable` allowing
def combinations_with_replacement(iterable, r): """ Return `r` length subsequences of elements from the `iterable` allowing individual elements to be replaced more than once. Combinations are emitted in lexicographic sort order. So, if the input `iterable` is sorted, the combinations tuples will be produced in sorted order. Elements are treated as unique based on their position, not on their value. So if the input elements are unique, the generated combinations will also be unique. The number of items returned is ``(n + r - 1)! / r! / (n - 1)!`` when ``n > 0``. .. note:: Software and documentation for this function are taken from CPython, :ref:`license details <psf-license>`. """ pool = tuple(iterable) n = len(pool) for indices in product(xrange(n), repeat=r): if sorted(indices) == list(indices): yield tuple(pool[i] for i in indices)
Cost in terms of lazyness of supported operators, this does not include
Cost in terms of laziness of supported operators, this does not include
def __and__(self, other): if not isinstance(other, self.__class__): return NotImplemented result = Counter() if len(self) < len(other): self, other = other, self for element in ifilter(self.__contains__, other): newcount = min(self[element], other[element]) if newcount > 0: result[element] = newcount return result
except ValueError, KeyError:
except (ValueError, KeyError):
def get_cpu_count(default=None): if sys.platform == 'win32': try: return int(os.environ['NUMBER_OF_PROCESSORS']) except ValueError, KeyError: # value could be anything or not existing pass if sys.platform in ('bsd', 'darwin'): try: return int(os.popen('sysctl -n hw.ncpu').read()) except ValueError: # don't trust the outside world pass try: cpu_count = os.sysconf('SC_NPROCESSORS_ONLN') if cpu_count >= 1: return cpu_count except AttributeError, ValueError: # availability is restricted to unix pass if default is not None: return default raise NotImplementedError()
except AttributeError, ValueError:
except (AttributeError, ValueError):
def get_cpu_count(default=None): if sys.platform == 'win32': try: return int(os.environ['NUMBER_OF_PROCESSORS']) except ValueError, KeyError: # value could be anything or not existing pass if sys.platform in ('bsd', 'darwin'): try: return int(os.popen('sysctl -n hw.ncpu').read()) except ValueError: # don't trust the outside world pass try: cpu_count = os.sysconf('SC_NPROCESSORS_ONLN') if cpu_count >= 1: return cpu_count except AttributeError, ValueError: # availability is restricted to unix pass if default is not None: return default raise NotImplementedError()
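Both except fixes in get_cpu_count repair the same Python 2 pitfall: "except A, B:" catches only A and rebinds the caught exception to the name B. A minimal sketch with hypothetical helper names:

def broken(d):
    try:
        return d['k']
    except ValueError, KeyError:  # catches ONLY ValueError; a catch rebinds the name KeyError
        return 'caught'

def fixed(d):
    try:
        return d['k']
    except (ValueError, KeyError):  # the tuple really catches both
        return 'caught'

print fixed({})  # -> caught
try:
    broken({})
except KeyError:
    print 'broken() let the KeyError escape'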
def find_crosses(satellite1, start, end, satellite2='calipso', t_window=2, lon_range=None,
def find_crosses(satellite1, start, end, satellite2='calipso', time_window=20, lon_range=None,
def find_crosses(satellite1, start, end, satellite2='calipso', t_window=2, lon_range=None, lat_range=None): """Use snotimes to find satellite passes where the given *satellite* crosses CALIPSO's path within a time window *t_window*. Sort out any cross points which are outside *lon_range* and/or *lat_range*.""" import subprocess cmd = [SNO_EXECUTABLE, satellite1, satellite2, start, end, str(t_window)] print(' '.join(cmd)) process = subprocess.Popen(cmd, stdout=subprocess.PIPE) return parse_crosses_file(process.stdout, lon_range, lat_range)
import subprocess
from subprocess import Popen, PIPE import os
def find_crosses(satellite1, start, end, satellite2='calipso', t_window=2, lon_range=None, lat_range=None): """Use snotimes to find satellite passes where the given *satellite* crosses CALIPSO's path within a time window *t_window*. Sort out any cross points which are outside *lon_range* and/or *lat_range*.""" import subprocess cmd = [SNO_EXECUTABLE, satellite1, satellite2, start, end, str(t_window)] print(' '.join(cmd)) process = subprocess.Popen(cmd, stdout=subprocess.PIPE) return parse_crosses_file(process.stdout, lon_range, lat_range)
cmd = [SNO_EXECUTABLE, satellite1, satellite2, start, end, str(t_window)]
wd = os.getcwd() os.chdir(os.path.dirname(SNO_EXECUTABLE)) cmd = [SNO_EXECUTABLE, satellite1, satellite2, start, end, str(time_window)]
def find_crosses(satellite1, start, end, satellite2='calipso', t_window=2, lon_range=None, lat_range=None): """Use snotimes to find satellite passes where the given *satellite* crosses CALIPSO's path within a time window *t_window*. Sort out any cross points which are outside *lon_range* and/or *lat_range*.""" import subprocess cmd = [SNO_EXECUTABLE, satellite1, satellite2, start, end, str(t_window)] print(' '.join(cmd)) process = subprocess.Popen(cmd, stdout=subprocess.PIPE) return parse_crosses_file(process.stdout, lon_range, lat_range)
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
process = Popen(cmd, stdout=PIPE, stderr=PIPE) process.stderr.close() os.chdir(wd)
def find_crosses(satellite1, start, end, satellite2='calipso', t_window=2, lon_range=None, lat_range=None): """Use snotimes to find satellite passes where the given *satellite* crosses CALIPSO's path within a time window *t_window*. Sort out any cross points which are outside *lon_range* and/or *lat_range*.""" import subprocess cmd = [SNO_EXECUTABLE, satellite1, satellite2, start, end, str(t_window)] print(' '.join(cmd)) process = subprocess.Popen(cmd, stdout=subprocess.PIPE) return parse_crosses_file(process.stdout, lon_range, lat_range)
parser.set_usage("usage: %prog [options] satellite1 [satellite2 [...]]") parser.add_option('-t', '--time_window', type='int', default=2, help="Time window for crossing.")
parser.set_usage("usage: %prog [options] <start YYMMDD> <end YYMMDD> satellite1 [satellite2 [...]]\n" "Find times and locations where Calipso and the specified satellites cross paths.") parser.add_option('-t', '--time_window', type='int', default=20, help="Time window for crossing. Default: 20 min.")
def parse_range(range): """Parse *range*, which should look like -35.7354:25.1. Return (lower, upper).""" l = range.split(':') return (float(l[0]), float(l[1]))
for satellite in args: find_crosses(satellite, options.time_window, lon_range, lat_range)
start = args[0] end = args[1] crosses = set() for satellite in args[2:]: crosses.update(find_crosses(satellite, start, end, time_window=options.time_window, lon_range=lon_range, lat_range=lat_range)) for cross in sorted(crosses): print(cross)
def parse_range(range): """Parse *range*, which should look like -35.7354:25.1. Return (lower, upper).""" l = range.split(':') return (float(l[0]), float(l[1]))
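A quick usage check of parse_range as documented in its docstring:

print parse_range('-35.7354:25.1')  # -> (-35.7354, 25.1)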
i=a+21
i=a
def FindFiles(avhrrfile, avhrr_dir_date): #avhrr_sat = 'NOAA-18' #RESOLUTION = 5 #avhrr_dir_date = ['06','2007','06','2007'] #CL_DIR = "%s/CloudSat/5km/2007/06" %(SAT_DIR) #CAL_DIR = "%s/Calipso/5km/2007/06" %(SAT_DIR) #AVHRR_DIR = "%s/%s/5km/2007/06" %(SAT_DIR,avhrr_sat) #cloudtypefile = 'noaa18_20070622_1208_10760_satproj_00000_07619_cloudtype.h5' avhrr_split = os.path.basename(avhrrfile).split("_") avhrr_year = avhrr_split[1][0:4] avhrr_month = avhrr_split[1][4:6] #avhrr_day = avhrr_split[1][6:8] #avhrr_hour = avhrr_split[2][0:2] #avhrr_min = avhrr_split[2][2:4] #avhrr_date = "%s %s %s %s %s" %(avhrr_year,avhrr_month,avhrr_day,avhrr_hour,avhrr_min) #avhrr_date = time.strptime(avhrr_date, "%Y %m %d %H %M") #avhrr_sec = time.mktime(avhrr_date)-sec_timeThr avhrr_sec = AvhrrSec(avhrrfile) avhrr_sec = avhrr_sec-sec_timeThr new_date = time.localtime(avhrr_sec) # Controls if more than one cloudsat/calipso dir is necessary all_cl = [] all_cal = [] if avhrr_dir_date[0]!=avhrr_dir_date[2]: CL_DIR = "%s/CloudSat/%skm/%s/%s" %(SAT_DIR,RESOLUTION,avhrr_dir_date[1],avhrr_dir_date[0]) CAL_DIR = "%s/Calipso/%skm/%s/%s" %(SAT_DIR,RESOLUTION,avhrr_dir_date[1],avhrr_dir_date[0]) CL_DIR_1 = "%s/CloudSat/%skm/%s/%s" %(SAT_DIR,RESOLUTION,avhrr_dir_date[3],avhrr_dir_date[2]) CAL_DIR_1 = "%s/Calipso/%skm/%s/%s" %(SAT_DIR,RESOLUTION,avhrr_dir_date[3],avhrr_dir_date[2]) all_cl = os.listdir(CL_DIR) all_cl_1 = os.listdir(CL_DIR_1) all_cl.extend(all_cl_1) all_cal = os.listdir(CAL_DIR) all_cal_1 = os.listdir(CAL_DIR_1) all_cal.extend(all_cal_1) else: CL_DIR = "%s/CloudSat/%skm/%s/%s" %(SAT_DIR,RESOLUTION,avhrr_dir_date[1],avhrr_dir_date[0]) CAL_DIR = "%s/Calipso/%skm/%s/%s" %(SAT_DIR,RESOLUTION,avhrr_dir_date[1],avhrr_dir_date[0]) all_cl = os.listdir(CL_DIR) all_cal = os.listdir(CAL_DIR) all_cl_geo = [] all_cl_cwc = [] #This just beqause the file in one and 5 km data set have different names if RESOLUTION==1: clsat_type_place=3 clsat_time_place=0 elif RESOLUTION==5: clsat_type_place=4 clsat_time_place=1 for i in range(len(all_cl)): cloudsat_type = all_cl[i].split(".h5")[0] cloudsat_type = string.join(cloudsat_type.split("_")[clsat_type_place].split("-")[1:],"-") if cloudsat_type == 'GEOPROF': all_cl_geo.append(all_cl[i]) elif cloudsat_type == 'CWC-RVOD': all_cl_cwc.append(all_cl[i]) if clsat_type==1: all_clsat=all_cl_geo elif clsat_type==2: all_clsat=all_cl_cwc CLOUDSAT_DIR=[] ncl=0 for j in range(len(all_clsat)): all_clsat.sort() cloudsat_split = all_clsat[j].split("_") cloudsat_date = "%s %s %s %s %s" %(cloudsat_split[clsat_time_place][0:4], cloudsat_split[clsat_time_place][4:7],cloudsat_split[clsat_time_place][7:9],cloudsat_split[clsat_time_place][9:11],cloudsat_split[clsat_time_place][11:13]) cloudsat_date = time.strptime(cloudsat_date, "%Y %j %H %M %S") cloudsat_sec = time.mktime(cloudsat_date) if cloudsat_date[1] == int(avhrr_dir_date[0]): CLOUDSAT_DIR.append(CL_DIR) else: CLOUDSAT_DIR.append(CL_DIR_1) if cloudsat_sec >= avhrr_sec and ncl==0: #clsat_file = "'%s/%s' '%s/%s' '%s/%s'" %(CLOUDSAT_DIR[j-1],all_clsat[j-1],CLOUDSAT_DIR[j],all_clsat[j],CLOUDSAT_DIR[j+1],all_clsat[j+1]) clsat_file = "'%s/%s' '%s/%s'" %(CLOUDSAT_DIR[j-1],all_clsat[j-1],CLOUDSAT_DIR[j],all_clsat[j]) ncl=ncl+1 elif cloudsat_sec >= avhrr_sec and ncl==1: clsat_file = "%s '%s/%s'" %(clsat_file,CLOUDSAT_DIR[j],all_clsat[j]) ncl=ncl+1 if ncl>1: break CALIPSO_DIR=[] ncal=0 for a in range(len(all_cal)): all_cal.sort() cal_split = all_cal[a].split("-") cal_year = cal_split[3].split(".")[1] cal_month = cal_split[4] cal_day = cal_split[5].split("T")[0] cal_hour = cal_split[5].split("T")[1] cal_min = cal_split[6] cal_sec = cal_split[7][0:2] cal_date = "%s %s %s %s %s %s" %(cal_year,cal_month,cal_day,cal_hour,cal_min,cal_sec) cal_date = time.strptime(cal_date, "%Y %m %d %H %M %S") cal_tot_sec = time.mktime(cal_date) if cal_date[1] == int(avhrr_dir_date[0]): CALIPSO_DIR.append(CAL_DIR) else: CALIPSO_DIR.append(CAL_DIR_1) if cal_tot_sec >= avhrr_sec and ncal==0: cal_file = "'%s/%s' '%s/%s' '%s/%s'" %(CALIPSO_DIR[a-2],all_cal[a-2],CALIPSO_DIR[a-1],all_cal[a-1],CALIPSO_DIR[a],all_cal[a]) ncal=ncal+1 elif cal_tot_sec >= avhrr_sec and ncal>=1 and ncal<4: cal_file = "%s '%s/%s'" %(cal_file,CALIPSO_DIR[a],all_cal[a]) ncal=ncal+1 if ncl >3: break AVHRR_DIR = "%s/%s/%skm/%s/%s" %(SAT_DIR,avhrr_sat,RESOLUTION,avhrr_year,avhrr_month) #test=all_cal.sort() avhrr_join = string.join(avhrr_split[0:-1],"_") cloudtype_file = "%s/export/%s_cloudtype.h5" %(AVHRR_DIR, avhrr_join) ctth_file = "%s/export/%s_ctth.h5" %(AVHRR_DIR, avhrr_join) avhrr_file = "%s/import/%s_avhrr.h5" %(AVHRR_DIR, avhrr_join) nwp_tsur_file = "%s/import/%s_nwp_tsur.h5" %(AVHRR_DIR, avhrr_join) sunsatangles_file = "%s/import/%s_sunsatangles.h5" %(AVHRR_DIR, avhrr_join) # avhrr_date = (avhrr_year,avhrr_month,avhrr_day,avhrr_hour,avhrr_min) return clsat_file, cal_file, cloudtype_file, ctth_file, avhrr_file, nwp_tsur_file, sunsatangles_file
if len(avhrr_height) > 20:
if len(avhrr_height_work) > 20:
def CalculateStatistics(mode, clsatObj, statfile, caObj, cal_MODIS_cflag, cal_vert_feature, avhrr_ctth_csat_ok, data_ok, cal_data_ok, avhrr_ctth_cal_ok, caliop_max_height, process_calipso_ok, Resolution): import Scientific.Statistics import numpy
upplosning = len(cllon)/500
N = len(cllon)/500 + 2 upplosning = len(cllon)/N
def plotSatelliteTrajectory(cllon,cllat,calon,calat,avhrlon,avhrlat,trajectoryname): from mpl_toolkits.basemap import Basemap import pylab #import pdb #pdb.set_trace() #from setup import AREA upplosning = len(cllon)/500 print('Resolution = %i' %(upplosning)) m = Basemap(projection='cyl',llcrnrlat=-90,urcrnrlat=90,\ llcrnrlon=-180,urcrnrlon=180,resolution='c') m.drawmapboundary() m.drawcoastlines() m.drawgreatcircle(cllon[0],cllat[0],cllon[1],cllat[1],color='red') m1 = m.drawgreatcircle(cllon[0],cllat[0],cllon[1],cllat[1],color='red') #m.drawgreatcircle(calon[0],calat[0],calon[1],calat[1],color='green') #m.drawgreatcircle(avhrlon[0],avhrlat[0],avhrlon[1],avhrlat[1],color='blue') print("Draw CloudSat trajectory") for i in range(1,len(cllon)-upplosning,upplosning):#len(cllon)-1,500): if cllon[i] < 0 and cllon[i+upplosning] > 0: pass else: m.drawgreatcircle(cllon[i],cllat[i],cllon[i+upplosning],cllat[i+upplosning],color='red') print("Draw Calipso trajectory") #for j in range(1,len(calon)-upplosning,upplosning): # if calon[j] < 0 and calon[j+upplosning] > 0: # pass # else: # m.drawgreatcircle(calon[j],calat[j],calon[j+upplosning],calat[j+upplosning],color='green') #print("Draw Avhrr trajectory") #for k in range(1,len(avhrlon)-upplosning,upplosning): # if avhrlon[k] < 0 and avhrlon[k+upplosning] > 0: # pass # else: # m.drawgreatcircle(avhrlon[k],avhrlat[k],avhrlon[k+upplosning],avhrlat[k+upplosning],color='blue') m.drawgreatcircle(cllon[i],cllat[i],cllon[-1],cllat[-1],color='red') #m.drawgreatcircle(calon[j],calat[j],calon[-1],calat[-1],color='green') #m.drawgreatcircle(avhrlon[k],avhrlat[k],avhrlon[-1],avhrlat[-1],color='blue') #pdb.set_trace() #pylab.legend(('CloudSat','Calipso','Avhrr'),loc=0) pylab.legend((m1),['CloudSat/Calipso'],loc=0) #pylab.legend(("CloudSat"),loc=0) epsfig = '%s.eps' %trajectoryname #pngfig = '%s.png' %trajectoryname pylab.savefig(epsfig) #pylab.savefig(pngfig)
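The resolution change above guards against Python 2 integer division: for trajectories shorter than 500 points, len(cllon)/500 is 0, and a zero step makes range() raise ValueError. A tiny check:

cllon = range(120)    # a short track, fewer than 500 points
print len(cllon)/500  # 0 under Python 2 integer division: a fatal range() step
N = len(cllon)/500 + 2
print len(cllon)/N    # 60: always a usable positive step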
pdb.set_trace()
def getCaliopAvhrrMatch(avhrrfile,calipsofile,ctypefile,ctthfile,surftfile,sunanglefile): import string,os import pps_io import epshdf import Numeric basename = os.path.basename(ctypefile).split(".h5")[0] base_sat = basename.split("_")[-8] base_year = basename.split("_")[-7][0:4] base_month = basename.split("_")[-7][4:6] basename = string.join(basename.split("_")[0:4],"_") #dirname = os.path.dirname(ctypefile).split("/")[0:2] #dirname = string.join(dirname,"/") savepath = "%s/%s/1km/%s/%s/%s" %(RESHAPE_DIR, base_sat, base_year, base_month, AREA1KM) ca_match_file = "%s/1km_%s_caliop_avhrr_match.h5"%(savepath,basename) if not os.path.exists(savepath): os.makedirs(savepath) #print "Match-up file: ",ca_match_file if not os.path.exists(ca_match_file): # Read AVHRR lon,lat data write_log("INFO","Read AVHRR geolocation data") avhrrGeoObj = pps_io.readAvhrrGeoData(avhrrfile) pdb.set_trace() # Read AVHRR sunsatangels (satellite zenith angle) write_log("INFO","Read AVHRR Sun -and Satellites Angels data") avhrrAngObj = pps_io.readSunSatAngles(sunanglefile) #, withAbsoluteAzimuthAngles=True) # Read AVHRR data write_log("INFO","Read AVHRR data") avhrrObj = pps_io.readAvhrrData(avhrrfile) # Read PPS Cloud Type data write_log("INFO","Read PPS Cloud Type") ctype = epshdf.read_cloudtype(ctypefile,1,1,0) try: ctth = epshdf.read_cloudtop(ctthfile,1,1,1,0,1) except: ctth = None # -------------------------------------------------------------------- write_log("INFO","Read CALIPSO data") # Read CALIPSO Lidar (CALIOP) data: calipso = reshapeCalipso1km(calipsofile,avhrrGeoObj) pdb.set_trace() # Read remapped NWP Surface temperature data write_log("INFO","Read NWP surface temperature") nwpinst = epshdf.read_nwpdata(surftfile) surft = nwpinst.gain*nwpinst.data.astype('d')+nwpinst.intercept retv,min_diff,max_diff = match_calipso_avhrr(ctypefile,calipso,avhrrGeoObj,avhrrObj,ctype,ctth,surft,avhrrAngObj) writeCaliopAvhrrMatchObj(ca_match_file,retv,6) else: retv = readCaliopAvhrrMatchObj(ca_match_file) min_diff = -9.0 # We don't store this information - only extracted during first time of processing max_diff = -9.0 # We don't store this information - only extracted during first time of processing return retv,min_diff,max_diff
return self.price_set.all()[0]
price_set = self.price_set.all() if price_set: return price_set[0] else: return None
def current_day_price(self): return self.price_set.all()[0]
return self.price_set.all()[1]
price_set = self.price_set.all() if price_set and price_set.count() >= 2: return price_set[1] else: return None
def previous_day_price(self): return self.price_set.all()[1]
return self.price_set.all()[:15]
price_set = self.price_set.all() if price_set: return price_set[:15] else: return []
def latest_15_day_price_set(self): return self.price_set.all()[:15]
until_date = self.price_set.all()[0].date from_date = until_date - timedelta(52*7) self._last_52_week_price_set = self.price_set.filter(date__gt=from_date, date__lte=until_date)
current_day_price = self.current_day_price if current_day_price: until_date = current_day_price.date from_date = until_date - timedelta(52*7) self._last_52_week_price_set = self.price_set.filter(date__gt=from_date, date__lte=until_date) else: self._last_52_week_price_set = []
def last_52_week_price_set(self): try: self._last_52_week_price_set except AttributeError: until_date = self.price_set.all()[0].date from_date = until_date - timedelta(52*7) self._last_52_week_price_set = self.price_set.filter(date__gt=from_date, date__lte=until_date) return self._last_52_week_price_set
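The four price_set fixes above share one guard: never index a possibly-empty queryset. A hedged sketch with a plain list standing in for the Django queryset:

def current_day_price(price_set):
    # an empty result set yields None instead of raising IndexError
    return price_set[0] if price_set else None

print current_day_price([])      # None
print current_day_price([42.0])  # 42.0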
img -= minimum(img,T)
img -= np.minimum(img,T)
def softthreshold(img,T): ''' softthreshold(img,T) Implement a soft threshold: img[i] = max(img[i]-T,0) Processes the image inplace, return a reference to img. Use B = softthreshold(A.copy(),T) to get a copy. @see hardthreshold ''' img -= minimum(img,T) return img
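Given the np.minimum form introduced above, a short numpy check of the soft threshold (a sketch assuming numpy is imported as np):

import numpy as np

def softthreshold(img, T):
    img -= np.minimum(img, T)  # elementwise img[i] = max(img[i] - T, 0)
    return img

A = np.array([1., 3., 5.])
print softthreshold(A.copy(), 2.)  # -> [ 0.  1.  3.]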
def rc(img,ignore_zeros=False): """ T = rc(img, ignore_zeros=False) Calculate a threshold according to the RC method. @param ignore_zeros: Whether to ignore zero valued pixels (default: False) """ hist=fullhistogram(img) if ignore_zeros: if hist[0] == img.size: return 0 hist[0]=0 N=hist.size sum1 = cumsum(arange(N) * hist) sum2 = cumsum(hist) sum3 = flipud(cumsum(flipud(arange(N) * hist))) sum4 = flipud(cumsum(flipud(hist))) maxt=N-1 while hist[maxt] == 0: maxt -= 1 res=maxt t=0 while t < min(maxt,res): res=(sum1[t]/sum2[t] + sum3[t+1]/sum4[t+1])/2 t += 1 return res
def rc(img,ignore_zeros=False): """ T = rc(img, ignore_zeros=False) Calculate a threshold according to the RC method. @param ignore_zeros: Whether to ignore zero valued pixels (default: False) """ hist=fullhistogram(img) if ignore_zeros: if hist[0] == img.size: return 0 hist[0]=0 N=hist.size # Precompute most of what we need: sum1 = cumsum(arange(N) * hist) sum2 = cumsum(hist) sum3 = flipud(cumsum(flipud(arange(N) * hist))) sum4 = flipud(cumsum(flipud(hist))) maxt=N-1 while hist[maxt] == 0: maxt -= 1 res=maxt t=0 while t < min(maxt,res): res=(sum1[t]/sum2[t] + sum3[t+1]/sum4[t+1])/2 t += 1 return res
version='0.4.9',
version='0.5.0-rc0',
def test_pyversion(): import sys maj,min,_,_,_ = sys.version_info if (maj,min) < (2,5): print "Your Python interpreter is too old for Pyslic.\nUpgrade to 2.5 or newer.\n" sys.exit(1)
author_email='[email protected]',
author_email='[email protected]',
def test_pyversion(): import sys maj,min,_,_,_ = sys.version_info if (maj,min) < (2,5): print "Your Python interpreter is too old for Pyslic.\nUpgrade to 2.5 or newer.\n" sys.exit(1)
weight=nu.reshape(self.weight[key,:],(nkey,self.da)))
weight=self.weight[key,:])
def __getitem__(self,key): if not isinstance(key,slice): nkey= 1 else: nkey= len(self.a[key,0]) if len(self.acov.shape) == 2: acov= self.acov[key,:] dacov= (nkey,self.da) else: acov= self.acov[key,:,:] dacov= (nkey,self.da,self.da) if hasattr(self,'weight'): out= xddata(a=nu.reshape(self.a[key,:],(nkey,self.da)), acov=nu.reshape(acov,dacov), weight=nu.reshape(self.weight[key,:],(nkey,self.da))) else: out= xddata(a=nu.reshape(self.a[key,:],(nkey,self.da)), acov=nu.reshape(acov,dacov)) #Also transfer tags if self._alltags: for tag in self._tags: thisshape= self.__dict__[tag].shape thistag= nu.reshape(self.__dict__[tag],(thisshape[0],nu.prod(thisshape)/thisshape[0])) tmptag= thistag[key,:] outshape=[nkey] nshape= len(list(thisshape)) thisshape= [thisshape[ii] for ii in range(nshape) if ii != 0] outshape.extend([s for s in thisshape]) outshape= tuple(outshape) out.__dict__[tag]= nu.reshape(tmptag,outshape) out._alltags= self._alltags out._tags= self._tags return out
weight=self.weight[key,:])
weight=self.weight[key])
def __getitem__(self,key): if not isinstance(key,slice): nkey= 1 else: nkey= len(self.a[key,0]) if len(self.acov.shape) == 2: acov= self.acov[key,:] dacov= (nkey,self.da) else: acov= self.acov[key,:,:] dacov= (nkey,self.da,self.da) if hasattr(self,'weight'): out= xddata(a=nu.reshape(self.a[key,:],(nkey,self.da)), acov=nu.reshape(acov,dacov), weight=self.weight[key,:]) else: out= xddata(a=nu.reshape(self.a[key,:],(nkey,self.da)), acov=nu.reshape(acov,dacov)) #Also transfer tags if self._alltags: for tag in self._tags: thisshape= self.__dict__[tag].shape thistag= nu.reshape(self.__dict__[tag],(thisshape[0],nu.prod(thisshape)/thisshape[0])) tmptag= thistag[key,:] outshape=[nkey] nshape= len(list(thisshape)) thisshape= [thisshape[ii] for ii in range(nshape) if ii != 0] outshape.extend([s for s in thisshape]) outshape= tuple(outshape) out.__dict__[tag]= nu.reshape(tmptag,outshape) out._alltags= self._alltags out._tags= self._tags return out
out.append(_sample_normal(self.mean[c,:],self.covar[c,:,:],
out.extend(_sample_normal(self.mean[c,:],self.covar[c,:,:],
def sample(self,nsample=1): """ NAME:
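The append-to-extend change above matters because append adds the whole batch of samples as a single element, while extend splices the samples in one by one. A minimal illustration:

out = []
out.append([1, 2])
print out  # -> [[1, 2]]: the batch became one nested element
out = []
out.extend([1, 2])
print out  # -> [1, 2]: each sample lands in the list individually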
tmp_tags.pop(ii)
tmp_tags.pop(ii-popped) popped+= 1
def __init__(self,**kwargs): if kwargs.has_key('filename'): tmp_ext= re.split('\.',kwargs['filename'])[-1] if tmp_ext == 'gz': tmp_ext= re.split('\.',kwargs['filename'])[-2]+'.'+tmp_ext if tmp_ext == 'fit' or tmp_ext == 'fits' or \ tmp_ext == 'fit.gz' or tmp_ext == 'fits.gz': if kwargs.has_key('atag'): atag= kwargs['atag'] else: atag= 'a' if kwargs.has_key('acovtag'): acovtag= kwargs['acovtag'] else: acovtag= 'acov' if kwargs.has_key('wtag'): wtag= kwargs['wtag'] else: wtag= 'weight' import pyfits hdulist= pyfits.open(kwargs['filename']) tbdata= hdulist[1].data self.a= nu.array(tbdata.field(atag)).astype('float64') if acovtag.lower() in [name.lower() for name in hdulist[1].columns.names]: self.acov= nu.array(tbdata.field(acovtag)).astype('float64') if self.acov.shape[1] != self.a.shape[1]: self.acov= nu.reshape(self.acov,(self.a.shape[0],self.a.shape[1],self.a.shape[1])) else: self.acov= nu.zeros(self.a.shape) if kwargs.has_key('useweights') and kwargs['useweights']: self.weight= nu.array(tbdata.field(wtag)).astype('float64') if kwargs.has_key('alltags') and kwargs['alltags']: tags= hdulist[1].columns.names tmp_tags= deepcopy(tags) for ii in range(len(tags)): if tags[ii].lower() == atag.lower() or \ tags[ii].lower() == acovtag.lower(): tmp_tags.pop(ii) if kwargs.has_key('useweights') and kwargs['useweights'] and tags[ii].lower() == wtag.lower(): tmp_tags.pop(ii) tags= tmp_tags for tag in tags: self.__dict__[tag.lower()]= tbdata.field(tag) elif kwargs.has_key('a'): self.a= kwargs['a'] if kwargs.has_key('acov'): self.acov= kwargs['acov'] else: self.acov= nu.zeros(self.a.shape) if kwargs.has_key('weight'): self.weight= kwargs['weight'] self.da= self.a.shape[1]
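The pop fix above compensates for index drift: each pop from tmp_tags shifts every later position left, so an index computed against the original tags list must be offset by the number of items already removed. A sketch with made-up tag names:

tags = ['a', 'acov', 'weight', 'x']
tmp_tags = list(tags)
popped = 0
for ii in range(len(tags)):
    if tags[ii] in ('a', 'acov', 'weight'):
        tmp_tags.pop(ii - popped)  # offset by the count already removed
        popped += 1
print tmp_tags  # -> ['x']; a plain pop(ii) would remove the wrong slots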
tags[ii].lower() == acovtag.lower:
tags[ii].lower() == acovtag.lower():
def __init__(self,**kwargs): if kwargs.has_key('filename'): tmp_ext= re.split('\.',kwargs['filename'])[-1] if tmp_ext == 'gz': tmp_ext= re.split('\.',kwargs['filename'])[-2]+'.'+tmp_ext if tmp_ext == 'fit' or tmp_ext == 'fits' or \ tmp_ext == 'fit.gz' or tmp_ext == 'fits.gz': if kwargs.has_key('atag'): atag= kwargs['atag'] else: atag= 'a' if kwargs.has_key('acovtag'): acovtag= kwargs['acovtag'] else: acovtag= 'acov' if kwargs.has_key('wtag'): wtag= kwargs['wtag'] else: wtag= 'weight' import pyfits hdulist= pyfits.open(kwargs['filename']) tbdata= hdulist[1].data self.a= nu.array(tbdata.field(atag)).astype('float64') if acovtag.lower() in [name.lower() for name in hdulist[1].columns.names]: self.acov= nu.array(tbdata.field(acovtag)).astype('float64') if self.acov.shape[1] != self.a.shape[1]: self.acov= nu.reshape(self.acov,(self.a.shape[0],self.a.shape[1],self.a.shape[1])) else: self.acov= nu.zeros(self.a.shape) if kwargs.has_key('useweights') and kwargs['useweights']: self.weight= nu.array(tbdata.field(wtag)).astype('float64') if kwargs.has_key('alltags') and kwargs['alltags']: tags= hdulist[1].columns.names tmp_tags= deepcopy(tags) for ii in range(len(tags)): if tags[ii].lower() == atag.lower() or \ tags[ii].lower() == acovtag.lower: tmp_tags.pop(ii) if kwargs.has_key('useweights') and kwargs['useweights'] and tags[ii].lower() == wtag.lower(): tmp_tags.pop(ii) tags= tmp_tags for tag in tags: self.__dict__[tag.lower()]= tbdata.field(tag) elif kwargs.has_key('a'): self.a= kwargs['a'] if kwargs.has_key('acov'): self.acov= kwargs['acov'] else: self.acov= nu.zeros(self.a.shape) if kwargs.has_key('weight'): self.weight= kwargs['weight'] self.da= self.a.shape[1]
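The one-character fix above (acovtag.lower to acovtag.lower()) matters because without the call the comparison is between a string and a bound method, which is never equal:

s = 'ACOV'
print s.lower == 'acov'    # False: compares a method object against a str
print s.lower() == 'acov'  # True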
self.log = logging.getLogger()
self.log = logging.getLogger( '%s.%s' % (__name__, self.__class__.__name__)) self.workingbranch_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '..')) self.build_dir = os.path.join(self.workingbranch_dir, 'build') if not os.path.isdir(self.build_dir): self.log.debug('Creating build dir: %s' % (self.build_dir,)) os.mkdir(self.build_dir) else: self.log.debug('Using build dir: %s' % (self.build_dir,))
def __init__(self, buildversion, log=None): self.buildversion = buildversion if log is not None: self.log = log else: self.log = logging.getLogger()
workingbranch_dir = os.path.join(os.path.dirname(__file__), '..') build_dir = os.path.join(workingbranch_dir, 'build') if not os.path.isdir(build_dir): self.log.debug('Creating working dir: %s' % (build_dir,)) os.mkdir(build_dir) else: self.log.debug('Using working dir: %s' % (build_dir,))
workingbranch_dir = self.workingbranch_dir build_dir = self.build_dir
def main(self, argv): """ The main entry point for the build-apidocs command
))
), stdout=PIPE, stderr=PIPE,)
def producer(): with open(yuizip_path, 'w') as yuizip: self.log.debug('Downloading YUI Doc') download = urlopen(YUIDOC_URL) while True: bytes = download.read(1024*10) if not bytes: break else: yuizip.write(bytes) yield bytes
workingbranch_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '..')) build_dir = os.path.join(workingbranch_dir, 'build')
workingbranch_dir = self.workingbranch_dir build_dir = self.build_dir self.log.debug('Export versioned files to a build folder')
def main(self, argv): workingbranch_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '..'))
self.log.debug('Record the branch version')
def main(self, argv): workingbranch_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '..'))
self.log.debug('Generate apidocs')
def main(self, argv): workingbranch_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '..'))
self.log.debug('Generate archive')
def main(self, argv): workingbranch_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '..'))
self.log.error("isURICorrect: %s" % str(e))
pass
def isURICorrect(self, uri): """ """ returned = False try: data = self.dbc.getBrowserUri() except viewpointdirect.BrowserNotPresent, e: self.log.error("isURICorrect: %s" % str(e)) else: if data: rc = simplejson.loads(data) vp_uri = rc['data'] admin_uri = self.config.get('admin_uri', None) if vp_uri.startswith(uri) or vp_uri.startswith(admin_uri): returned = True else: self.log.info("isURICorrect: current URI:'%s', correct URI:'%s'." % (vp_uri, uri)) return returned
if vp_uri.startswith(uri) or vp_uri.startswith(admin_uri): returned = True
if admin_uri: if vp_uri.startswith(uri) or vp_uri.startswith(admin_uri): returned = True else: self.log.info("isURICorrect: allowed URI:'%s', incorrect URI:'%s'." % ((vp_uri, admin_uri), uri))
def isURICorrect(self, uri): """ """ returned = False try: data = self.dbc.getBrowserUri() except viewpointdirect.BrowserNotPresent, e: self.log.error("isURICorrect: %s" % str(e)) else: if data: rc = simplejson.loads(data) vp_uri = rc['data'] admin_uri = self.config.get('admin_uri', None) if vp_uri.startswith(uri) or vp_uri.startswith(admin_uri): returned = True else: self.log.info("isURICorrect: current URI:'%s', correct URI:'%s'." % (vp_uri, uri)) return returned
self.log.info("isURICorrect: current URI:'%s', correct URI:'%s'." % (vp_uri, uri))
if vp_uri.startswith(uri): returned = True else: self.log.info("isURICorrect: current URI:'%s', incorrect URI:'%s'." % (vp_uri, uri))
def isURICorrect(self, uri): """ """ returned = False try: data = self.dbc.getBrowserUri() except viewpointdirect.BrowserNotPresent, e: self.log.error("isURICorrect: %s" % str(e)) else: if data: rc = simplejson.loads(data) vp_uri = rc['data'] admin_uri = self.config.get('admin_uri', None) if vp_uri.startswith(uri) or vp_uri.startswith(admin_uri): returned = True else: self.log.info("isURICorrect: current URI:'%s', correct URI:'%s'." % (vp_uri, uri)) return returned
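The admin_uri guard added above avoids a crash, not just a logic change: in Python 2, str.startswith(None) raises TypeError, so the unguarded check blows up whenever no admin_uri is configured. A tiny check:

print 'http://example/'.startswith('http')  # True
try:
    'http://example/'.startswith(None)
except TypeError:
    print 'startswith(None) raises TypeError'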
done_count = 0
def do_queued_updates(self, amount=-1): """ Index <amount> entries from the indexer queue.
macro.request.include_id = incl_id
macro.request.uid_generator.include_id = incl_id
def macro_TableOfContents(macro, maxdepth=int): """
<td style="border:0">
def enabled(val): return not val and u' disabled="disabled"' or u''
</td>
def enabled(val): return not val and u' disabled="disabled"' or u''
%%s
<td style="border:0"> %%s </td>
def enabled(val): return not val and u' disabled="disabled"' or u''
avv = av["Access Vector"][:1]
avv = base_metas["Access Vector"][:1]
def buildVector(base_metas): vector = "" avset = set(['Local', 'Network', 'Adjacent Network']) acset = set(['High', 'Medium', 'Low']) auset = set(['Multiple', 'Single', 'None']) avv = av["Access Vector"][:1] if avv not in avset: return None else: vector += "AV:" + avv[0] + "/" acv = ac["Access Complexity"][:1] if acv not in acset: return None else: vector += "AC:" + acv[0] + "/" auv = au["Authentication"][:1] if auv not in auset: return None else: vector += "Au:" + auv[0] + "/C:C/I:C/A:C" return vector
acv = ac["Access Complexity"][:1]
acv = base_metas["Access Complexity"][:1]
def buildVector(base_metas): vector = "" avset = set(['Local', 'Network', 'Adjacent Network']) acset = set(['High', 'Medium', 'Low']) auset = set(['Multiple', 'Single', 'None']) avv = av["Access Vector"][:1] if avv not in avset: return None else: vector += "AV:" + avv[0] + "/" acv = ac["Access Complexity"][:1] if acv not in acset: return None else: vector += "AC:" + acv[0] + "/" auv = au["Authentication"][:1] if auv not in auset: return None else: vector += "Au:" + auv[0] + "/C:C/I:C/A:C" return vector
auv = au["Authentication"][:1]
auv = base_metas["Authentication"][:1]
def buildVector(base_metas): vector = "" avset = set(['Local', 'Network', 'Adjacent Network']) acset = set(['High', 'Medium', 'Low']) auset = set(['Multiple', 'Single', 'None']) avv = av["Access Vector"][:1] if avv not in avset: return None else: vector += "AV:" + avv[0] + "/" acv = ac["Access Complexity"][:1] if acv not in acset: return None else: vector += "AC:" + acv[0] + "/" auv = au["Authentication"][:1] if auv not in auset: return None else: vector += "Au:" + auv[0] + "/C:C/I:C/A:C" return vector
assert second.acquire(timeout + 0.1)
assert second.acquire(timeout + 0.2)
def testAcquireAfterTimeout(self): """ util.lock: ExclusiveLock: acquire after timeout
if vals and addpagename:
if addpagename: result += f.listitem(1, **entryfmt)
def construct_table(macro, pagelist, metakeys, legend='', checkAccess=True, styles=dict(), addpagename=False): request = macro.request request.page.formatter = request.formatter _ = request.getText row = 0 entryfmt = {'class': 'metamatrix_entry'} # Start table request.write(macro.formatter.linebreak() + u'<div class="metamatrix">' + macro.formatter.table(1)) # Give a class to headers to make it customisable request.write(macro.formatter.table_row(1, {'rowclass': 'meta_head'})) # Upper left cell is empty or has the desired legend t_cell(macro, [legend]) x_key, y_key = metakeys[:2] x_values, y_values = set(), set() page_vals = dict() for page in pagelist: page_vals[page] = get_metas(request, page, metakeys, checkAccess=False) x_values.update(page_vals[page].get(x_key, set())) y_values.update(page_vals[page].get(y_key, set())) metakeys = metakeys[2:] # Make header row for oval, value in sorted((ordervalue(y), y) for y in y_values): style = styles.get(y_key, dict()) # Styles can modify key naming name = style.get('gwikiname', '').strip('"') # We don't want stuff like bullet lists in out header headerstyle = dict() for st in style: if not st.startswith('gwiki'): headerstyle[st] = style[st] if name: t_cell(macro, [name], style=headerstyle) else: t_cell(macro, [value], style=headerstyle) request.write(macro.formatter.table_row(0)) tmp_page = request.page f = macro.formatter # Table for oval, x_value in sorted((ordervalue(x), x) for x in x_values): row = row + 1 if row % 2: request.write(f.table_row(1, {'rowclass': 'metamatrix-odd-row'})) else: request.write(f.table_row(1, {'rowclass': 'metamatrix-even-row'})) t_cell(macro, [x_value]) for oval, y_value in sorted((ordervalue(y), y) for y in y_values): style = styles.get(y_value, dict()) if not style.has_key('class'): style['class'] = 'meta_cell' macro.request.write(f.table_cell(1, attrs=style)) for page in pagelist: pageobj = Page(request, page) if (x_value in page_vals[page].get(x_key, set()) and y_value in page_vals[page].get(y_key, set())): result = '' args = {'class': 'metamatrix_link'} # Were there vals? vals = None for key in metakeys: for val in page_vals[page].get(key, list()): # Strip ugly brackets from bracketed links val = val.lstrip('[').strip(']') result += f.listitem(1, **entryfmt) result += pageobj.link_to(request, text=val, **args) result += f.listitem(0) vals = val if vals and addpagename: result += pageobj.link_to(request, **args) macro.request.write(result) request.write(macro.formatter.table_row(0)) request.page = tmp_page request.formatter.page = tmp_page request.write(macro.formatter.table(0)) request.write(u'</div>')
result += f.listitem(0)
def construct_table(macro, pagelist, metakeys, legend='', checkAccess=True, styles=dict(), addpagename=False): request = macro.request request.page.formatter = request.formatter _ = request.getText row = 0 entryfmt = {'class': 'metamatrix_entry'} # Start table request.write(macro.formatter.linebreak() + u'<div class="metamatrix">' + macro.formatter.table(1)) # Give a class to headers to make it customisable request.write(macro.formatter.table_row(1, {'rowclass': 'meta_head'})) # Upper left cell is empty or has the desired legend t_cell(macro, [legend]) x_key, y_key = metakeys[:2] x_values, y_values = set(), set() page_vals = dict() for page in pagelist: page_vals[page] = get_metas(request, page, metakeys, checkAccess=False) x_values.update(page_vals[page].get(x_key, set())) y_values.update(page_vals[page].get(y_key, set())) metakeys = metakeys[2:] # Make header row for oval, value in sorted((ordervalue(y), y) for y in y_values): style = styles.get(y_key, dict()) # Styles can modify key naming name = style.get('gwikiname', '').strip('"') # We don't want stuff like bullet lists in out header headerstyle = dict() for st in style: if not st.startswith('gwiki'): headerstyle[st] = style[st] if name: t_cell(macro, [name], style=headerstyle) else: t_cell(macro, [value], style=headerstyle) request.write(macro.formatter.table_row(0)) tmp_page = request.page f = macro.formatter # Table for oval, x_value in sorted((ordervalue(x), x) for x in x_values): row = row + 1 if row % 2: request.write(f.table_row(1, {'rowclass': 'metamatrix-odd-row'})) else: request.write(f.table_row(1, {'rowclass': 'metamatrix-even-row'})) t_cell(macro, [x_value]) for oval, y_value in sorted((ordervalue(y), y) for y in y_values): style = styles.get(y_value, dict()) if not style.has_key('class'): style['class'] = 'meta_cell' macro.request.write(f.table_cell(1, attrs=style)) for page in pagelist: pageobj = Page(request, page) if (x_value in page_vals[page].get(x_key, set()) and y_value in page_vals[page].get(y_key, set())): result = '' args = {'class': 'metamatrix_link'} # Were there vals? vals = None for key in metakeys: for val in page_vals[page].get(key, list()): # Strip ugly brackets from bracketed links val = val.lstrip('[').strip(']') result += f.listitem(1, **entryfmt) result += pageobj.link_to(request, text=val, **args) result += f.listitem(0) vals = val if vals and addpagename: result += pageobj.link_to(request, **args) macro.request.write(result) request.write(macro.formatter.table_row(0)) request.page = tmp_page request.formatter.page = tmp_page request.write(macro.formatter.table(0)) request.write(u'</div>')
t_cell(macro, "%s: MetaValueDistribution(%s)" % (error, ",".join(args)))
t_cell(macro, "%s: MetaValueDistribution(%s)" % (error, args))
def show_error(macro, args, error): request = macro.request request.write(macro.formatter.linebreak() + u'<div class="metatable">' + macro.formatter.table(1)) request.write(macro.formatter.table_row(1)) t_cell(macro, "%s: MetaValueDistribution(%s)" % (error, ",".join(args))) request.write(macro.formatter.table_row(0)) request.write(macro.formatter.table(0) + u'</div>')
print gout
def checking_loop(wiki): url = wiki.host while True: #Get all new history pages with pending status info('Lookig for pages') picked_pages = wiki.getMeta('CategoryHistory, overallvalue=pending') info('Found %d pages' % len(picked_pages)) if not picked_pages: info('No pages. Sleeping') time.sleep(10) continue #go thgrough all new pages for page in picked_pages: info('%s: picked %s' % (url, page)) path = tempfile.mkdtemp() os.chdir(path) info("Created tempdir %s" % path) #change the status to picked wiki.setMeta(page, {'overallvalue' : ['picked']}, True) metas = picked_pages[page] user = metas['user'].single().strip('[]') # get the attachment filename from the file meta info('Writing files') for filename in metas['file']: attachment_file = removeLink(filename) #get the source code info("Fetching sourcode from %s" % attachment_file) try: code = wiki.getAttachment(page, attachment_file) except opencollab.wiki.WikiFault, e: if 'There was an error in the wiki side (Nonexisting attachment' in e.args[0]: code = '' else: raise # get rid of the _rev<number> in filenames open(re.sub('(_rev\d+)', '', removeLink(filename)), 'w').write(code) revision = re.search('_rev(\d+)', removeLink(filename)).group(1) #if there is wrong amount of question page linksd, leave #the returned assignment as picked so that other #assignments can be checked. if len(metas['question']) != 1: error('Invalid meta data in %s! There we %d values!\n' % (page, len(metas['question']))) continue #get the question pagenmae question = metas['question'].single(None) question = question.strip('[]') #find associataed answerpages answer_pages = wiki.getMeta(question +'/options').values()[0]['answer'] info("Found %d answer pages" % len(answer_pages)) regex = re.compile('{{{\s*(.*)\s*}}}', re.DOTALL) wrong = list() right = list() outputs = list() for apage in [x.strip('[]') for x in answer_pages]: info('getting answers from %s' % apage) answer_meta = wiki.getMeta(apage).values()[0] testname = answer_meta['testname'].single() outputpage = None inputpage = None if 'output' in answer_meta: outputpage = answer_meta['output'].single().strip('[]') outfilesatt = wiki.listAttachments if 'input' in answer_meta: inputpage = answer_meta['input'].single().strip('[]') args = answer_meta['parameters'].single() input = '' if inputpage: content = wiki.getPage(inputpage) input = regex.search(content).group(1) input_meta = wiki.getMeta(inputpage) filelist = input_meta[inputpage]['file'] for attachment in filelist: filename = removeLink(attachment) content = wiki.getAttachment(inputpage, filename) info('Writing input file %s' % filename) open(os.path.join(path, filename), 'w').write(content) output = '' if outputpage: content = wiki.getPage(outputpage) output = regex.search(content).group(1) output_meta = wiki.getMeta(outputpage) # get output files output_files = dict() filelist = output_meta[outputpage]['file'] for attachment in filelist: filename = removeLink(attachment) content = wiki.getAttachment(outputpage, filename) output_files[filename] = content info('Running test') goutput, gerror, timeout, gfiles = run(args, input, path) goutput = goutput.strip('\n') output = output.strip('\n') goutput = gerror.strip('\n') + goutput if timeout: goutput = goutput + "\n***** TIMEOUT *****\nYOUR PROGRAM TIMED OUT!\n\n" + goutput if len(goutput) > 1024*100: goutput = "***** Your program produced more than 100kB of output data *****\n(Meaning that your program failed)\nPlease check your code before returning it\n" info('Excess output!') passed = True if goutput != output: info("Test %s failed" % testname) failed = False # compare output files for filename, content in output_files.items(): if filename not in gfiles: info("A file is missing") passed = False break if content != gfiles[filename]: info("Output file does not match") passed = False break if passed: info("Test %s succeeded" % testname) right.append(testname) else: info("Test %s failed" % testname) wrong.append(testname) #put user output to wiki. outputs.append('[[%s]]' % (user + '/' + outputpage,)) try: wiki.putPage(user + '/' + outputpage, outputtemplate % (esc(goutput), testname)) for ofilename, ocontent in gfiles.items(): wiki.putAttachment(user + '/' + outputpage, ofilename, ocontent) except opencollab.wiki.WikiFault, error_message: # It's ok if the comment does not change if 'There was an error in the wiki side (You did not change the page content, not saved!)' in error_message: pass elif 'There was an error in the wiki side (Attachment not saved, file exists)' in error_message: pass else: raise # put output file metas to output page wiki.setMeta(user + '/' + outputpage, {'file' : ['[[attachment:%s]]' % x for x in gfiles.keys()]}) info('Removing ' + path) shutil.rmtree(path) metas = dict() #clear old info info('Clearing old metas') wiki.setMeta(page, {'wrong': [], 'right': []}, True) if len(wrong) == 0: metas['overallvalue'] = ['success'] else: metas['overallvalue'] = ['failure'] if outputs: metas['output'] = outputs if wrong: metas['wrong'] = wrong if right: metas['right'] = right info('Setting new metas') #add metas wiki.setMeta(page, metas, True) info('Done') time.sleep(5)
info("Test %s failed" % testname) failed = False
passed = False
def checking_loop(wiki): url = wiki.host while True: #Get all new history pages with pending status info('Lookig for pages') picked_pages = wiki.getMeta('CategoryHistory, overallvalue=pending') info('Found %d pages' % len(picked_pages)) if not picked_pages: info('No pages. Sleeping') time.sleep(10) continue #go thgrough all new pages for page in picked_pages: info('%s: picked %s' % (url, page)) path = tempfile.mkdtemp() os.chdir(path) info("Created tempdir %s" % path) #change the status to picked wiki.setMeta(page, {'overallvalue' : ['picked']}, True) metas = picked_pages[page] user = metas['user'].single().strip('[]') # get the attachment filename from the file meta info('Writing files') for filename in metas['file']: attachment_file = removeLink(filename) #get the source code info("Fetching sourcode from %s" % attachment_file) try: code = wiki.getAttachment(page, attachment_file) except opencollab.wiki.WikiFault, e: if 'There was an error in the wiki side (Nonexisting attachment' in e.args[0]: code = '' else: raise # get rid of the _rev<number> in filenames open(re.sub('(_rev\d+)', '', removeLink(filename)), 'w').write(code) revision = re.search('_rev(\d+)', removeLink(filename)).group(1) #if there is wrong amount of question page linksd, leave #the returned assignment as picked so that other #assignments can be checked. if len(metas['question']) != 1: error('Invalid meta data in %s! There we %d values!\n' % (page, len(metas['question']))) continue #get the question pagenmae question = metas['question'].single(None) question = question.strip('[]') #find associataed answerpages answer_pages = wiki.getMeta(question +'/options').values()[0]['answer'] info("Found %d answer pages" % len(answer_pages)) regex = re.compile('{{{\s*(.*)\s*}}}', re.DOTALL) wrong = list() right = list() outputs = list() for apage in [x.strip('[]') for x in answer_pages]: info('getting answers from %s' % apage) answer_meta = wiki.getMeta(apage).values()[0] testname = answer_meta['testname'].single() outputpage = None inputpage = None if 'output' in answer_meta: outputpage = answer_meta['output'].single().strip('[]') outfilesatt = wiki.listAttachments if 'input' in answer_meta: inputpage = answer_meta['input'].single().strip('[]') args = answer_meta['parameters'].single() input = '' if inputpage: content = wiki.getPage(inputpage) input = regex.search(content).group(1) input_meta = wiki.getMeta(inputpage) filelist = input_meta[inputpage]['file'] for attachment in filelist: filename = removeLink(attachment) content = wiki.getAttachment(inputpage, filename) info('Writing input file %s' % filename) open(os.path.join(path, filename), 'w').write(content) output = '' if outputpage: content = wiki.getPage(outputpage) output = regex.search(content).group(1) output_meta = wiki.getMeta(outputpage) # get output files output_files = dict() filelist = output_meta[outputpage]['file'] for attachment in filelist: filename = removeLink(attachment) content = wiki.getAttachment(outputpage, filename) output_files[filename] = content info('Running test') goutput, gerror, timeout, gfiles = run(args, input, path) goutput = goutput.strip('\n') output = output.strip('\n') goutput = gerror.strip('\n') + goutput if timeout: goutput = goutput + "\n***** TIMEOUT *****\nYOUR PROGRAM TIMED OUT!\n\n" + goutput if len(goutput) > 1024*100: goutput = "***** Your program produced more than 100kB of output data *****\n(Meaning that your program failed)\nPlease check your code before returning it\n" info('Excess output!') passed = True if goutput != output: info("Test %s failed" % testname) failed = False # compare output files for filename, content in output_files.items(): if filename not in gfiles: info("A file is missing") passed = False break if content != gfiles[filename]: info("Output file does not match") passed = False break if passed: info("Test %s succeeded" % testname) right.append(testname) else: info("Test %s failed" % testname) wrong.append(testname) #put user output to wiki. outputs.append('[[%s]]' % (user + '/' + outputpage,)) try: wiki.putPage(user + '/' + outputpage, outputtemplate % (esc(goutput), testname)) for ofilename, ocontent in gfiles.items(): wiki.putAttachment(user + '/' + outputpage, ofilename, ocontent) except opencollab.wiki.WikiFault, error_message: # It's ok if the comment does not change if 'There was an error in the wiki side (You did not change the page content, not saved!)' in error_message: pass elif 'There was an error in the wiki side (Attachment not saved, file exists)' in error_message: pass else: raise # put output file metas to output page wiki.setMeta(user + '/' + outputpage, {'file' : ['[[attachment:%s]]' % x for x in gfiles.keys()]}) info('Removing ' + path) shutil.rmtree(path) metas = dict() #clear old info info('Clearing old metas') wiki.setMeta(page, {'wrong': [], 'right': []}, True) if len(wrong) == 0: metas['overallvalue'] = ['success'] else: metas['overallvalue'] = ['failure'] if outputs: metas['output'] = outputs if wrong: metas['wrong'] = wrong if right: metas['right'] = right info('Setting new metas') #add metas wiki.setMeta(page, metas, True) info('Done') time.sleep(5)
content_query = super(TextSearch, self).xapian_term(request, connection) title_query = TitleSearch(self._pattern, use_re=self.use_re, case=self.case).xapian_term(request, connection) return Query(OP_OR, [title_query, content_query])
if self.use_re: return Query('') else: content_query = super(TextSearch, self).xapian_term(request, connection) title_query = TitleSearch(self._pattern, use_re=self.use_re, case=self.case).xapian_term(request, connection) return Query(OP_OR, [title_query, content_query]) def xapian_need_postproc(self): return self.case or self.use_re
def xapian_term(self, request, connection):
try: slideNumber = int(self.request.values.get('n', 1)) if not 1 <= slideNumber <= len(self.page):
slideNumber = self.request.values.get('n', 1) if slideNumber == "all": slideNumber = None else: try: slideNumber = int(slideNumber) if not 1 <= slideNumber <= len(self.page): slideNumber = 1 except ValueError:
def setSlideNumber(self): try: slideNumber = int(self.request.values.get('n', 1)) if not 1 <= slideNumber <= len(self.page): slideNumber = 1 except ValueError: slideNumber = 1 self.slideNumber = slideNumber
except ValueError: slideNumber = 1
def setSlideNumber(self): try: slideNumber = int(self.request.values.get('n', 1)) if not 1 <= slideNumber <= len(self.page): slideNumber = 1 except ValueError: slideNumber = 1 self.slideNumber = slideNumber
start = max(first, self.slideNumber - other / 2)
start = max(first, (self.slideNumber or 1) - other / 2)
def slideLinksRange(self):
    """ Return range of slides to display, current centered """
    other = self.maxSlideLinks - 1  # other slides except current
    first, last = self.first_slide(), self.last_slide()
    start = max(first, self.slideNumber - other / 2)
    end = min(start + other, last)
    start = max(first, end - other)
    return range(start, end + 1)
return min(self.slideNumber + 1, self.last_slide())
return min((self.slideNumber or 1) + 1, self.last_slide())
def next_slide(self):
    return min(self.slideNumber + 1, self.last_slide())
return max(self.slideNumber - 1, self.first_slide())
return max((self.slideNumber or 1) - 1, self.first_slide())
def previous_slide(self):
    return max(self.slideNumber - 1, self.first_slide())
def item_language_attribtues(self):
def item_language_attributes(self):
def item_language_attribtues(self):
    return self.languageAttributes(self.request.content_lang)
def item_slide_title(self):
    return wikiutil.escape(self.page.titleAt(self.slideNumber))

def item_slide_body(self):
    text = self.page.bodyAt(self.slideNumber)
def item_slides(self):
    if self.slideNumber is None:
        slides = []
        for n in xrange(0, len(self.page)):
            slides.append(slide_template % {
                'slide_title': self.item_slide_title(n + 1),
                'slide_body': self.item_slide_body(n + 1),
            })
        return ''.join(slides)
    else:
        return slide_template % self

def item_slide_title(self, number=None):
    return wikiutil.escape(self.page.titleAt(number or self.slideNumber))

def item_slide_body(self, number=None):
    text = self.page.bodyAt(number or self.slideNumber)
def item_slide_title(self):
    return wikiutil.escape(self.page.titleAt(self.slideNumber))
return "%d|%d" % (self.slideNumber, self.last_slide())
if self.slideNumber is not None:
    return "%d|%d" % (self.slideNumber, self.last_slide())
else:
    return ''
def item_counter(self):
    return "%d|%d" % (self.slideNumber, self.last_slide())
<html%(language_attribtues)s>
<html%(language_attributes)s>
def item_counter(self):
    return "%d|%d" % (self.slideNumber, self.last_slide())
<h1>%(slide_title)s</h1>
<div id="content">
%(slide_body)s
</div>
%(slides)s
def item_counter(self):
    return "%d|%d" % (self.slideNumber, self.last_slide())
<ul id="date">%(date)s</ul> <ul id="author">%(author)s</ul> <ul id="counter">%(counter)s</ul>
<ul> <li id="date">%(date)s</li> <li id="author">%(author)s</li> <li id="counter">%(counter)s</li> </ul>
def item_counter(self):
    return "%d|%d" % (self.slideNumber, self.last_slide())
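For reference, a minimal runnable illustration of the %-style mapping substitution these slide templates rely on; the template text below is a trimmed stand-in, not the full theme markup:

slide_template = u'''
<h1>%(slide_title)s</h1>
<div id="content">
%(slide_body)s
</div>
'''

# each %(name)s placeholder is filled from the mapping's keys
print(slide_template % {
    'slide_title': 'Intro',
    'slide_body': '<p>Hello</p>',
})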
<td style="border:0; width:1%%">
<td style="border:0;">
def enabled(val):
    return not val and u' disabled="disabled"' or u''
<div style="text-align:left">
<div style="text-align:%s">
def enabled(val):
    return not val and u' disabled="disabled"' or u''
page_url, prev_oldrev, oldrev, _("Previous change"), enabled(oldrev > 1),
page_url, "left", prev_oldrev, oldrev, _("Previous change"), enabled(oldrev > 1),
def enabled(val):
    return not val and u' disabled="disabled"' or u''
page_url, newrev, next_newrev, _("Next change"), enabled(newrev < currentrev), )
page_url, "right", newrev, next_newrev, _("Next change"), enabled(newrev < currentrev), )
def enabled(val):
    return not val and u' disabled="disabled"' or u''
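The `not val and X or Y` construction in enabled() predates Python's conditional expression; a sketch of the modern equivalent, with identical behaviour for the boolean inputs it receives:

def enabled(val):
    # u'' when the control is enabled, the disabled attribute otherwise
    return u'' if val else u' disabled="disabled"'

assert enabled(True) == u''
assert enabled(False) == u' disabled="disabled"'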
class FixedFilesystemSessionStore(FilesystemSessionStore):
    """ Fix buggy implementation of .get() in werkzeug <= 0.5:

    If you try to get(somesid) and the file with the contents of sid storage
    does not exist or is troublesome somehow, it will create a new session
    with a new sid in werkzeug 0.5 original implementation.

    But we do not want to store a session file for new and empty sessions,
    but rather wait for the 2nd request and see whether the user agent sends
    the cookie back to us. If it doesn't support cookies, we don't want to
    create one new session file per request. If it does support cookies, we
    need to use .get() with the sid although there was no session file stored
    for that sid in the first request.

    TODO: try to get it into werkzeug codebase and remove this class after
    we REQUIRE a werkzeug release > 0.5 that has it.
    """
_fs_transaction_suffix = '.__wz_sess'

class FilesystemSessionStore(SessionStore):
    """Simple example session store that saves sessions in the filesystem
    like PHP does.

    .. versionchanged:: 0.6
       `renew_missing` was added. Previously this was considered `True`,
       now the default changed to `False` and it can be explicitly
       deactivated.

    :param path: the path to the folder used for storing the sessions.
                 If not provided the default temporary directory is used.
    :param filename_template: a string template used to give the session
                              a filename. ``%s`` is replaced with the
                              session id.
    :param session_class: The session class to use. Defaults to
                          :class:`Session`.
    :param renew_missing: set to `True` if you want the store to give the
                          user a new sid if the session was not yet saved.
    """

    def __init__(self, path=None, filename_template='werkzeug_%s.sess',
                 session_class=None, renew_missing=False, mode=0644):
        SessionStore.__init__(self, session_class)
        if path is None:
            path = gettempdir()
        self.path = path
        if isinstance(filename_template, unicode):
            filename_template = filename_template.encode(
                sys.getfilesystemencoding() or 'utf-8')
        assert not filename_template.endswith(_fs_transaction_suffix), \
            'filename templates may not end with %s' % _fs_transaction_suffix
        self.filename_template = filename_template
        self.renew_missing = renew_missing
        self.mode = mode

    def get_session_filename(self, sid):
        if isinstance(sid, unicode):
            sid = sid.encode('utf-8')
        return path.join(self.path, self.filename_template % sid)

    def save(self, session):
        def _dump(filename):
            f = file(filename, 'wb')
            try:
                dump(dict(session), f, HIGHEST_PROTOCOL)
            finally:
                f.close()
        fn = self.get_session_filename(session.sid)
        if os.name == 'posix':
            td, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix,
                                       dir=self.path)
            _dump(tmp)
            try:
                os.rename(tmp, fn)
            except (IOError, OSError):
                pass
            os.chmod(fn, self.mode)
        else:
            _dump(fn)
            try:
                os.chmod(fn, self.mode)
            except OSError:
                pass

    def delete(self, session):
        fn = self.get_session_filename(session.sid)
        try:
            os.unlink(fn)
        except OSError:
            pass
def __repr__(self):
    # TODO: try to get this into werkzeug codebase
    return '<%s %s %s%s>' % (
        self.__class__.__name__,
        self.sid,  # we want to see sid
        dict.__repr__(self),
        self.should_save and '*' or ''
    )
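A hedged usage sketch for such a store, using werkzeug's contrib sessions; the module path and constructor signature depend on the werkzeug version installed (contrib sessions were removed in werkzeug 1.0):

from werkzeug.contrib.sessions import FilesystemSessionStore

store = FilesystemSessionStore(path='/tmp/sessions', filename_template='%s')
session = store.new()               # fresh session with a new sid
session['user.id'] = 'alice'        # same key the finalize() code below uses
store.save(session)

restored = store.get(session.sid)   # round-trip through the session file
print(restored['user.id'])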
fn = self.get_session_filename(sid)
f = None
def get(self, sid):
    if not self.is_valid_key(sid):
        return self.new()
    fn = self.get_session_filename(sid)
    f = None
    try:
        try:
            f = open(fn, 'rb')
            data = load(f)
        except (IOError, EOFError, KeyError):
            # XXX check completeness/correctness
            # Note: we do NOT generate a new sid in case of trouble with session *contents*
            # IOError: [Errno 2] No such file or directory
            # IOError: [Errno 13] Permission denied (we will notice permission problems when writing)
            # EOFError: when trying to load("") - no contents
            # KeyError: when trying to load("xxx") - crap contents
            data = {}
    finally:
        if f:
            f.close()
    return self.session_class(data, sid, False)
        f = open(fn, 'rb')
        data = load(f)
    except (IOError, EOFError, KeyError):
        data = {}
finally:
    if f:
try:
    data = load(f)
except Exception:
    data = {}
finally:
def get(self, sid):
    if not self.is_valid_key(sid):
        return self.new()
    fn = self.get_session_filename(sid)
    f = None
    try:
        try:
            f = open(fn, 'rb')
            data = load(f)
        except (IOError, EOFError, KeyError):
            # XXX check completeness/correctness
            # Note: we do NOT generate a new sid in case of trouble with session *contents*
            # IOError: [Errno 2] No such file or directory
            # IOError: [Errno 13] Permission denied (we will notice permission problems when writing)
            # EOFError: when trying to load("") - no contents
            # KeyError: when trying to load("xxx") - crap contents
            data = {}
    finally:
        if f:
            f.close()
    return self.session_class(data, sid, False)
fd, temp_fname = tempfile.mkstemp(suffix='.tmp', dir=self.path)
fd, temp_fname = tempfile.mkstemp(suffix=_fs_transaction_suffix, dir=self.path)
def save(self, session):
    fd, temp_fname = tempfile.mkstemp(suffix='.tmp', dir=self.path)
    f = os.fdopen(fd, 'wb')
    try:
        dump(dict(session), f, HIGHEST_PROTOCOL)
    finally:
        f.close()
    filesys.chmod(temp_fname, 0666 & config.umask)  # relax restrictive mode from mkstemp
    fname = self.get_session_filename(session.sid)
    # this is either atomic or happening with real locks set:
    filesys.rename(temp_fname, fname)
filesys.chmod(temp_fname, 0666 & config.umask)
filesys.chmod(temp_fname, self.mode)
def save(self, session):
    fd, temp_fname = tempfile.mkstemp(suffix='.tmp', dir=self.path)
    f = os.fdopen(fd, 'wb')
    try:
        dump(dict(session), f, HIGHEST_PROTOCOL)
    finally:
        f.close()
    filesys.chmod(temp_fname, 0666 & config.umask)  # relax restrictive mode from mkstemp
    fname = self.get_session_filename(session.sid)
    # this is either atomic or happening with real locks set:
    filesys.rename(temp_fname, fname)
        filesys.rename(temp_fname, fname)

    """ Adds functionality missing in werkzeug 0.5: getting a list of all SIDs,
    so that purging sessions can be implemented. """
    def get_all_sids(self):
        """ return a list of all session ids (sids) """
        import re
        regex = re.compile(re.escape(self.filename_template).replace(
            r'\%s', r'([0-9a-fA-F]+)'))
        sids = []
        for fn in os.listdir(self.path):
            m = regex.match(fn)
            if m:
                sids.append(m.group(1))
        return sids
        filesys.rename(temp_fname, fname)

    """ Problem: werkzeug 0.6 uses inconsistent encoding for template and filename """
    def _encode_fs(self, name):
        if isinstance(name, unicode):
            name = name.encode(sys.getfilesystemencoding() or 'utf-8')
        return name

    def get_session_filename(self, sid):
        sid = self._encode_fs(sid)
        return path.join(self.path, self.filename_template % sid)


class MoinSession(Session):
    """ Compatibility interface to Werkzeug-sessions for old Moin-code.

    is_new is DEPRECATED and will go away soon.
    """
    def _get_is_new(self):
        logging.warning("Deprecated use of MoinSession.is_new, please use .new")
        return self.new
    is_new = property(_get_is_new)
def save(self, session):
    fd, temp_fname = tempfile.mkstemp(suffix='.tmp', dir=self.path)
    f = os.fdopen(fd, 'wb')
    try:
        dump(dict(session), f, HIGHEST_PROTOCOL)
    finally:
        f.close()
    filesys.chmod(temp_fname, 0666 & config.umask)  # relax restrictive mode from mkstemp
    fname = self.get_session_filename(session.sid)
    # this is either atomic or happening with real locks set:
    filesys.rename(temp_fname, fname)
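The save() above relies on the classic "write to a temp file, then rename over the target" pattern, so readers never observe a half-written session file. A self-contained sketch of the same idea (function name and error handling are mine, not MoinMoin's):

import os
import tempfile

def atomic_write(fname, data, mode=0o644):
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(fname) or '.')
    try:
        with os.fdopen(fd, 'wb') as f:
            f.write(data)
        os.chmod(tmp, mode)
        # atomic on POSIX within one filesystem; on Windows, rename fails if
        # the target exists, which is why MoinMoin wraps it in filesys.rename
        os.rename(tmp, fname)
    except BaseException:
        os.unlink(tmp)  # never leave a stray temp file behind
        raise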
return FixedFilesystemSessionStore(path=path, filename_template='%s', session_class=MoinSession)
return FixedFilesystemSessionStore(path=path, filename_template='%s', session_class=MoinSession, mode=0666 & config.umask)
def _store_get(self, request):
    path = request.cfg.session_dir
    try:
        filesys.mkdir(path)
    except OSError:
        pass
    return FixedFilesystemSessionStore(path=path, filename_template='%s',
                                       session_class=MoinSession)
return store.get_all_sids()
return store.list()
def get_all_session_ids(self, request):
    store = self._store_get(request)
    return store.get_all_sids()
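The get_all_sids() shown earlier reconstructs session ids from file names. A standalone sketch of the same approach (the directory path is an assumption; the re.escape-based replace is written to work whether or not re.escape escapes '%'):

import os
import re

def list_session_ids(path, filename_template='%s'):
    # turn the escaped template into a regex capturing the hex sid
    pattern = re.escape(filename_template).replace(
        re.escape('%s'), '([0-9a-fA-F]+)')
    regex = re.compile(pattern)
    sids = []
    for fn in os.listdir(path):
        m = regex.match(fn)
        if m:
            sids.append(m.group(1))
    return sids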
session.modified):
session.should_save):
def finalize(self, request, session):
    if request.user.auth_method == 'setuid':
        userobj = request._setuid_real_user
        setuid = request.user.id
    else:
        userobj = request.user
        setuid = None
    logging.debug("finalize userobj = %r, setuid = %r" % (userobj, setuid))
    cfg = request.cfg
    # we use different cookie names for different wikis:
    cookie_name = get_cookie_name(request, name=request.cfg.cookie_name, usage=self.cookie_usage)
    # we always use path='/' except if explicitly overridden by configuration,
    # which is usually not needed and not recommended:
    cookie_path = cfg.cookie_path or '/'
    if userobj and userobj.valid:
        session['user.id'] = userobj.id
        session['user.auth_method'] = userobj.auth_method
        session['user.auth_attribs'] = userobj.auth_attribs
        if setuid:
            session['setuid'] = setuid
        elif 'setuid' in session:
            del session['setuid']
        logging.debug("after auth: storing valid user into session: %r" % userobj.name)
    else:
        logging.debug("after auth: user is invalid")
        if 'user.id' in session:
            logging.debug("after auth: destroying session: %r" % session)
            self.destroy_session(request, session)
            logging.debug("after auth: deleting session cookie!")
            request.delete_cookie(cookie_name, path=cookie_path, domain=cfg.cookie_domain)
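finalize() keys the cookie name to the wiki so several wikis can share one domain. MoinMoin's real get_cookie_name() is not shown in this excerpt; the following is only an illustration of the general technique of hashing a site identifier into the cookie name, with all names invented:

import hashlib

def cookie_name(base_name, site_id, usage='session'):
    # derive a short, stable token from the site identifier and usage
    token = hashlib.md5(
        ('%s/%s' % (site_id, usage)).encode('utf-8')).hexdigest()[:8]
    return '%s_%s' % (base_name, token)

print(cookie_name('MOIN_SESSION', 'mywiki'))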
m = self.addr_re.match(target)
m = self.rules.addr_re.match(target)
def image_emit(self, node):
    target = node.content
    text = self.get_text(node)
    m = self.addr_re.match(target)
    if m:
        if m.group('page_name'):
            # inserted anchors
            url = wikiutil.url_unquote(target)
            if target.startswith('#'):
                return self.formatter.anchordef(url[1:])
            # default to images
            return self.formatter.attachment_image(
                url, alt=text, html_class='image')
        elif m.group('extern_addr'):
            # external link
            address = m.group('extern_addr')
            proto = m.group('extern_proto')
            url = wikiutil.url_unquote(address)
            return self.formatter.image(
                src=url, alt=text, html_class='external_image')
        elif m.group('attach_scheme'):
            # link to an attachment
            scheme = m.group('attach_scheme')
            attachment = m.group('attach_addr')
            url = wikiutil.url_unquote(attachment)
            if scheme == 'image':
                return self.formatter.attachment_image(
                    url, alt=text, html_class='image')
            elif scheme == 'drawing':
                url = wikiutil.drawing2fname(url)
                return self.formatter.attachment_drawing(url, text, alt=text)
            else:
                pass
        elif m.group('inter_wiki'):
            # interwiki link
            pass
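image_emit() branches on which named group of one big alternation matched. A toy, self-contained version of that dispatch style; the pattern below is invented for illustration and far simpler than the real creole rules:

import re

addr_re = re.compile(
    r'(?P<extern_addr>(?P<extern_proto>https?)://\S+)'
    r'|(?P<attach_scheme>attachment|image|drawing):(?P<attach_addr>\S+)'
    r'|(?P<page_name>[A-Z]\w+)'
)

def classify(target):
    m = addr_re.match(target)
    if not m:
        return 'unknown'
    if m.group('extern_addr'):
        return 'external (%s)' % m.group('extern_proto')
    if m.group('attach_scheme'):
        return '%s attachment: %s' % (m.group('attach_scheme'),
                                      m.group('attach_addr'))
    return 'wiki page'

print(classify('http://example.org/x.png'))  # external (http)
print(classify('image:diagram.png'))         # image attachment: diagram.png
print(classify('FrontPage'))                 # wiki page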
self.request.theme.add_msg(unicode(err), "error")
self.request.theme.add_msg(wikiutil.escape(unicode(err)), "error")
def execute(self):
    _ = self.request.getText
    try:
        self.setSlideNumber()
        language = self.page.pi['language']
        self.request.setContentLanguage(language)
        self.request.write(self.template % self)
    except Error, err:
        self.request.theme.add_msg(unicode(err), "error")
        self.page.send_page()
'upload_label_rename': _('Page Name'),
'upload_label_rename': _('Page name'),
def get_form_html(self, buttons_html):
    _ = self._
    return """
return orig_feed(self, re.sub(ur'[\x00-\x08\x0b-\x19]', '?', data))
return orig_feed(self, esc(data))
def monkey_feed(self, data):
    return orig_feed(self, re.sub(ur'[\x00-\x08\x0b-\x19]', '?', data))
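monkey_feed() guards the XML serializer against control characters, which XML 1.0 forbids. A minimal standalone version of the same replacement; note that the real esc() referenced in the patched line is defined elsewhere in this codebase, so the definition here is an assumption:

import re

_control_chars = re.compile(u'[\x00-\x08\x0b-\x19]')

def esc(data):
    # replace forbidden control characters so the feed stays well-formed
    return _control_chars.sub(u'?', data)

print(esc(u'ok\x07beep'))  # prints: ok?beep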