Columns: rem (string, 0 to 322k characters), add (string, 0 to 2.05M characters), context (string, 8 to 228k characters).
mb.code_creator.replace_included_headers( ["xfx.h"], False )
mb.code_creator.adopt_creator( code_creators.include_t( 'xfx.h' ), 0 )
def camel_convert(name):
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
mb.class_( lambda x: x.name.startswith( 'HWND' ) ).include( )
mb.member_functions( "Instance" ).call_policies = call_policies.return_internal_reference( )
mb.free_function( "gGetApplication" ).call_policies = call_policies.return_internal_reference( )
mb.member_functions( "Instance" ).call_policies = call_policies.return_value_policy( call_policies.reference_existing_object )
mb.free_function( "gGetApplication" ).call_policies = call_policies.return_value_policy( call_policies.reference_existing_object )
mb.class_( "Quaternion" ).member_function( "FromAxisAngle" ).call_policies = call_policies.return_internal_reference( )
mb.class_( "Mat3" ).member_function( "MakeIdentity" ).call_policies = call_policies.return_internal_reference( )
mb.class_( "Mat4" ).member_function( "MakeIdentity" ).call_policies = call_policies.return_internal_reference( )
mb.class_( "Euler" ).member_functions( lambda x: x.name in ( "FromMat4", "FromVec3", "FromQuaternion" ) ).call_policies = call_policies.return_internal_reference( )
mb.class_( "Quaternion" ).member_function( "FromAxisAngle" ).call_policies = call_policies.return_self( )
mb.class_( "Mat3" ).member_function( "MakeIdentity" ).call_policies = call_policies.return_self( )
mb.class_( "Mat4" ).member_function( "MakeIdentity" ).call_policies = call_policies.return_self( )
mb.class_( "Euler" ).member_functions( lambda x: x.name in ( "FromMat4", "FromVec3", "FromQuaternion" ) ).call_policies = call_policies.return_self( )
mb.member_functions( "Cache", arg_types = [] ).call_policies = call_policies.return_internal_reference( )
mb.class_( "Application" ).member_function( "HWnd" ).call_policies = call_policies.return_value_policy( call_policies.return_opaque_pointer )
mb.member_functions( "Cache", arg_types = [] ).call_policies = call_policies.return_value_policy( call_policies.reference_existing_object )
return abs(x-y)/max(1.0,abs(x)) > 1e-12
return abs(x-y)/max(1.0,abs(x)) > 1e-4
def mismatch(x,y): return abs(x-y)/max(1.0,abs(x)) > 1e-12
print ' !!!SKIPPED'
check(ga.C_SCPL, np.complex64)
def main():
    if 0 == me:
        if MIRROR:
            print ' Performing tests on Mirrored Arrays'
        print ' GA initialized'
    # note that MA is not used, so no need to initialize it
    # "import ga" registers malloc/free as memory allocators internally
    #if nproc-1 == me:
    if 0 == me:
        print 'using %d process(es) %d cluster nodes' % (
                nproc, ga.cluster_nnodes())
        print 'process %d is on node %d with %d processes' % (
                me, ga.cluster_nodeid(), ga.cluster_nprocs(-1))
    ga.sync()
    # create array to force staggering of memory and uneven distribution
    # of pointers
    dim1 = MEM_INC
    mapc = [0]*nproc
    for i in range(nproc):
        mapc[i] = MEM_INC*i
        dim1 += MEM_INC*i
    g_s = ga.create_handle()
    ga.set_data(g_s, [dim1], ga.C_INT)
    ga.set_array_name(g_s, 's')
    ga.set_irreg_distr(g_s, mapc, [nproc])
    if MIRROR:
        if 0 == me:
            print ''
            print ' TESTING MIRRORED ARRAYS '
            print ''
    ga.sync()
    # check support for double precision arrays
    if 0 == me:
        print ''
        print ' CHECKING DOUBLE PRECISION '
        print ''
    check(ga.C_DBL, np.float64)
    # check support for single precision complex arrays
    if 0 == me:
        print ''
        print ' CHECKING SINGLE COMPLEX '
        print ''
        print ' !!!SKIPPED'
    #check(ga.C_SCPL, np.complex64)
    # check support for double precision complex arrays
    if 0 == me:
        print ''
        print ' CHECKING DOUBLE COMPLEX '
        print ''
        print ' !!!SKIPPED'
    #check(ga.C_DCPL, np.complex128)
    # check support for integer arrays
    if 0 == me:
        print ''
        print ' CHECKING INT'
        print ''
    check(ga.C_INT, np.int32)
    # check support for long integer arrays
    if 0 == me:
        print ''
        print ' CHECKING LONG INT'
        print ''
    check(ga.C_LONG, np.int64)
    # check support for single precision arrays
    if 0 == me:
        print ''
        print ' CHECKING SINGLE PRECISION '
        print ''
    check(ga.C_FLOAT, np.float32)
    if 0 == me:
        print ''
        print ' CHECKING Wrappers to Message Passing Collective ops '
        print ''
    check_wrappers()
    # check if memory limits are enforced
    #check_mem(ma_heap*ga.nnodes())
    if 0 == me:
        ga.print_stats()
    if 0 == me:
        print ' '
    if 0 == me:
        print 'All tests successful'
    # tidy up the ga package
    # NO NEED -- atexit registered ga.terminate()
    # tidy up after message-passing library
    # NO NEED -- atexit registered MPI.Finalize()
    # Note: so long as mpi4py is imported before ga, cleanup is automatic
print ' !!!SKIPPED'
check(ga.C_DCPL, np.complex128)
maxproc = 4096
def check(gatype, nptype): n = 256 m = 2*n a = np.zeros((n,n), dtype=nptype) b = np.zeros((n,n), dtype=nptype) maxloop = 100 maxproc = 4096 num_restricted = 0 restricted_list = 0 iproc = me % lprocs nloop = min(maxloop,n) if USE_RESTRICTED: num_restricted = nproc/2 restricted_list = [0]*num_restricted if (num_restricted == 0): num_restricted = 1 for i in range(num_restricted): restricted_list[i] = (num_restricted/2) + i if BLOCK_CYCLIC: block_size = [32,32] if USE_SCALAPACK_DISTR: if nproc % 2 == 0: ga.error('Available procs must be divisible by 2',nproc) proc_grid = [2,nproc/2] # a[] is a local copy of what the global array should start as if MIRROR: a[:] = np.fromfunction(lambda i,j: inode+i+j*n, (n,n), dtype=nptype) else: a[:] = np.fromfunction(lambda i,j: i+j*n, (n,n), dtype=nptype) b[:] = -1 # create a global array if NEW_API: g_a = ga.create_handle() ga.set_data(g_a, [n,n], gatype) ga.set_array_name(g_a, 'a') if USE_RESTRICTED: ga.set_restricted(g_a, restricted_list) if BLOCK_CYCLIC: if USE_SCALAPACK_DISTR: ga.set_block_cyclic_proc_grid(g_a, block_size, proc_grid) else: ga.set_block_cyclic(g_a, block_size) if MIRROR: p_mirror = ga.pgroup_get_mirror() ga.set_pgroup(g_a, p_mirror) ga.allocate(g_a) else: if MIRROR: p_mirror = ga.pgroup_get_mirror() ga.create_config(gatype, (n,n), 'a', None, p_mirror) else: g_a = ga.create(gatype, (n,n), 'a') if 0 == g_a: ga.error('ga.create failed') if MIRROR: lproc = me - ga.cluster_procid(inode, 0) lo,hi = ga.distribution(g_a, lproc) else: lo,hi = ga.distribution(g_a, me) ga.sync() # zero the array if 0 == me: print '> Checking zero ...' ga.zero(g_a) # check that it is indeed zero b = ga.get(g_a, buffer=b) # gets the result into supplied buffer b ga.sync() if not np.all(b == 0): ga.error('ga.zero failed') if 0 == me: print '' print ' ga.zero is OK' print '' ga.sync() # each node fills in disjoint sections of the array if 0 == me: print '> Checking disjoint put ... ' ga.sync() inc = (n-1)/20 + 1 ij = 0 for i in range(0,n,inc): for j in range(0,n,inc): check = False if MIRROR: check = ij % lprocs == iproc else: check = ij % nproc == me if check: lo = [i,j] hi = [min(i+inc,n), min(j+inc,n)] #piece = a[lo[0]:hi[0], lo[1]:hi[1]] piece = a[ga.zip(lo,hi)] ga.put(g_a, piece, lo, hi) # the following check is not part of the original test.F result = ga.get(g_a, lo, hi) # ndarray created inside get if not np.all(result == piece): ga.error("put followed by get failed", 1) ga.sync() ij += 1 ga.sync() # all nodes check all of a b[:] = 0 b = ga.get(g_a, buffer=b) if not np.all(a == b): ga.error('put failed, exiting') if 0 == me: print '' print ' ga.put is OK' print '' # now check nloop random gets from each node if 0 == me: print '> Checking random get (%d calls)...' % nloop ga.sync() nwords = 0 random.seed(ga.nodeid()*51+1) # different seed for each proc for loop in range(nloop): ilo,ihi = random.randint(0, nloop-1),random.randint(0, nloop-1) if ihi < ilo: ilo,ihi = ihi,ilo jlo,jhi = random.randint(0, nloop-1),random.randint(0, nloop-1) if jhi < jlo: jlo,jhi = jhi,jlo nwords += (ihi-ilo+1)*(jhi-jlo+1) ihi += 1 jhi += 1 result = ga.get(g_a, (ilo,jlo), (ihi,jhi)) if not np.all(result == a[ilo:ihi,jlo:jhi]): ga.error('random get failed') if 0 == me and loop % max(1,nloop/20) == 0: print ' call %d node %d checking get((%d,%d),(%d,%d)) total %f' % ( loop, me, ilo, ihi, jlo, jhi, nwords) if 0 == me: print '' print ' ga_get is OK' print '' # each node accumulates into disjoint sections of the array if 0 == me: print '> Checking accumulate ... 
' ga.sync() random.seed(12345) # same seed for each process b[:] = np.fromfunction(lambda i,j: i+j+2, (n,n), dtype=nptype) inc = (n-1)/20 + 1 ij = 0 for i in range(0,n,inc): for j in range(0,n,inc): x = 10.0 lo = [i,j] hi = [min(i+inc,n), min(j+inc,n)] piece = b[lo[0]:hi[0], lo[1]:hi[1]] check = False if MIRROR: check = ij % lprocs == iproc else: check = ij % nproc == me if check: ga.acc(g_a, lo, hi, piece, x) ga.sync() ij += 1 # each process applies all updates to its local copy a[lo[0]:hi[0], lo[1]:hi[1]] += x * piece ga.sync() # all nodes check all of a if not np.all(ga.get(g_a) == a): ga.error('acc failed') if 0 == me: print '' print ' disjoint ga.acc is OK' print '' # overlapping accumulate ga.sync() if NEW_API: g_b = ga.create_handle() ga.set_data(g_b, (n,n), gatype) ga.set_array_name(g_b, 'b') if BLOCK_CYCLIC: if USE_SCALAPACK_DISTR: ga.set_block_cyclic_proc_grid(g_b, block_size, proc_grid) else: ga.set_block_cyclic(g_b, block_size) if MIRROR: ga.set_pgroup(g_b, p_mirror) if not ga.allocate(g_b): ga.error('ga.create failed for second array') else: if MIRROR: g_b = ga.create_config(gatype, (n,n), 'b', chunk, p_mirror) else: g_b = ga.create(gatype, (n,n), 'b') if 0 == g_b: ga.error('ga.create failed for second array') ga.zero(g_b) ga.acc(g_b, (n/2,n/2), (n/2+1,n/2+1), [1], 1) ga.sync() x = None if MIRROR: if 0 == iproc: x = abs(ga.get(g_b, (n/2,n/2), (n/2+1,n/2+1))[0,0] - lprocs) if not 0 == x: ga.error('overlapping accumulate failed -- expected %s got %s'%( x, lprocs)) else: if 0 == me: x = abs(ga.get(g_b, (n/2,n/2), (n/2+1,n/2+1))[0,0] - nproc) if not 0 == x: ga.error('overlapping accumulate failed -- expected %s got %s'%( x, nproc)) if 0 == me: print '' print ' overlapping ga.acc is OK' print '' # check the ga.add function if 0 == me: print '> Checking add ...' random.seed(12345) # everyone has same seed for i in range(n): for j in range(n): b[i,j] = random.random() a[i,j] = 0.1*a[i,j] + 0.9*b[i,j] if MIRROR: if 0 == iproc: ga.put(g_b, b) else: if 0 == me: ga.put(g_b, b) ga.add(g_a, g_b, g_b, 0.1, 0.9) if not np.all(ga.get(g_b) == a): ga.error('add failed') if 0 == me: print '' print ' add is OK ' print '' # check the dot function if 0 == me: print '> Checking dot ...' random.seed(12345) # everyone has same seed sum1 = 0.0 for i in range(n): for j in range(n): b[i,j] = random.random() sum1 += a[i,j]*b[i,j] if MIRROR: if 0 == iproc: pass else: if 0 == me: ga.put(g_b, b) ga.put(g_a, a) ga.sync() sum2 = ga.dot(g_a, g_b) if mismatch(sum1, sum2): ga.error('dot wrong %s != %s' % (sum1, sum2)) if 0 == me: print '' print ' dot is OK ' print '' # check the ga.scale function if 0 == me: print '> Checking scale ...' ga.scale(g_a, 0.123) result = ga.get(g_a) if not np.all(a*0.123 == ga.get(g_a)): ga.error('scale failed') if 0 == me: print '' print ' scale is OK ' print '' # check the ga.copy function if 0 == me: print '' print '> Checking copy' print '' if 0 == me: ga.put(g_a, a) ga.copy(g_a, g_b) if not np.all(a == ga.get(g_b)): ga.error('copy failed') if 0 == me: print '' print ' copy is OK ' print '' ga.sync() if 0 == me: print '> Checking scatter/gather (might be slow)...' 
ga.sync() ijv = np.zeros((m,2), dtype=np.int64) random.seed(ga.nodeid()*51 + 1) # different seed for each proc for j in range(10): itmp = None if MIRROR: itmp = random.randint(0,lprocs-1) else: itmp = random.randint(0,nproc-1) if itmp == me: for loop in range(m): ijv[loop,:] = (random.randint(0,n-1),random.randint(0,n-1)) #if ijv[loop,0] > ijv[loop,1]: # ijv[loop,:] = ijv[loop,::-1] # reverse result = ga.gather(g_a, ijv) for loop in range(m): value = ga.get(g_a, ijv[loop], ijv[loop]+1).flatten() if not result[loop] == value: ga.error('gather failed') if 0 == me: print '' print ' gather is OK' print ''
if MIRROR:
    a[:] = np.fromfunction(lambda i,j: inode+i+j*n, (n,n), dtype=nptype)
else:
    a[:] = np.fromfunction(lambda i,j: i+j*n, (n,n), dtype=nptype)
b[:] = -1
a[:] = init_first_a(gatype, nptype, n)
b[:] = init_first_b(gatype, nptype, n)
for i in range(n):
    for j in range(n):
        b[i,j] = random.random()
        a[i,j] = 0.1*a[i,j] + 0.9*b[i,j]
if gatype == ga.C_SCPL:
    for i in range(n):
        for j in range(n):
            b[i,j] = complex(random.random(),random.random())
            a[i,j] = complex(0.1,-.1)*a[i,j] + complex(0.9,-.9)*b[i,j]
else:
    for i in range(n):
        for j in range(n):
            b[i,j] = random.random()
            a[i,j] = 0.1*a[i,j] + 0.9*b[i,j]
ga.add(g_a, g_b, g_b, 0.1, 0.9)
if not np.all(ga.get(g_b) == a):
if gatype == ga.C_SCPL:
    ga.add(g_a, g_b, g_b, complex(0.1,-0.1), complex(0.9,-0.9))
else:
    ga.add(g_a, g_b, g_b, 0.1, 0.9)
b = ga.get(g_b, buffer=b)
if np.any(np.vectorize(mismatch)(ga.get(g_b),a)):
test2D()
def main():
    if 4 != nproc and 0 == me:
        ga.error('Program requires 4 GA processes')
    test1D()
    test2D()
    if 0 == me:
        print 'All tests successful'
ignore = ga.get(g_a, llo, lhi, buf[ga.zip(llo,lhi)])
ignore = ga.get(g_a, llo, lhi, buf[ilo:ihi,jlo:jhi])
def time_get(g_a, lo, hi, buf, chunk, jump, local):
    count = 0
    rows = hi[0]-lo[0]
    cols = hi[1]-lo[1]
    shifti = [rows, 0, rows]
    shiftj = [0, cols, cols]
    seconds = time.time()
    # distance between consecutive patches increased by jump
    # to destroy locality of reference
    for ilo in range(lo[0], hi[0]-chunk-jump+1, chunk+jump):
        ihi = ilo + chunk
        for jlo in range(lo[1], hi[1]-chunk-jump+1, chunk+jump):
            jhi = jlo + chunk
            count += 1
            if local:
                llo = [ilo,jlo]
                lhi = [ihi,jhi]
                ignore = ga.get(g_a, llo, lhi, buf[ga.zip(llo,lhi)])
            else:
                index = count%3
                llo = [ilo+shifti[index],jlo+shiftj[index]]
                lhi = [ihi+shifti[index],jhi+shiftj[index]]
                ignore = ga.get(g_a, llo, lhi, buf[ga.zip(llo,lhi)])
    seconds = time.time() - seconds
    return seconds/count
ga.put(g_a, buf[ga.zip(llo,lhi)], llo, lhi)
ga.put(g_a, buf[ilo:ihi,jlo:jhi], llo, lhi)
def time_put(g_a, lo, hi, buf, chunk, jump, local):
    count = 0
    rows = hi[0]-lo[0]
    cols = hi[1]-lo[1]
    shifti = [rows, 0, rows]
    shiftj = [0, cols, cols]
    seconds = time.time()
    # distance between consecutive patches increased by jump
    # to destroy locality of reference
    for ilo in range(lo[0], hi[0]-chunk-jump+1, chunk+jump):
        ihi = ilo + chunk
        for jlo in range(lo[1], hi[1]-chunk-jump+1, chunk+jump):
            jhi = jlo + chunk
            count += 1
            if local:
                llo = [ilo,jlo]
                lhi = [ihi,jhi]
                ga.put(g_a, buf[ga.zip(llo,lhi)], llo, lhi)
            else:
                index = count%3
                llo = [ilo+shifti[index],jlo+shiftj[index]]
                lhi = [ihi+shifti[index],jhi+shiftj[index]]
                ga.put(g_a, buf[ga.zip(llo,lhi)], llo, lhi)
    seconds = time.time() - seconds
    return seconds/count
ga.acc(g_a, buf[ga.zip(llo,lhi)], llo, lhi, 1)
ga.acc(g_a, buf[ilo:ihi,jlo:jhi], llo, lhi, 1)
def time_acc(g_a, lo, hi, buf, chunk, jump, local):
    count = 0
    rows = hi[0]-lo[0]
    cols = hi[1]-lo[1]
    shifti = [rows, 0, rows]
    shiftj = [0, cols, cols]
    seconds = time.time()
    # distance between consecutive patches increased by jump
    # to destroy locality of reference
    for ilo in range(lo[0], hi[0]-chunk-jump+1, chunk+jump):
        ihi = ilo + chunk
        for jlo in range(lo[1], hi[1]-chunk-jump+1, chunk+jump):
            jhi = jlo + chunk
            count += 1
            if local:
                llo = [ilo,jlo]
                lhi = [ihi,jhi]
                ga.acc(g_a, buf[ga.zip(llo,lhi)], llo, lhi, 1)
            else:
                index = count%3
                llo = [ilo+shifti[index],jlo+shiftj[index]]
                lhi = [ihi+shifti[index],jhi+shiftj[index]]
                ga.acc(g_a, buf[ga.zip(llo,lhi)], llo, lhi, 1)
    seconds = time.time() - seconds
    return seconds/count
TestPutGetAcc1(g_a, n, chunk, buf, lo, hi, True)
def test1D():
    n = 1024*1024
    buf = np.zeros(n/4, dtype=np.float64)
    chunk = np.asarray([1,9,16,81,256,576,900,2304,4096,8281,
                        16384,29241,65536,124609,193600,262144])
    g_a = ga.create(ga.C_DBL, (n,), 'a')
    if 0 == g_a:
        ga.error('ga.create failed')
    buf[:] = 0.01
    ga.zero(g_a)
    if 0 == me:
        print ''
        print ''
        print ''
        print (' Performance of GA get, put & acc'
               ' for 1-dimensional sections of array[%d]' % n)
    lo,hi = ga.distribution(g_a, me)
    # remote ops
    TestPutGetAcc1(g_a, n, chunk, buf, lo, hi, False)
    # local ops
    TestPutGetAcc1(g_a, n, chunk, buf, lo, hi, True)
print ilo,ihi
ignore = ga.get(g_a, llo, lhi, buf[llo:lhi])
ignore = ga.get(g_a, llo, lhi, buf[ilo:ihi])
def time_get1(g_a, lo, hi, buf, chunk, jump, local):
    count = 0
    rows = hi[0]-lo[0]
    shift = [3*rows, 2*rows, rows]
    seconds = time.time()
    # distance between consecutive patches increased by jump
    # to destroy locality of reference
    for ilo in range(lo[0], hi[0]-chunk-jump+1, chunk+jump):
        ihi = ilo+chunk
        count += 1
        if local:
            ignore = ga.get(g_a, [ilo], [ihi], buf[ilo:ihi])
        else:
            index = count%3
            llo = ilo+shift[index]
            lhi = ihi+shift[index]
            print ilo,ihi
            ignore = ga.get(g_a, llo, lhi, buf[llo:lhi])
    seconds = time.time() - seconds
    return seconds/count
ga.put(g_a, buf[llo:lhi], llo, lhi)
ga.put(g_a, buf[ilo:ihi], llo, lhi)
def time_put1(g_a, lo, hi, buf, chunk, jump, local):
    count = 0
    rows = hi[0]-lo[0]
    shift = [rows, 2*rows, 3*rows]
    seconds = time.time()
    # distance between consecutive patches increased by jump
    # to destroy locality of reference
    for ilo in range(lo[0], hi[0]-chunk-jump+1, chunk+jump):
        ihi = ilo+chunk
        count += 1
        if local:
            ga.put(g_a, buf[ilo:ihi], [ilo], [ihi])
        else:
            index = count%3
            llo = ilo+shift[index]
            lhi = ihi+shift[index]
            ga.put(g_a, buf[llo:lhi], llo, lhi)
    seconds = time.time() - seconds
    return seconds/count
elif gatype == ga.C_INT:
elif gatype in [ga.C_INT,ga.C_LONG]:
def create_local_a(gatype):
    """TODO"""
    nptype = ga.dtype(gatype)
    if gatype == ga.C_SCPL:
        if MIRROR:
            a = np.fromfunction(lambda i,j: i+inode, (n,n), dtype=np.float32)
            b = np.fromfunction(lambda i,j: j*n, (n,n), dtype=np.float32)
            return np.vectorize(complex)(a,b)
        else:
            a = np.fromfunction(lambda i,j: i, (n,n), dtype=np.float32)
            b = np.fromfunction(lambda i,j: j*n, (n,n), dtype=np.float32)
            return np.vectorize(complex)(a,b)
    elif gatype == ga.C_DCPL:
        if MIRROR:
            a = np.fromfunction(lambda i,j: i+inode, (n,n), dtype=np.float64)
            b = np.fromfunction(lambda i,j: j*n, (n,n), dtype=np.float64)
            return np.vectorize(complex)(a,b)
        else:
            a = np.fromfunction(lambda i,j: i, (n,n), dtype=np.float64)
            b = np.fromfunction(lambda i,j: j*n, (n,n), dtype=np.float64)
            return np.vectorize(complex)(a,b)
    elif gatype in [ga.C_DBL,ga.C_FLT]:
        if MIRROR:
            return np.fromfunction(lambda i,j: inode+i+j*n, (n,n), dtype=nptype)
        else:
            return np.fromfunction(lambda i,j: i+j*n, (n,n), dtype=nptype)
    elif gatype == ga.C_INT:
        if MIRROR:
            return np.fromfunction(lambda i,j: inode+i+j*1000, (n,n), dtype=nptype)
        else:
            return np.fromfunction(lambda i,j: i+j*1000, (n,n), dtype=nptype)
check_dot(gatype)
check_scale(gatype)
def check_int():
    gatype = ga.C_INT
    check_zero(gatype)
    check_put_disjoint(gatype)
    check_get(gatype)
    check_accumulate_disjoint(gatype)
    check_accumulate_overlap(gatype)
    #check_add(gatype)
    check_dot(gatype)
    check_scale(gatype)
    check_copy(gatype)
    check_scatter_gather(gatype)
    """
    ga.sync()
    if 0 == me and n > 7:
        print ''
        print '> Checking ga.print_patch --- should see '
        #print ' [2002 3002 4002 5002 6002]'
        #print ' [2003 3003 4003 5003 6003]'
        #print ' [2004 3004 4004 5004 6004]'
        print ''
    if n > 7:
        ga.print_patch(g_a, (3,3), (5,7))
    """
check_print_patch(gatype)
check_fence_and_lock(gatype)
pass
gatype = ga.C_LONG
check_zero(gatype)
check_put_disjoint(gatype)
check_get(gatype)
check_accumulate_disjoint(gatype)
check_accumulate_overlap(gatype)
check_copy(gatype)
check_scatter_gather(gatype)
check_print_patch(gatype)
check_fence_and_lock(gatype)
def check_long(): pass
if do_cmdline and pyfeatures.get('nmag', 'clean', raw=True): nsim.snippets.rename_old_files([logfilename])
def setup(argv=None, do_features=True, do_logging=True, do_welcome=None, log_to_console_only=None, warn_about_py_ext=True): """Carry out the various stages of the setup of Nsim. do_cmdline: the command line is parsed using the OptionParser object stored inside cmdline_parser variable. If it has not been set by the user, then it is automatically generated in this function. """ # If the setup has been marked as completed then exit immediately if task_done['completed']: return # Check once for all what really needs to be done do_features = do_features and not task_done['features'] do_cmdline = (argv != None) and not task_done['cmdline'] do_logging = do_logging and not task_done['logging'] do_welcome = (do_welcome == True or (do_welcome == None and not task_done['welcome'])) # We first parse the command line (options, arguments) = (None, None) if do_cmdline: global cmdline_parser if cmdline_parser == None: cmdline_parser = generate_cmdline_parser() (options, arguments) = cmdline_parser.parse_args(argv) task_done['cmdline'] = True # We would like now to setup the ocaml and python feature objects. # First, however, we need to determine the name of the logging file, since # it is required in order to construct correctly the pyfeature object. savedir = '.' runid = get_nmag_runid(arguments) logfilename = runid + '_log.log' logfilepath = os.path.join(savedir, logfilename) # We now find out where we should read the configuration for logging logconfigfile = None if options != None and options.logconfigfile != None: logconfigfile = options.logconfigfile # We can now construct the feature objects global pyfeatures, ocamlfeatures if do_features: pyfeatures = \ nsim.features.Features(defaults={'logfilepath':logfilepath}) ocamlfeatures = nsim.features.OcamlFeatures() # We now determine the exact name of the config file which we should # load in order to setup the logger nmaglibrarypath = nsim.snippets.get_absolute_librarypath(__file__)[0] if logconfigfile == None: # The user has not provided his own configuration file for logging # we then have to use one of the default files. if log_to_console_only: logconfigfile = "logging-console.conf" else: logconfigfile = "logging.conf" # We need to prepend the path where this files can be found # (which doesn't make sense if the user has provided his own file) logconfigfile = os.path.join(nmaglibrarypath, logconfigfile) else: # The option log_to_console_only does not make sense in this case. # We may 'assert log_to_console_only == None', but we don't. # We just ignore the issue, this is the best thing to do! 
pass # We finally fill the pyfeatures object with default values (to be # overridden by command line options) ocamlconfigfile = os.path.join(nmaglibrarypath, 'ocaml.conf') pyfeatures.set('nmag', 'ocamlconfigfile', ocamlconfigfile) pyfeatures.set('nmag', 'logconfigfile', logconfigfile) pyfeatures.set('nmag', 'loglevel', 'info') pyfeatures.set('etc', 'runid', runid) pyfeatures.set('etc', 'mainprogram', 'nmag') pyfeatures.set('etc', 'savedir', savedir) # Location of output files # Move any settings from pyfeatures to OCaml's features living in # 'snippets' pyfeatures.to_ocaml_features() task_done['features'] = True # Interpret the command line arguments and override settings in pyfeatures if do_cmdline: cmdline_to_pyfeatures(options, arguments) # We are now ready to setup the logger global log log = logging.getLogger('nsim') if do_logging: # Setup logging from default logging configuration file nsim.logtools.setup_loggers(logconfigfile, logfilepath) # Last, we need to set the global log level nsim.logtools.set_global_loglevel(pyfeatures.get('nmag', 'loglevel')) # All loggers are set up. setup_ocaml_loggers() log.debug('current logging status is \n%s' % nsim.logtools.logging_status_str()) task_done['logging'] = True # Just a warning message about the extension of the file if warn_about_py_ext and do_cmdline and len(argv) > 0: _, script_extension = os.path.splitext(os.path.split(argv[0])[1]) if script_extension.lower() != ".py": msg = ("Nmag scripts need to have the .py extension. Will wait " "2 seconds before continuing...") log.warn(msg) nsim.snippets.funky_wait(2) if do_cmdline: setup_things(options, arguments) if do_welcome: import ocaml log.debug("Sundials library path ='%s'" % ocaml.get_nsim_sundials_library_path()) nr_cpus = ocaml.petsc_mpi_nr_nodes() log.info("Nmag micromagnetic simulator") log.info("Runid is '%s'" % (pyfeatures.get('etc', 'runid'))) log.info("Using %d CPUs" % (nr_cpus)) if nr_cpus > 1: log.info("Waiting 1 seconds for messages from slaves to arrive " "(experimental)") ocaml.mpi_hello() time.sleep(1) task_done['welcome'] = True # If the simulation has been run before, then we rename the old log file # (this is the last thing we do: we don't want to do this in case there is a # command line parsing error, for example) if do_cmdline and pyfeatures.get('nmag', 'clean', raw=True): nsim.snippets.rename_old_files([logfilename]) return (options, arguments)
["petscvec", "petscmat", "petscksp", "petscts", "petscsnes", "petscdm"],
["petsc", "petscvec", "petscmat", "petscksp", "petscts", "petscsnes", "petscdm"],
def find_file(names, prefixes, suffixes, paths):
    for full_name in possible_names(names, prefixes, suffixes):
        full_path = find_path_of_file(full_name, paths)
        if full_path != None:
            return (full_name, full_path)
    return None
raise "Found inconsistency between the mesh you loaded and the " \
      "provided list of (region_name, materials). The mesh has " \
      "%d regions but the provided list has %d pairs." \
      % (self.mesh.numregions-1, len(self.region_name_list))
msg = ("Found inconsistency between the mesh you loaded and the "
       "provided list of (region_name, materials). The mesh has "
       "%d regions but the provided list has %d pairs."
       % (self.mesh.numregions-1, len(self.region_name_list)))
raise NmagUserError(msg)
def load_mesh(self, filename, region_names_and_mag_mats, unit_length,
              do_reorder=False, manual_distribution=None):
    """
    :Parameters:
return self.set_field_data(field_name, subfield_name, save_id)
return self.set_field_data(field_name, subfield_name, save_row)
def set_field_data_at_time(self, field_name, subfield_name, time,
                           equality_tolerance=1e-19):
    f = self.open_handler()
    counters = self.get_counters_for_field(field_name, subfield_name)
    ids = counters['id']
    times = counters['time']
    rows = counters['row']
    num_ids = len(ids)
    assert len(times) == num_ids, ("Inconsistency found in "
                                   "Fields.set_field_data_at_time")
    lower_maximum = None
    upper_minimum = None
    for i in range(num_ids):
        save_time = times[i]
        save_id = ids[i]
        save_row = rows[i]
        if abs(save_time - time) < equality_tolerance:
            logmsg("The time you specified (t=%g) matches sufficiently "
                   "well (tolerance=%g) with one for which the data was "
                   "saved (t=%g, row=%d): using this!"
                   % (time, equality_tolerance, save_time, save_row))
            return self.set_field_data(field_name, subfield_name, save_id)
def split_strings(ls):
    s = ls.lstrip()
def split_strings(s, delim='"'):
    """Similar to s.split() but do not split whatever is included
    between two delimiters."""
    OUTSIDE, INSIDE, SPACE = range(3)
    state = SPACE
    n = len(s)
    i = 0
    begin = 0
def split_strings(ls):
    s = ls.lstrip()
    pieces = []
    while len(s) > 0:
        second = s.find('"', 1)
        assert second > 0 and s[0] == '"' and s[second] == '"', \
            ("split_strings expects a number of strings delimited by \" and "
             "separated by spaces, but it got: '%s'." % ls)
        pieces.append(s[1: second])
        s = s[second + 1:].lstrip()
    return pieces
while len(s) > 0:
    second = s.find('"', 1)
    assert second > 0 and s[0] == '"' and s[second] == '"', \
        ("split_strings expects a number of strings delimited by \" and "
         "separated by spaces, but it got: '%s'." % ls)
    pieces.append(s[1: second])
    s = s[second + 1:].lstrip()
while i < n:
    c = s[i]
    inc = 1
    if state == SPACE:
        if not c.isspace():
            begin = i
            state = OUTSIDE
    elif state == OUTSIDE:
        if c.isspace():
            pieces.append(s[begin:i])
            state = SPACE
        elif c == delim:
            state = INSIDE
    else:
        if c == delim:
            state = OUTSIDE
    i += inc
if state != SPACE:
    pieces.append(s[begin:])
return '%s_%s' % (field_name, subfield_name)
if subfield_name == None or len(subfield_name.strip()) == 0:
    return field_name
else:
    return '%s_%s' % (field_name, subfield_name)
def build_full_field_name(field_name, subfield_name): return '%s_%s' % (field_name, subfield_name)
["DEBUG","Electric_ext","v_Electric_ext",6],
["DEBUG","grad_m","v_grad_m",18],
["DEBUG","H_magnetoelectric","v_H_magnetoelectric",6],
def E_relabeled(new_label): return map(lambda mn: ("E_" + mn, new_label + mn), material_names)
dx, dy, dz = [0.5*ssi for ssi in ss]
dx, dy, dz = [0.5*ssi.value for ssi in ss]
def get_field(self):
    root_node = self.content
    segment_node = root_node.a_segment
    h = segment_node.a_header
    ss = [h.a_xstepsize, h.a_ystepsize, h.a_zstepsize]
    dx, dy, dz = [0.5*ssi for ssi in ss]
return FieldLattice(min_max_dim, dim=field_dim,
return FieldLattice(min_max_ndim, dim=field_dim,
"""contribs : | contrib
"""contribs : contrib
def p_contribs(t):
    """contribs : | contrib
                | contribs SIGN contrib"""
    # { [$1] }
    # { let (c1, t2) = $3 in ($2*.c1, t2)::$1 }
    pass
"""fields : | field
"""fields : field
def p_fields(t):
    """fields : | field
              | field COMMA fields"""
    #{ [$1] }
    #{ $1::$3 }
    pass
"""region_logic_atomic : | LPAREN region_logic RPAREN
"""region_logic_atomic : LPAREN region_logic RPAREN
def p_region_logic_atomic(t):
    """region_logic_atomic : | LPAREN region_logic RPAREN
                           | INT
                           | STRING
                           | DOF_REGION_ALL EQUALS INT
                           | DOF_REGION_SOME EQUALS INT
                           | DOF_REGION_ALL EQUALS STRING
                           | DOF_REGION_SOME EQUALS STRING
                           | HASH EQUALS INT
    """
    # {$2}
    # {DLOG_some (string_of_int $1)}
    # {DLOG_some $1}
    # {DLOG_all (string_of_int $3)}
    # {DLOG_some (string_of_int $3)}
    # {DLOG_all $3}
    # {DLOG_some $3}
    # {DLOG_nregions $3}
    pass
"""region_logic_opt_not : | DOF_REGION_NOT region_logic_atomic
"""region_logic_opt_not : DOF_REGION_NOT region_logic_atomic
def p_region_logic_opt_not(t):
    """region_logic_opt_not : | DOF_REGION_NOT region_logic_atomic
                            | region_logic_atomic
    """
    # {DLOG_not $2}
    # {$1}
    pass
"""region_logic_and : | region_logic_opt_not DOF_REGION_AND region_logic_and
"""region_logic_and : region_logic_opt_not DOF_REGION_AND region_logic_and
def p_region_logic_and(t):
    """region_logic_and : | region_logic_opt_not DOF_REGION_AND region_logic_and
                         | region_logic_opt_not
    """
    # {DLOG_and [$1;$3]}
    # {$1}
    pass
"""region_logic_or : | region_logic_and DOF_REGION_OR region_logic_or
"""region_logic_or : region_logic_and DOF_REGION_OR region_logic_or
def p_region_logic_or(t):
    """region_logic_or : | region_logic_and DOF_REGION_OR region_logic_or
                        | region_logic_and"""
    # {DLOG_or [$1;$3]}
    # {$1}
    pass
if physical_quantity == 0:
    return True
physical_quantity = Physical(float(physical_quantity))
if self._value == 0.0 or physical_quantity._dims == self._dims:
    return ((physical_quantity == 0) or
            reduce(lambda x, y: x and (y == 0), self._dims, True))
elif self._value == 0.0:
def is_compatible_with(self, physical_quantity):
    """
    Returns True when the given physical quantity is compatible with
    the object itself.
try:
    return self.is_compatible_with(Physical(float(physical_quantity)))
except:
    return False
elif isinstance(physical_quantity, SI):
    return (physical_quantity._dims == self._dims)
else:
    try:
        x = float(physical_quantity)
    except:
        return False
    return self.is_compatible_with(x)
return svnversion.svnversion
try:
    import svnversion
    return svnversion.svnversion
except:
    return "(unknown)"
def get_version_string(): return svnversion.svnversion
msg += "\n\tnsim.svnversion =" + str(nsim.svnversion.svnversion)
msg += "\n\tnsim.version =" + str(v)
def get_nmag_release_dist_svn_string():
    msg = "Versions:"
    msg += "\n\tnsim.svnversion =" + str(nsim.svnversion.svnversion)
    msg += "\n\tnmag-release =" + str(get_nmag_release())
    msg += "\n\tnmag-distribution-mode =" + str(get_nmag_distmode())
    msg += "\n\tnmag-release-date =" + str(get_nmag_release_date())
    return msg
u = hdf5.get_mesh_unit(filehandle)
u = SI(1e-9, "m")
def set_from_file(self, filehandle, field_name, subfield_name): """Read the field info from the given hdf5 file handle or file name. """ full_name = build_full_field_name(field_name, subfield_name)
configs["petsc-libdir"] = (
configs["petsc-libdir"] = [
def find_file(names, prefixes, suffixes, paths): for full_name in possible_names(names, prefixes, suffixes): full_path = find_path_of_file(full_name, paths) if full_path != None: return (full_name, full_path) return None
["petsc", "petscvec", "petscmat", "petscksp", "petscts", "petscsnes", "petscdm"],
["petscvec", "petscmat", "petscksp", "petscts", "petscsnes", "petscdm"],
def find_file(names, prefixes, suffixes, paths): for full_name in possible_names(names, prefixes, suffixes): full_path = find_path_of_file(full_name, paths) if full_path != None: return (full_name, full_path) return None
)
]
def find_file(names, prefixes, suffixes, paths): for full_name in possible_names(names, prefixes, suffixes): full_path = find_path_of_file(full_name, paths) if full_path != None: return (full_name, full_path) return None
mwe_field_by_name = self._master_mwes_and_fields_by_name
def dump_fields(self, fields=None, filename=None, inspect=None, format="binary"): """Dump the given field to a vtk file and launch Mayavi for visualisation, if required (useful for interactive work).""" if filename == None: filename = "%s-dump.vtk" % self.name
fields = [f for _, f in mwe_field_by_name.values()]
fields = [self._fields[name] for name in self._fields.keys()]
def dump_fields(self, fields=None, filename=None, inspect=None, format="binary"): """Dump the given field to a vtk file and launch Mayavi for visualisation, if required (useful for interactive work).""" if filename == None: filename = "%s-dump.vtk" % self.name
fields = [mwe_field_by_name[name][1] for name in fields]
fields = [self._fields[name] for name in fields]
def dump_fields(self, fields=None, filename=None, inspect=None, format="binary"): """Dump the given field to a vtk file and launch Mayavi for visualisation, if required (useful for interactive work).""" if filename == None: filename = "%s-dump.vtk" % self.name
if int(mapid) >= 100:
if int(mapid) in (100, 101):
def mapcols(mapid): # "i" and "o" are omitted from barrington atlas columns in maps 100-102 if int(mapid) >= 100: return 'abcdefghjklmnpqrstuvwxyz' else: return 'abcdefghijklmnopqrstuvwxyz'
site = container.portal_url.getPortalObject()
site = getToolByName(container, 'portal_url').getPortalObject()
def getTemplate(self, container): site = container.portal_url.getPortalObject() parent = container container_base = Acquisition.aq_base(container) while parent is not None: template = component.queryMultiAdapter( (parent, self), interfaces.ITemplate) parent_base = Acquisition.aq_base(parent) if template is not None and ( parent_base is container_base or not interfaces.IContainerOnlyTemplate.providedBy(template) ): return template if Acquisition.aq_base(parent) is site: return parent = Acquisition.aq_parent(parent) return interfaces.ITemplate(self, None)
if list1[x+y] != list2[y]: break else: counter += 1
try: if list1[x+y] != list2[y]: break else: counter += 1 except IndexError: return -1
def locateInArray(list1, list2): x = 0 y = 0 for x in xrange(len(list1)): if list1[x] == list2[0]: counter = 0 for y in xrange(len(list2)): if list1[x+y] != list2[y]: break else: counter += 1 if counter == len(list2): return x return -1
for d in self.lib_dirs: sys.path.insert(0, os.path.join(root, d))
sys.path.append(root)
def configure(self, options, config): super(NoseGAE, self).configure(options, config) if not self.enabled: return self.config = config if options.gae_app is not None: self._path = options.gae_app else: self._path = config.workingDir if options.gae_lib_root is not None: root = self._gae_path = options.gae_lib_root for d in self.lib_dirs: sys.path.insert(0, os.path.join(root, d)) else: self._gae_path = None if options.gae_data is not None: self._data_path = options.gae_data self._temp_data = False else: self._data_path = os.path.join(tempfile.gettempdir(), 'nosegae.datastore') self._temp_data = True self.sandbox_enabled = options.sandbox_enabled try: from google.appengine.tools import dev_appserver from google.appengine.tools.dev_appserver_main import \ DEFAULT_ARGS, ARG_CLEAR_DATASTORE, ARG_LOG_LEVEL, \ ARG_DATASTORE_PATH, ARG_HISTORY_PATH self._gae = {'dev_appserver': dev_appserver, 'ARG_LOG_LEVEL': ARG_LOG_LEVEL, 'ARG_CLEAR_DATASTORE': ARG_CLEAR_DATASTORE, 'ARG_DATASTORE_PATH': ARG_DATASTORE_PATH, 'ARG_HISTORY_PATH': ARG_HISTORY_PATH, 'DEFAULT_ARGS': DEFAULT_ARGS} # prefill these into sys.modules import webob import yaml import django except ImportError, e: self.enabled = False warn("Google App Engine not found in %s" % options.gae_lib_root, RuntimeWarning) if sys.version_info[0:2] < (2,5): raise EnvironmentError( "Python version must be 2.5 or greater, like the Google App Engine environment. " "Tests are running with: %s" % sys.version) # As of SDK 1.2.5 the dev_appserver.py aggressively adds some logging handlers. # This removes the handlers but note that Nose will still capture logging and # report it during failures. See Issue 25 for more info. rootLogger = logging.getLogger() for handler in rootLogger.handlers: if isinstance(handler, logging.StreamHandler): rootLogger.removeHandler(handler)
warn("Google App Engine not found in %s" % options.gae_lib_root, RuntimeWarning)
raise
def configure(self, options, config): super(NoseGAE, self).configure(options, config) if not self.enabled: return self.config = config if options.gae_app is not None: self._path = options.gae_app else: self._path = config.workingDir if options.gae_lib_root is not None: root = self._gae_path = options.gae_lib_root for d in self.lib_dirs: sys.path.insert(0, os.path.join(root, d)) else: self._gae_path = None if options.gae_data is not None: self._data_path = options.gae_data self._temp_data = False else: self._data_path = os.path.join(tempfile.gettempdir(), 'nosegae.datastore') self._temp_data = True self.sandbox_enabled = options.sandbox_enabled try: from google.appengine.tools import dev_appserver from google.appengine.tools.dev_appserver_main import \ DEFAULT_ARGS, ARG_CLEAR_DATASTORE, ARG_LOG_LEVEL, \ ARG_DATASTORE_PATH, ARG_HISTORY_PATH self._gae = {'dev_appserver': dev_appserver, 'ARG_LOG_LEVEL': ARG_LOG_LEVEL, 'ARG_CLEAR_DATASTORE': ARG_CLEAR_DATASTORE, 'ARG_DATASTORE_PATH': ARG_DATASTORE_PATH, 'ARG_HISTORY_PATH': ARG_HISTORY_PATH, 'DEFAULT_ARGS': DEFAULT_ARGS} # prefill these into sys.modules import webob import yaml import django except ImportError, e: self.enabled = False warn("Google App Engine not found in %s" % options.gae_lib_root, RuntimeWarning) if sys.version_info[0:2] < (2,5): raise EnvironmentError( "Python version must be 2.5 or greater, like the Google App Engine environment. " "Tests are running with: %s" % sys.version) # As of SDK 1.2.5 the dev_appserver.py aggressively adds some logging handlers. # This removes the handlers but note that Nose will still capture logging and # report it during failures. See Issue 25 for more info. rootLogger = logging.getLogger() for handler in rootLogger.handlers: if isinstance(handler, logging.StreamHandler): rootLogger.removeHandler(handler)
try: self.cur.execute("UPDATE highscore SET score='"+str(score)+"' WHERE name='"+name+"'") except sqlite3.Error, e: print "Ooops:", e.args[0]
def update(self, name, score, font): player = [] i = 1 try: self.cur.execute('INSERT INTO highscore VALUES (null, ?, ?)', (name, score))
connection = sqlite.connect('test.db')
connection = sqlite3.connect('test.db')
def main(): pygame.init() try: w = 640 h = 480 screen = pygame.display.set_mode((w, h)) font = pygame.font.SysFont('Arial Black', 20) pygame.mouse.set_visible(False) clock = pygame.time.Clock() ch = CollisionHandler() TIMEEVENT = USEREVENT + 1 pygame.time.set_timer(TIMEEVENT, 15) # Load our balls balls = [ Ball(screen, (70, 15), (random.randint(1,3),random.randint(1,3)) ), Ball(screen, (200, 60), (random.randint(1,3),random.randint(1,3)) ), Ball(screen, (random.randint(50, 200), 75), (3,2) ), Ball(screen, (50, 45), (3,2) ), Ball(screen, (100, 200), (1,3) ) ] for ball in balls: ch.addBall(ball) # Insert walls walls = [ Wall( screen, (0,30), (10, screen.get_height()) ), Wall( screen, (screen.get_width()-10,30), (10, screen.get_height()) ), Wall( screen, (0,30), (screen.get_width(), 10) ) ] for wall in walls: ch.addObject(wall) paddle = Paddle(screen) ch.addObject(paddle) # Game variables run = True gameover = False viewHighScore = False; lifes = len(balls)-1 time = 0 name = "Johan" score = 0 # Sqllite init connection = sqlite.connect('test.db') cursor = connection.cursor() try: cursor.execute('CREATE TABLE highscore (id INTEGER PRIMARY KEY, name VARCHAR(50), score INTEGER)') except sqlite.Error, e: print "Create table:", e.args[0] # Load scoreboard scoreBoard = font.render("Life: " + str(lifes) + " Score: ", True, (255, 0, 0)) while not gameover: # Check for quits for event in pygame.event.get(): if event.type == pygame.QUIT: gameover = True cursor.close() connection.commit() connection.close() if event.type == TIMEEVENT: time += 1 # Update positions for balls for ball in balls: if ball.update(): lifes -= 1 # Update positions for paddle paddle.update() # Update collision handler if ch.update(): score += 1 # Draw background screen.fill((0, 0, 0)) # Draw walls for wall in walls: wall.draw() # Draw paddle paddle.draw() # Draw balls for ball in balls: ball.draw() #Draw scoreboard if run: scoreBoard = font.render("Life: " + str(lifes) + " Score: " + str(score), True, (255, 0, 0)) pygame.draw.rect(screen, (0, 255, 255), (0, 0, time, 30)) screen.blit(scoreBoard, (0, 5)) if time >= screen.get_width(): time = 0; balls.append(ch.addBall(Ball(screen, (random.randint(50, 100), random.randint(100, 200)), (random.randint(1,3),random.randint(1,3)) ))) if lifes <= 0 and run: pygame.time.set_timer(TIMEEVENT, 0) finalScore = score viewHighScore = True run = False name = inputbox.ask(screen, "Your name ") try: cursor.execute("SELECT id FROM highscore WHERE name = ?", (name,)) data=cursor.fetchone() if data is None: print('There is no component named %s'%name) cursor.execute('INSERT INTO highscore VALUES (null, ?, ?)', (name, score)) else: print('Component %s found with rowid %s'%(name,data[0])) except sqlite.Error, e: print "Ooops:", e.args[0] cursor.execute("UPDATE highscore SET score='"+str(finalScore)+"' WHERE name='"+name+"'") cursor.execute('SELECT * FROM highscore ORDER BY score DESC LIMIT 0,10') i = 1 player = [] for row in cursor: #Loopa genom player.append(font.render(str(i) + '. ' + str(row[1]) + ' - ' + str(row[2]), True, (255, 0, 0))) i += 1 print answer scoreBoard = font.render("Life: 0 Score: " + str(finalScore), True, (255, 0, 0)) if viewHighScore: i = 30 gameOverImg = font.render("Game Over", True, (255, 0, 0)) highScoreImg = font.render(" Name Score", True, (255, 0, 0)) for row in player: screen.blit(row, (screen.get_width()/2-100, screen.get_height()/2+50+i)) i += 30 screen.blit(gameOverImg, (screen.get_width()/2, screen.get_height()/2)) screen.blit(highScoreImg, (screen.get_width()/2-100, screen.get_height()/2+50)) # Update screen pygame.display.flip() clock.tick(60) finally: pygame.quit()
except sqlite.Error, e:
except sqlite3.Error, e:
def main(): pygame.init() try: w = 640 h = 480 screen = pygame.display.set_mode((w, h)) font = pygame.font.SysFont('Arial Black', 20) pygame.mouse.set_visible(False) clock = pygame.time.Clock() ch = CollisionHandler() TIMEEVENT = USEREVENT + 1 pygame.time.set_timer(TIMEEVENT, 15) # Load our balls balls = [ Ball(screen, (70, 15), (random.randint(1,3),random.randint(1,3)) ), Ball(screen, (200, 60), (random.randint(1,3),random.randint(1,3)) ), Ball(screen, (random.randint(50, 200), 75), (3,2) ), Ball(screen, (50, 45), (3,2) ), Ball(screen, (100, 200), (1,3) ) ] for ball in balls: ch.addBall(ball) # Insert walls walls = [ Wall( screen, (0,30), (10, screen.get_height()) ), Wall( screen, (screen.get_width()-10,30), (10, screen.get_height()) ), Wall( screen, (0,30), (screen.get_width(), 10) ) ] for wall in walls: ch.addObject(wall) paddle = Paddle(screen) ch.addObject(paddle) # Game variables run = True gameover = False viewHighScore = False; lifes = len(balls)-1 time = 0 name = "Johan" score = 0 # Sqllite init connection = sqlite.connect('test.db') cursor = connection.cursor() try: cursor.execute('CREATE TABLE highscore (id INTEGER PRIMARY KEY, name VARCHAR(50), score INTEGER)') except sqlite.Error, e: print "Create table:", e.args[0] # Load scoreboard scoreBoard = font.render("Life: " + str(lifes) + " Score: ", True, (255, 0, 0)) while not gameover: # Check for quits for event in pygame.event.get(): if event.type == pygame.QUIT: gameover = True cursor.close() connection.commit() connection.close() if event.type == TIMEEVENT: time += 1 # Update positions for balls for ball in balls: if ball.update(): lifes -= 1 # Update positions for paddle paddle.update() # Update collision handler if ch.update(): score += 1 # Draw background screen.fill((0, 0, 0)) # Draw walls for wall in walls: wall.draw() # Draw paddle paddle.draw() # Draw balls for ball in balls: ball.draw() #Draw scoreboard if run: scoreBoard = font.render("Life: " + str(lifes) + " Score: " + str(score), True, (255, 0, 0)) pygame.draw.rect(screen, (0, 255, 255), (0, 0, time, 30)) screen.blit(scoreBoard, (0, 5)) if time >= screen.get_width(): time = 0; balls.append(ch.addBall(Ball(screen, (random.randint(50, 100), random.randint(100, 200)), (random.randint(1,3),random.randint(1,3)) ))) if lifes <= 0 and run: pygame.time.set_timer(TIMEEVENT, 0) finalScore = score viewHighScore = True run = False name = inputbox.ask(screen, "Your name ") try: cursor.execute("SELECT id FROM highscore WHERE name = ?", (name,)) data=cursor.fetchone() if data is None: print('There is no component named %s'%name) cursor.execute('INSERT INTO highscore VALUES (null, ?, ?)', (name, score)) else: print('Component %s found with rowid %s'%(name,data[0])) except sqlite.Error, e: print "Ooops:", e.args[0] cursor.execute("UPDATE highscore SET score='"+str(finalScore)+"' WHERE name='"+name+"'") cursor.execute('SELECT * FROM highscore ORDER BY score DESC LIMIT 0,10') i = 1 player = [] for row in cursor: #Loopa genom player.append(font.render(str(i) + '. ' + str(row[1]) + ' - ' + str(row[2]), True, (255, 0, 0))) i += 1 print answer scoreBoard = font.render("Life: 0 Score: " + str(finalScore), True, (255, 0, 0)) if viewHighScore: i = 30 gameOverImg = font.render("Game Over", True, (255, 0, 0)) highScoreImg = font.render(" Name Score", True, (255, 0, 0)) for row in player: screen.blit(row, (screen.get_width()/2-100, screen.get_height()/2+50+i)) i += 30 screen.blit(gameOverImg, (screen.get_width()/2, screen.get_height()/2)) screen.blit(highScoreImg, (screen.get_width()/2-100, screen.get_height()/2+50)) # Update screen pygame.display.flip() clock.tick(60) finally: pygame.quit()
if str(user_agent).find(ua.keyword) != -1:
if unicode(user_agent).find(ua.keyword) != -1:
def process_request(self, request): # don't process AJAX requests if request.is_ajax(): return
visitor.save()
try: visitor.save() except DatabaseError: pass
def process_request(self, request): # don't process AJAX requests if request.is_ajax(): return
'R':[]
def cvecs( self ): """ Return centering vectors for this space group. """ typ = self.mydata['symb'][0] ##TODO: find vectors for B and test for A and C and R ##TODO: http://img.chem.ucl.ac.uk/sgp/large/146az1.htm vs = { 'A':[ Vec( 0, 0.5, 0.5 ) ], 'C':[ Vec( 0.5, 0.5, 0 ) ], 'B':[], ##TODO: <<< error 'F':[ Vec( 0, 0.5, 0.5 ),Vec( 0.5, 0, 0.5 ), Vec( 0.5, 0.5, 0 ) ], 'I':[ Vec( 0.5, 0.5, 0.5 ) ], 'P':[], 'R':[] ##TODO: <<< error (some coordinate orientation has +(2/3,1/3,1/3),(1/3,2/3,2/3) } return vs[ typ ]
return vs[ typ ]
if typ in vs: return vs[ typ ] elif typ == 'R': if self.snum == 2: return [] elif self.snum == 1: return [ Vec( 2/3.0, 1/3.0, 1/3.0 ), Vec( 1/3.0, 2/3.0, 2/3.0 ) ]
def cvecs( self ): """ Return centering vectors for this space group. """ typ = self.mydata['symb'][0] ##TODO: find vectors for B and test for A and C and R ##TODO: http://img.chem.ucl.ac.uk/sgp/large/146az1.htm vs = { 'A':[ Vec( 0, 0.5, 0.5 ) ], 'C':[ Vec( 0.5, 0.5, 0 ) ], 'B':[], ##TODO: <<< error 'F':[ Vec( 0, 0.5, 0.5 ),Vec( 0.5, 0, 0.5 ), Vec( 0.5, 0.5, 0 ) ], 'I':[ Vec( 0.5, 0.5, 0.5 ) ], 'P':[], 'R':[] ##TODO: <<< error (some coordinate orientation has +(2/3,1/3,1/3),(1/3,2/3,2/3) } return vs[ typ ]
("M21" , (1, 2, 3, 4, 5, 1)),
("M21" , (1, 0, 2, 3, 4, 1)),
def to_sort( self ): cond = ( ("K1" , (1, 1, 1, 1, 1, 1)), ("K3" , (1, 0, 1, 1, 0, 1)), ("K5" , (0, 0, 0, 1, 1, 1)), ("H4" , (0, 1, 0, 1, 2, 1)), ("Q1" , (1, 2, 1, 1, 2, 1)), ("Q2" , (1, 0, 1, 1, 2, 1)), ("Q5" , (0, 0, 0, 1, 2, 1)), ("R1" , (1, 1, 1, 2, 2, 2)), ("R3" , (1, 0, 1, 1, 0, 2)), ("O11" , (1, 2, 1, 1, 3, 1)), ("O12" , (1, 3, 2, 1, 3, 2)), ("O2" , (1, 0, 2, 2, 3, 1)), ("O3" , (1, 0, 2, 1, 0, 2)), ("O4" , (0, 2, 0, 1, 3, 1)), ("O5" , (0, 0, 0, 1, 2, 3)), ("+O3" , (1, 0, 2, 2, 0, 1)), ("+O4" , (0, 1, 0, 1, 2, 3)), ("+O5" , (0, 0, 1, 0, 2, 3)), ("M11" , (1, 3, 2, 1, 4, 2)), ("M12" , (1, 3, 1, 2, 4, 2)), ("M21" , (1, 2, 3, 4, 5, 1)), ("M22" , (1, 0, 2, 1, 3, 2)), ("M3" , (1, 0, 2, 1, 0, 3)), ("M4" , (0, 1, 0, 2, 3, 4)), ("+M22" , (2, 0, 2, 1, 3, 1)), ("+M3" , (2, 0, 1, 1, 0, 3)), ("T1" , (1, 2, 3, 4, 5, 6)), ("T2" , (1, 0, 2, 3, 4, 5)), ("T3" , (1, 0, 2, 3, 0, 4)) ) def test( c, z ): g = {} for i in xrange( 6 ): n = c[ i ] if ( n == 0 and abs( z[ i ] ) > 0.001 ) or\ ( n != 0 and abs( z[ i ] ) < 0.001 ): ## zeroes only on zeores return False else: v = g.get( n, None ) if v: ## test with group if abs( z[ i ] - v ) > 0.001: return False else: g[ n ] = z[ i ] ## new group return True z = self.norm() for c in cond: for p in xrange( 24 ): zr = z.rotate( p ) if test( c[ 1 ], zr ): return Sort( c[ 0 ] )
print ds
def testMakeDS(self): ds = dns.dnssec.make_ds(abs_dnspython_org, sep_key, 'SHA256') print ds self.failUnless(ds == good_ds)
if tok.get_string() != r'\#':
token = tok.get() if not token.is_identifier() or token.value != '\#':
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True): if tok.get_string() != r'\#': raise dns.exception.SyntaxError, \ r'generic rdata does not start with \#' length = tok.get_int() chunks = [] while 1: token = tok.get() if token.is_eol_or_eof(): break chunks.append(token.value) hex = ''.join(chunks) data = hex.decode('hex_codec') if len(data) != length: raise dns.exception.SyntaxError, \ 'generic rdata hex data has wrong length' return cls(rdclass, rdtype, data)
if token.is_identifier and \
if token.is_identifier() and \
def from_text(rdclass, rdtype, tok, origin = None, relativize = True): """Build an rdata object from text format. This function attempts to dynamically load a class which implements the specified rdata class and type. If there is no class-and-type-specific implementation, the GenericRdata class is used. Once a class is chosen, its from_text() class method is called with the parameters to this function. @param rdclass: The rdata class @type rdclass: int @param rdtype: The rdata type @type rdtype: int @param tok: The tokenizer @type tok: dns.tokenizer.Tokenizer @param origin: The origin to use for relative names @type origin: dns.name.Name @param relativize: Should names be relativized? @type relativize: bool @rtype: dns.rdata.Rdata instance""" if isinstance(tok, str): tok = dns.tokenizer.Tokenizer(tok) cls = get_rdata_class(rdclass, rdtype) if cls != GenericRdata: # peek at first token token = tok.get() tok.unget(token) if token.is_identifier and \ token.value == r'\#': # # Known type using the generic syntax. Extract the # wire form from the generic syntax, and then run # from_wire on it. # rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin, relativize) return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data), origin) return cls.from_text(rdclass, rdtype, tok, origin, relativize)
hashes[dns.name.from_text('HMAC-MD5.SIG-ALG.REG.INT')] = md5.md5 hashes[dns.name.from_text('hmac-sha1')] = sha.sha
hashes[dns.name.from_text('HMAC-MD5.SIG-ALG.REG.INT')] = md5 hashes[dns.name.from_text('hmac-sha1')] = sha
def new(self, *args, **kwargs): return self.basehash(*args, **kwargs)
@param fudge: TSIG time fudge; default is 300 seconds.
@param fudge: TSIG time fudge
def add_tsig(self, keyname, secret, fudge, id, tsig_error, other_data, request_mac, algorithm=dns.tsig.default_algorithm): """Add a TSIG signature to the message.
if opt in ('-z', '--reset'): self.q_record, self.q_data = None, None
def ParseArgs(self, argv): opts, args = self.ParseWithCommonArgs(argv, OPT_FLAGS, OPT_ARGS)
bpy.ops.transform.resize(value=(1,1,-1), constraint_axis=(False, False, True), constraint_orientation='GLOBAL')
def export_char(outputFile, meshObj = None): '''Exports a mesh as a LoL .skn file. outputFile: Name of file to save the mesh as meshObj: Blender mesh object to export. If none is given we will look for one named 'lolMesh' ''' import bpy if meshObj == None: #If no mesh object was supplied, try the active selection if bpy.context.object.type =='MESH': meshObj = bpy.context.object #If the selected object wasn't a mesh, try finding one named 'lolMesh' else: try: meshObj = bpy.data.objects['lolMesh'] except KeyError: errStr = ''' No mesh object supplied, no mesh selected, and no mesh
import_sco(self.filepath)
import_sco(self.properties.filepath)
def execute(self, context): import_sco(self.filepath) return {'FINISHED'}
group_licenses.append(licenses[license_id])
license = licenses[license_id] group_licenses.append(license.copy())
def license_cmp(x, y): return cmp(x['title'].lower(), y['title'].lower())
import urllib2 import simplejson
def get_names(self): from pylons import config import urllib2 import simplejson url = config.get('licenses_service_url', self.default_url) try: response = urllib2.urlopen(url) response_body = response.read() except Exception, inst: msg = "Couldn't connect to licenses service: %s" % inst raise Exception, msg try: license_names = simplejson.loads(response_body) except Exception, inst: msg = "Couldn't read response from licenses service: %s" % inst raise Exception, inst return [unicode(l) for l in license_names]
license_names = simplejson.loads(response_body)
license_names = loads(response_body)
def get_names(self): from pylons import config import urllib2 import simplejson url = config.get('licenses_service_url', self.default_url) try: response = urllib2.urlopen(url) response_body = response.read() except Exception, inst: msg = "Couldn't connect to licenses service: %s" % inst raise Exception, msg try: license_names = simplejson.loads(response_body) except Exception, inst: msg = "Couldn't read response from licenses service: %s" % inst raise Exception, inst return [unicode(l) for l in license_names]
pafser.add_option("-s", "--sensor", action="append", type="string",
parser.add_option("-s", "--sensor", action="append", type="string",
def main(): parser = OptionParser() parser.add_option("-i", "--input", dest="root", default="/lsst/DC3/data/obstest/ImSim", help="input root") parser.add_option("-o", "--output", dest="outRoot", default=".", help="output root") parser.add_option("-f", "--force", action="store_true", default=False, help="execute even if output dataset exists") parser.add_option("-C", "--calibRoot", dest="calibRoot", help="calibration root") parser.add_option("-R", "--registry", help="registry") parser.add_option("-v", "--visit", action="append", type="int", help="visit numbers (can be repeated)") parser.add_option("-r", "--raft", action="append", type="string", help="raft name (can be repeated)") pafser.add_option("-s", "--sensor", action="append", type="string", help="sensor name (can be repeated)") (options, args) = parser.parse_args() if options.registry is None: if os.path.exists(os.path.join(options.root, "registry.sqlite3")): options.registry = os.path.join(options.root, "registry.sqlite3") if options.registry is None: if os.path.exists("/lsst/DC3/data/obstest/ImSim/registry.sqlite3"): options.registry = "/lsst/DC3/data/obstest/ImSim/registry.sqlite3" if options.calibRoot is None: if os.path.exists("/lsst/DC3/data/obstest/ImSim"): options.calibRoot = "/lsst/DC3/data/obstest/ImSim" bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, calibRoot=options.calibRoot, registry=options.registry)) inButler = bf.create() obf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.outRoot, registry=options.registry)) outButler = obf.create() if options.visit is None: print >>sys.stderr, "Running over all input visits" options.visit = inButler.queryMetadata("raw", "visit") elif not hasattr(options.visit, "__iter__"): options.visit = [options.visit] if options.raft is None: print >>sys.stderr, "Running over all rafts" options.raft = inButler.queryMetadata("raw", "raft") elif not hasattr(options.raft, "__iter__"): options.raft = [options.raft] if options.sensor is None: print >>sys.stderr, "Running over all sensors" options.sensor = inButler.queryMetadata("raw", "sensor") elif not hasattr(options.sensor, "__iter__"): options.sensor = [options.sensor] for visit in options.visit: for raft in options.raft: for sensor in options.sensor: try: process(inButler, outButler, visit, raft, sensor, options.force) except Exception, e: traceback.print_exc() print >>sys.stderr, "Continuing..."
def isrProcess(f):
def isrProcess(f, doJobOffice=False):
def isrProcess(f): print >>f, """ appStage: { name: isrInputRaw parallelClass: lsst.pex.harness.IOStage.InputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerInput.paf inputItems: {""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): for snap in (0, 1): channelName = '"%d,%d"' % (channelX, channelY) channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, """ isrExposure""" + channelSnap + """: { datasetType: raw datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: """ + str(snap) + """ channel: """ + channelName + """ } } }""" print >>f, """ } } } }""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): channelName = '"%d,%d"' % (channelX, channelY) channelId = "%d%d" % (channelX, channelY) print >>f, """ appStage: { name: isrInput""" + channelId + """ parallelClass: lsst.pex.harness.IOStage.InputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerInput.paf inputItems: { biasExposure: { datasetType: bias datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } darkExposure: { datasetType: dark datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } flatExposure: { datasetType: flat datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } } } } }""" for snap in (0, 1): channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, """ appStage: { name: isrSaturation""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrSaturationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { saturationMaskedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrOverscan""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrOverscanStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { overscanCorrectedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrBias""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrBiasStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ biasexposure: biasExposure } outputKeys: { biasSubtractedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrVariance""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrVarianceStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { varianceAddedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrDark""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrDarkStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ darkexposure: darkExposure } outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrFlat""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrFlatStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ flatexposure: flatExposure } outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ } parameters: @PT1Pipe/ISR-flat.paf outputKeys: { flatCorrectedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrSdqaAmp""" + channelSnap + """ parallelClass: lsst.sdqa.pipeline.IsrSdqaStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } parameters: 
@PT1Pipe/ISR-sdqaAmp.paf outputKeys: { isrPersistableSdqaRatingVectorKey: sdqaRatingVector""" + str(snap) + """ } } }""" pass # end of snap loop print >>f, """ appStage: { name: isrOutput""" + channelId + """ parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerUpdate.paf outputItems: { sdqaRatingVector0: { datasetType: sdqaAmp datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 0 channel: """ + channelName + """ } } } sdqaRatingVector1: { datasetType: sdqaAmp datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 1 channel: """ + channelName + """ } } } } } } }"""
eventTopic: None
eventTopic: """ + ("None" if doJobOffice else "jobIdentity") + """
def isrProcess(f): print >>f, """ appStage: { name: isrInputRaw parallelClass: lsst.pex.harness.IOStage.InputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerInput.paf inputItems: {""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): for snap in (0, 1): channelName = '"%d,%d"' % (channelX, channelY) channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, """ isrExposure""" + channelSnap + """: { datasetType: raw datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: """ + str(snap) + """ channel: """ + channelName + """ } } }""" print >>f, """ } } } }""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): channelName = '"%d,%d"' % (channelX, channelY) channelId = "%d%d" % (channelX, channelY) print >>f, """ appStage: { name: isrInput""" + channelId + """ parallelClass: lsst.pex.harness.IOStage.InputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerInput.paf inputItems: { biasExposure: { datasetType: bias datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } darkExposure: { datasetType: dark datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } flatExposure: { datasetType: flat datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } } } } }""" for snap in (0, 1): channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, """ appStage: { name: isrSaturation""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrSaturationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { saturationMaskedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrOverscan""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrOverscanStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { overscanCorrectedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrBias""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrBiasStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ biasexposure: biasExposure } outputKeys: { biasSubtractedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrVariance""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrVarianceStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { varianceAddedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrDark""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrDarkStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ darkexposure: darkExposure } outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrFlat""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrFlatStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ flatexposure: flatExposure } outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ } parameters: @PT1Pipe/ISR-flat.paf outputKeys: { flatCorrectedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrSdqaAmp""" + channelSnap + """ parallelClass: lsst.sdqa.pipeline.IsrSdqaStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } parameters: 
@PT1Pipe/ISR-sdqaAmp.paf outputKeys: { isrPersistableSdqaRatingVectorKey: sdqaRatingVector""" + str(snap) + """ } } }""" pass # end of snap loop print >>f, """ appStage: { name: isrOutput""" + channelId + """ parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerUpdate.paf outputItems: { sdqaRatingVector0: { datasetType: sdqaAmp datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 0 channel: """ + channelName + """ } } } sdqaRatingVector1: { datasetType: sdqaAmp datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 1 channel: """ + channelName + """ } } } } } } }"""
outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ }
def isrProcess(f): print >>f, """ appStage: { name: isrInputRaw parallelClass: lsst.pex.harness.IOStage.InputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerInput.paf inputItems: {""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): for snap in (0, 1): channelName = '"%d,%d"' % (channelX, channelY) channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, """ isrExposure""" + channelSnap + """: { datasetType: raw datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: """ + str(snap) + """ channel: """ + channelName + """ } } }""" print >>f, """ } } } }""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): channelName = '"%d,%d"' % (channelX, channelY) channelId = "%d%d" % (channelX, channelY) print >>f, """ appStage: { name: isrInput""" + channelId + """ parallelClass: lsst.pex.harness.IOStage.InputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerInput.paf inputItems: { biasExposure: { datasetType: bias datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } darkExposure: { datasetType: dark datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } flatExposure: { datasetType: flat datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } } } } }""" for snap in (0, 1): channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, """ appStage: { name: isrSaturation""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrSaturationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { saturationMaskedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrOverscan""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrOverscanStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { overscanCorrectedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrBias""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrBiasStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ biasexposure: biasExposure } outputKeys: { biasSubtractedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrVariance""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrVarianceStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { varianceAddedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrDark""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrDarkStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ darkexposure: darkExposure } outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrFlat""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrFlatStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ flatexposure: flatExposure } outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ } parameters: @PT1Pipe/ISR-flat.paf outputKeys: { flatCorrectedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrSdqaAmp""" + channelSnap + """ parallelClass: lsst.sdqa.pipeline.IsrSdqaStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } parameters: 
@PT1Pipe/ISR-sdqaAmp.paf outputKeys: { isrPersistableSdqaRatingVectorKey: sdqaRatingVector""" + str(snap) + """ } } }""" pass # end of snap loop print >>f, """ appStage: { name: isrOutput""" + channelId + """ parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerUpdate.paf outputItems: { sdqaRatingVector0: { datasetType: sdqaAmp datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 0 channel: """ + channelName + """ } } } sdqaRatingVector1: { datasetType: sdqaAmp datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 1 channel: """ + channelName + """ } } } } } } }"""
exposure: isrExposure""" + channelSnap + """
exposureKey: isrExposure""" + channelSnap + """
def isrProcess(f): print >>f, """ appStage: { name: isrInputRaw parallelClass: lsst.pex.harness.IOStage.InputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerInput.paf inputItems: {""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): for snap in (0, 1): channelName = '"%d,%d"' % (channelX, channelY) channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, """ isrExposure""" + channelSnap + """: { datasetType: raw datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: """ + str(snap) + """ channel: """ + channelName + """ } } }""" print >>f, """ } } } }""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): channelName = '"%d,%d"' % (channelX, channelY) channelId = "%d%d" % (channelX, channelY) print >>f, """ appStage: { name: isrInput""" + channelId + """ parallelClass: lsst.pex.harness.IOStage.InputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerInput.paf inputItems: { biasExposure: { datasetType: bias datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } darkExposure: { datasetType: dark datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } flatExposure: { datasetType: flat datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } } } } }""" for snap in (0, 1): channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, """ appStage: { name: isrSaturation""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrSaturationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { saturationMaskedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrOverscan""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrOverscanStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { overscanCorrectedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrBias""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrBiasStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ biasexposure: biasExposure } outputKeys: { biasSubtractedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrVariance""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrVarianceStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { varianceAddedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrDark""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrDarkStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ darkexposure: darkExposure } outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrFlat""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrFlatStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ flatexposure: flatExposure } outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ } parameters: @PT1Pipe/ISR-flat.paf outputKeys: { flatCorrectedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrSdqaAmp""" + channelSnap + """ parallelClass: lsst.sdqa.pipeline.IsrSdqaStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } parameters: 
@PT1Pipe/ISR-sdqaAmp.paf outputKeys: { isrPersistableSdqaRatingVectorKey: sdqaRatingVector""" + str(snap) + """ } } }""" pass # end of snap loop print >>f, """ appStage: { name: isrOutput""" + channelId + """ parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerUpdate.paf outputItems: { sdqaRatingVector0: { datasetType: sdqaAmp datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 0 channel: """ + channelName + """ } } } sdqaRatingVector1: { datasetType: sdqaAmp datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 1 channel: """ + channelName + """ } } } } } } }"""
inputKeys: { exposureList: exposureList""" + str(snap) + """ } outputKeys: { assembledCcdExposure: isrExposure""" + str(snap) + """
stagePolicy: { inputKeys: { exposureList: exposureList""" + str(snap) + """ } outputKeys: { assembledCcdExposure: isrExposure""" + str(snap) + """ }
def ccdAssemblyProcess(f): for snap in (0, 1): print >>f, """ appStage: { name: ccdAssemblyCcdList""" + str(snap) + """ parallelClass: lsst.datarel.ObjectListStageParallel eventTopic: None stagePolicy: { inputKeys: {""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): channelId = "%d%d" % (channelX, channelY) channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, " object: isrExposure" + channelSnap print >>f, """ } outputKeys: { objectList: exposureList""" + str(snap) + """ } } } appStage: { name: ccdAssemblyIsrCcdAssembly""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdAssemblyStageParallel eventTopic: None inputKeys: { exposureList: exposureList""" + str(snap) + """ } outputKeys: { assembledCcdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblyIsrCcdDefect""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdDefectStageParallel eventTopic: None inputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } outputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblyIsrCcdSdqa""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdSdqaStageParallel eventTopic: None inputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } outputKeys: { sdqaCcdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblySdqaCcd""" + str(snap) + """ parallelClass: lsst.sdqa.pipeline.IsrSdqaStageParallel eventTopic: None inputKeys: { exposureKey: isrExposure""" + str(snap) + """ } parameters: @PT1Pipe/ISR-sdqaCcd.paf outputKeys: { isrPersistableSdqaRatingVectorKey: sdqaRatingVector""" + str(snap) + """ } }""" print >>f, """ appStage: { name: ccdAssemblyOutput parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerUpdate.paf outputItems: { sdqaRatingVector0: { datasetType: sdqaCcd datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 0 } } } sdqaRatingVector1: { datasetType: sdqaCcd datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 1 } } } } } } }""" print >>f, """ appStage: { name: ccdAssemblyFixup parallelClass: lsst.datarel.FixupStageParallel eventTopic: None stagePolicy: { inputKeys: { isrCcdExposure0: isrExposure0 isrCcdExposure1: isrExposure1 } parameters: { pipeline: CcdAssembly } outputKeys: { isrCcdExposure0: isrCcdExposure0 isrCcdExposure1: isrCcdExposure1 } } }"""
inputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } outputKeys: { ccdExposure: isrExposure""" + str(snap) + """
stagePolicy: { inputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } outputKeys: { defectMaskedCcdExposure: isrExposure""" + str(snap) + """ }
def ccdAssemblyProcess(f): for snap in (0, 1): print >>f, """ appStage: { name: ccdAssemblyCcdList""" + str(snap) + """ parallelClass: lsst.datarel.ObjectListStageParallel eventTopic: None stagePolicy: { inputKeys: {""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): channelId = "%d%d" % (channelX, channelY) channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, " object: isrExposure" + channelSnap print >>f, """ } outputKeys: { objectList: exposureList""" + str(snap) + """ } } } appStage: { name: ccdAssemblyIsrCcdAssembly""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdAssemblyStageParallel eventTopic: None inputKeys: { exposureList: exposureList""" + str(snap) + """ } outputKeys: { assembledCcdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblyIsrCcdDefect""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdDefectStageParallel eventTopic: None inputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } outputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblyIsrCcdSdqa""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdSdqaStageParallel eventTopic: None inputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } outputKeys: { sdqaCcdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblySdqaCcd""" + str(snap) + """ parallelClass: lsst.sdqa.pipeline.IsrSdqaStageParallel eventTopic: None inputKeys: { exposureKey: isrExposure""" + str(snap) + """ } parameters: @PT1Pipe/ISR-sdqaCcd.paf outputKeys: { isrPersistableSdqaRatingVectorKey: sdqaRatingVector""" + str(snap) + """ } }""" print >>f, """ appStage: { name: ccdAssemblyOutput parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerUpdate.paf outputItems: { sdqaRatingVector0: { datasetType: sdqaCcd datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 0 } } } sdqaRatingVector1: { datasetType: sdqaCcd datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 1 } } } } } } }""" print >>f, """ appStage: { name: ccdAssemblyFixup parallelClass: lsst.datarel.FixupStageParallel eventTopic: None stagePolicy: { inputKeys: { isrCcdExposure0: isrExposure0 isrCcdExposure1: isrExposure1 } parameters: { pipeline: CcdAssembly } outputKeys: { isrCcdExposure0: isrCcdExposure0 isrCcdExposure1: isrCcdExposure1 } } }"""
inputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } outputKeys: { sdqaCcdExposure: isrExposure""" + str(snap) + """
stagePolicy: { inputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } outputKeys: { sdqaCcdExposure: isrExposure""" + str(snap) + """ }
def ccdAssemblyProcess(f): for snap in (0, 1): print >>f, """ appStage: { name: ccdAssemblyCcdList""" + str(snap) + """ parallelClass: lsst.datarel.ObjectListStageParallel eventTopic: None stagePolicy: { inputKeys: {""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): channelId = "%d%d" % (channelX, channelY) channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, " object: isrExposure" + channelSnap print >>f, """ } outputKeys: { objectList: exposureList""" + str(snap) + """ } } } appStage: { name: ccdAssemblyIsrCcdAssembly""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdAssemblyStageParallel eventTopic: None inputKeys: { exposureList: exposureList""" + str(snap) + """ } outputKeys: { assembledCcdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblyIsrCcdDefect""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdDefectStageParallel eventTopic: None inputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } outputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblyIsrCcdSdqa""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdSdqaStageParallel eventTopic: None inputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } outputKeys: { sdqaCcdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblySdqaCcd""" + str(snap) + """ parallelClass: lsst.sdqa.pipeline.IsrSdqaStageParallel eventTopic: None inputKeys: { exposureKey: isrExposure""" + str(snap) + """ } parameters: @PT1Pipe/ISR-sdqaCcd.paf outputKeys: { isrPersistableSdqaRatingVectorKey: sdqaRatingVector""" + str(snap) + """ } }""" print >>f, """ appStage: { name: ccdAssemblyOutput parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerUpdate.paf outputItems: { sdqaRatingVector0: { datasetType: sdqaCcd datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 0 } } } sdqaRatingVector1: { datasetType: sdqaCcd datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 1 } } } } } } }""" print >>f, """ appStage: { name: ccdAssemblyFixup parallelClass: lsst.datarel.FixupStageParallel eventTopic: None stagePolicy: { inputKeys: { isrCcdExposure0: isrExposure0 isrCcdExposure1: isrExposure1 } parameters: { pipeline: CcdAssembly } outputKeys: { isrCcdExposure0: isrCcdExposure0 isrCcdExposure1: isrCcdExposure1 } } }"""
inputKeys: { exposureKey: isrExposure""" + str(snap) + """ } parameters: @PT1Pipe/ISR-sdqaCcd.paf outputKeys: { isrPersistableSdqaRatingVectorKey: sdqaRatingVector""" + str(snap) + """
stagePolicy: { inputKeys: { exposureKey: isrExposure""" + str(snap) + """ } parameters: @PT1Pipe/ISR-sdqaCcd.paf outputKeys: { isrPersistableSdqaRatingVectorKey: sdqaRatingVector""" + str(snap) + """ }
def ccdAssemblyProcess(f): for snap in (0, 1): print >>f, """ appStage: { name: ccdAssemblyCcdList""" + str(snap) + """ parallelClass: lsst.datarel.ObjectListStageParallel eventTopic: None stagePolicy: { inputKeys: {""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): channelId = "%d%d" % (channelX, channelY) channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, " object: isrExposure" + channelSnap print >>f, """ } outputKeys: { objectList: exposureList""" + str(snap) + """ } } } appStage: { name: ccdAssemblyIsrCcdAssembly""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdAssemblyStageParallel eventTopic: None inputKeys: { exposureList: exposureList""" + str(snap) + """ } outputKeys: { assembledCcdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblyIsrCcdDefect""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdDefectStageParallel eventTopic: None inputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } outputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblyIsrCcdSdqa""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdSdqaStageParallel eventTopic: None inputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } outputKeys: { sdqaCcdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblySdqaCcd""" + str(snap) + """ parallelClass: lsst.sdqa.pipeline.IsrSdqaStageParallel eventTopic: None inputKeys: { exposureKey: isrExposure""" + str(snap) + """ } parameters: @PT1Pipe/ISR-sdqaCcd.paf outputKeys: { isrPersistableSdqaRatingVectorKey: sdqaRatingVector""" + str(snap) + """ } }""" print >>f, """ appStage: { name: ccdAssemblyOutput parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerUpdate.paf outputItems: { sdqaRatingVector0: { datasetType: sdqaCcd datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 0 } } } sdqaRatingVector1: { datasetType: sdqaCcd datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 1 } } } } } } }""" print >>f, """ appStage: { name: ccdAssemblyFixup parallelClass: lsst.datarel.FixupStageParallel eventTopic: None stagePolicy: { inputKeys: { isrCcdExposure0: isrExposure0 isrCcdExposure1: isrExposure1 } parameters: { pipeline: CcdAssembly } outputKeys: { isrCcdExposure0: isrCcdExposure0 isrCcdExposure1: isrCcdExposure1 } } }"""
parallelClass: lsst.meas.pipeline.WcsVerificationStageParallel
parallelClass: lsst.meas.pipeline.WcsVerificationParallel
def imgCharProcess(f): print >>f, """ appStage: { name: icSourceDetect parallelClass: lsst.meas.pipeline.SourceDetectionStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: visitExposure } outputKeys: { positiveDetection: positiveFootprintSet negativeDetection: negativeFootprintSet psf: simplePsf } psfPolicy: @PT1Pipe/ImgChar-sourceDetect-psf.paf backgroundPolicy: @PT1Pipe/ImgChar-sourceDetect-background.paf } } appStage: { name: icSourceMeasure parallelClass: lsst.meas.pipeline.SourceMeasurementStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: visitExposure psf: simplePsf positiveDetection: positiveFootprintSet negativeDetection: negativeFootprintSet } outputKeys: { sources: sourceSet } } } appStage: { name: icPsfDetermination parallelClass: lsst.meas.pipeline.PsfDeterminationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: visitExposure sourceSet: sourceSet } outputKeys: { psf: measuredPsf cellSet: cellSet sdqa: sdqa } } } appStage: { name: icWcsDetermination parallelClass: lsst.meas.pipeline.WcsDeterminationStageParallel eventTopic: None stagePolicy: @PT1Pipe/ImgChar-wcsDetermination.paf } appStage: { name: icWcsVerification parallelClass: lsst.meas.pipeline.WcsVerificationStageParallel eventTopic: None stagePolicy: { sourceMatchSetKey: matchList } } appStage: { name: icPhotoCal parallelClass: lsst.meas.pipeline.PhotoCalStageParallel eventTopic: None stagePolicy: { sourceMatchSetKey: matchList outputValueKey: photometricMagnitudeObject } }""" print >>f, """ appStage: { name: icOutput parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @PT1Pipe/butlerUpdate.paf outputItems: { sourceSet_persistable: { datasetType: icSrc datasetId: { fromJobIdentity: "visit" "raft" "sensor" } } measuredPsf: { datasetType: psf datasetId: { fromJobIdentity: "visit" "raft" "sensor" } } visitExposure: { datasetType: calexp datasetId: { fromJobIdentity: "visit" "raft" "sensor" } } } } } }""" print >>f, """ appStage: { name: icFixup parallelClass: lsst.datarel.FixupStageParallel eventTopic: None stagePolicy: { inputKeys: { calibratedExposure: visitExposure psf: measuredPsf } parameters: { pipeline: ImgChar } outputKeys: { calibratedExposure: scienceExposure psf: psf } } }"""
def createPolicy(f):
def createPolicy(f, doJobOffice=False):
def createPolicy(f): pipelinePolicy(f) jobStart(f) isrProcess(f) ccdAssemblyProcess(f) crSplitProcess(f) imgCharProcess(f) sfmProcess(f) jobFinish(f) print >>f, "}"
jobStart(f)
if doJobOffice: jobStart(f)
def createPolicy(f): pipelinePolicy(f) jobStart(f) isrProcess(f) ccdAssemblyProcess(f) crSplitProcess(f) imgCharProcess(f) sfmProcess(f) jobFinish(f) print >>f, "}"
jobFinish(f)
if doJobOffice: jobFinish(f)
def createPolicy(f): pipelinePolicy(f) jobStart(f) isrProcess(f) ccdAssemblyProcess(f) crSplitProcess(f) imgCharProcess(f) sfmProcess(f) jobFinish(f) print >>f, "}"
if len(sys.argv) > 1: with open(sys.argv[1], "w") as f: createPolicy(f)
parser = OptionParser() parser.add_option("-j", "--jobOffice", action="store_true", default=False, help="write JobOffice stages into the pipeline") options, args = parser.parse_args() if len(args) > 0: with open(args[0], "w") as f: createPolicy(f, options.jobOffice)
def main(): if len(sys.argv) > 1: with open(sys.argv[1], "w") as f: createPolicy(f) else: createPolicy(sys.stdout)
createPolicy(sys.stdout)
createPolicy(sys.stdout, options.jobOffice)
def main(): if len(sys.argv) > 1: with open(sys.argv[1], "w") as f: createPolicy(f) else: createPolicy(sys.stdout)
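The rows above rework the generator's entry point: createPolicy gains a doJobOffice flag, and the bare sys.argv check is replaced by an OptionParser whose -j/--jobOffice switch decides whether the jobStart/jobFinish stages are written into the policy. A minimal usage sketch, assuming the module is saved as makePolicy.py and the functions above are importable (both the file names and the module name are assumptions, not given in these rows):
# From the command line (hypothetical file names):
#   python makePolicy.py --jobOffice main-ImSim.paf   # policy with JobOffice stages
#   python makePolicy.py main-ImSim.paf               # plain policy, JobOffice stages omitted
# From Python:
import StringIO
buf = StringIO.StringIO()
createPolicy(buf, doJobOffice=True)    # emits jobStart(f) ... jobFinish(f) around the processing stages
print buf.getvalue()[:200]             # peek at the generated policy text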
parser.add_option("-r", "--registry", help="registry")
parser.add_option("-R", "--registry", help="registry")
def cfhtMain(processFunction, outDatasetType, need=(), defaultRoot="."): parser = OptionParser() parser.add_option("-i", "--input", dest="root", default=defaultRoot, help="input root") parser.add_option("-o", "--output", dest="outRoot", default=".", help="output root") parser.add_option("-f", "--force", action="store_true", default=False, help="execute even if output dataset exists") if "calib" in need: parser.add_option("-C", "--calibRoot", dest="calibRoot", help="calibration root") parser.add_option("-r", "--registry", help="registry") parser.add_option("-v", "--visit", action="append", type="int", help="visit numbers (can be repeated)") if "ccd" in need or "amp" in need: parser.add_option("-c", "--ccd", action="append", type="int", help="ccd number (can be repeated)") if "amp" in need: parser.add_option("-a", "--amp", action="append", type="int", help="amp number (can be repeated)") (options, args) = parser.parse_args() if options.registry is None: if os.path.exists(os.path.join(options.root, "registry.sqlite3")): options.registry = os.path.join(options.root, "registry.sqlite3") if options.registry is None: if os.path.exists("/lsst/DC3/data/obstest/CFHTLS/registry.sqlite3"): options.registry = "/lsst/DC3/data/obstest/CFHTLS/registry.sqlite3" if "calib" in need: if options.calibRoot is None: if os.path.exists("/lsst/DC3/data/obstest/CFHTLS/calib"): options.calibRoot = "/lsst/DC3/data/obstest/CFHTLS/calib" bf = dafPersist.ButlerFactory(mapper=CfhtMapper( root=options.root, calibRoot=options.calibRoot, registry=options.registry)) else: bf = dafPersist.ButlerFactory(mapper=CfhtMapper( root=options.root, registry=options.registry)) inButler = bf.create() obf = dafPersist.ButlerFactory(mapper=CfhtMapper( root=options.outRoot, registry=options.registry)) outButler = obf.create() if options.visit is None: print >>sys.stderr, "Running over all input visits" options.visit = [x[0] for x in inButler.queryMetadata("raw", "visit", ("visit",))] elif not hasattr(options.visit, "__iter__"): options.visit = [options.visit] if "ccd" in need or "amp" in need: if options.ccd is None: print >>sys.stderr, "Running over all CCDs" options.ccd = [x[0] for x in inButler.queryMetadata("raw", "ccd", ("ccd",))] elif not hasattr(options.ccd, "__iter__"): options.ccd = [options.ccd] if "amp" in need: if options.amp is None: print >>sys.stderr, "Running over all amps" options.amp = [x[0] for x in inButler.queryMetadata("raw", "amp", ("amp",))] elif not hasattr(options.amp, "__iter__"): options.amp = [options.amp] for visit in options.visit: if "ccd" in need or "amp" in need: for ccd in options.ccd: if "amp" in need: for amp in options.amp: if options.force or \ not outButler.fileExists(outDatasetType, visit=visit, ccd=ccd, amp=amp): print >>sys.stderr, \ "***** Processing visit %d ccd %d amp %d" % \ (visit, ccd, amp) processFunction(inButler=inButler, outButler=outButler, visit=visit, ccd=ccd, amp=amp) else: if options.force or \ not outButler.fileExists(outDatasetType, visit=visit, ccd=ccd): print >>sys.stderr, \ "***** Processing visit %d ccd %d" % \ (visit, ccd) processFunction(inButler=inButler, outButler=outButler, visit=visit, ccd=ccd) else: if options.force or \ not outButler.fileExists(outDatasetType, visit=visit): print >>sys.stderr, "***** Processing visit %d" % (visit,) processFunction(inButler=inButler, outButler=outButler, visit=visit)
parser.add_option("-r", "--registry", help="registry")
parser.add_option("-R", "--registry", help="registry")
def lsstSimMain(processFunction, outDatasetType, need=(), defaultRoot="."): parser = OptionParser() parser.add_option("-i", "--input", dest="root", default=defaultRoot, help="input root") parser.add_option("-o", "--output", dest="outRoot", default=".", help="output root") parser.add_option("-f", "--force", action="store_true", default=False, help="execute even if output dataset exists") if "calib" in need: parser.add_option("-C", "--calibRoot", dest="calibRoot", help="calibration root") parser.add_option("-r", "--registry", help="registry") parser.add_option("-v", "--visit", action="append", type="int", help="visit number (can be repeated)") if "snap" in need: parser.add_option("-s", "--snap", action="append", type="int", help="snap number (can be repeated)") if "sensor" in need: parser.add_option("-r", "--raft", action="append", help="raft coords (can be repeated)") parser.add_option("-c", "--sensor", action="append", help="sensor coords (can be repeated)") if "channel" in need: parser.add_option("-a", "--channel", action="append", help="channel coords (can be repeated)") (options, args) = parser.parse_args() if options.registry is None: if os.path.exists(os.path.join(options.root, "registry.sqlite3")): options.registry = os.path.join(options.root, "registry.sqlite3") if "calib" in need: if os.path.exists("/lsst/DC3/data/obstest/ImSim"): options.calibRoot = "/lsst/DC3/data/obstest/ImSim" bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, calibRoot=options.calibRoot, registry=options.registry)) else: bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, registry=options.registry)) inButler = bf.create() obf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.outRoot, registry=options.registry)) outButler = obf.create() if options.visit is None: print >>sys.stderr, "Running over all input visits" options.visit = [x[0] for x in inButler.queryMetadata("raw", "visit", ("visit",))] elif not hasattr(options.visit, "__iter__"): options.visit = [options.visit] if "snap" in need: if options.snap is None: print >>sys.stderr, "Running over all snaps" options.snap = [x[0] for x in inButler.queryMetadata("raw", "snap", ("snap",))] elif not hasattr(options.snap, "__iter__"): options.snap = [options.snap] else: setattr(options, "snap", [0]) if "sensor" in need or "channel" in need: if options.raft is None: print >>sys.stderr, "Running over all rafts" options.raft = [x[0] for x in inButler.queryMetadata("raw", "raft", ("raft",))] elif not hasattr(options.raft, "__iter__"): options.raft = [options.raft] if "sensor" in need or "channel" in need: if options.sensor is None: print >>sys.stderr, "Running over all sensors" options.sensor = [x[0] for x in inButler.queryMetadata("raw", "sensor", ("sensor",))] elif not hasattr(options.sensor, "__iter__"): options.sensor = [options.sensor] if "channel" in need: if options.channel is None: print >>sys.stderr, "Running over all channels" options.channel = [x[0] for x in inButler.queryMetadata("raw", "channel", ("channel",))] elif not hasattr(options.channel, "__iter__"): options.channel = [options.channel] for visit in options.visit: if "sensor" in need or "channel" in need: if "snap" in need: for snap in options.snap: for raft in options.raft: for sensor in options.sensor: if "channel" in need: for channel in options.channel: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, snap=snap, raft=raft, sensor=sensor, channel=channel): print >>sys.stderr, \ "***** Processing " + \ "visit %d snap %d 
raft %s " + \ "sensor %s channel %s" % \ (visit, snap, raft, sensor, channel) processFunction(inButler=inButler, outButler=outButler, visit=visit, snap=snap, raft=raft, sensor=sensor, channel=channel) else: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, snap=snap, raft=raft, sensor=sensor): print >>sys.stderr, \ "***** Processing visit %d " + \ "snap %d raft %s sensor %s" % \ (visit, snap, raft, sensor) processFunction(inButler=inButler, outButler=outButler, visit=visit, snap=snap, raft=raft, sensor=sensor) else: # snap for raft in options.raft: for sensor in options.sensor: if "channel" in need: for channel in options.channel: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, raft=raft, sensor=sensor, channel=channel): print >>sys.stderr, \ "***** Processing visit %d " + \ "raft %s sensor %s channel %s" % \ (visit, raft, sensor, channel) processFunction(inButler=inButler, outButler=outButler, visit=visit, raft=raft, sensor=sensor, channel=channel) else: if options.force or \ not outButler.fileExists(outDatasetType, visit=visit, raft=raft, sensor=sensor): print >>sys.stderr, \ "***** Processing visit %d " + \ "raft %s sensor %s" % \ (visit, raft, sensor) processFunction(inButler=inButler, outButler=outButler, visit=visit, raft=raft, sensor=sensor) else: # raft, sensor if options.force or \ not outButler.fileExists(outDatasetType, visit=visit): print >>sys.stderr, "***** Processing visit %d" % (visit,) processFunction(inButler=inButler, outButler=outButler, visit=visit)
"***** Processing " + \
("***** Processing " + \
def lsstSimMain(processFunction, outDatasetType, need=(), defaultRoot="."): parser = OptionParser() parser.add_option("-i", "--input", dest="root", default=defaultRoot, help="input root") parser.add_option("-o", "--output", dest="outRoot", default=".", help="output root") parser.add_option("-f", "--force", action="store_true", default=False, help="execute even if output dataset exists") if "calib" in need: parser.add_option("-C", "--calibRoot", dest="calibRoot", help="calibration root") parser.add_option("-r", "--registry", help="registry") parser.add_option("-v", "--visit", action="append", type="int", help="visit number (can be repeated)") if "snap" in need: parser.add_option("-s", "--snap", action="append", type="int", help="snap number (can be repeated)") if "sensor" in need: parser.add_option("-r", "--raft", action="append", help="raft coords (can be repeated)") parser.add_option("-c", "--sensor", action="append", help="sensor coords (can be repeated)") if "channel" in need: parser.add_option("-a", "--channel", action="append", help="channel coords (can be repeated)") (options, args) = parser.parse_args() if options.registry is None: if os.path.exists(os.path.join(options.root, "registry.sqlite3")): options.registry = os.path.join(options.root, "registry.sqlite3") if "calib" in need: if os.path.exists("/lsst/DC3/data/obstest/ImSim"): options.calibRoot = "/lsst/DC3/data/obstest/ImSim" bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, calibRoot=options.calibRoot, registry=options.registry)) else: bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, registry=options.registry)) inButler = bf.create() obf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.outRoot, registry=options.registry)) outButler = obf.create() if options.visit is None: print >>sys.stderr, "Running over all input visits" options.visit = [x[0] for x in inButler.queryMetadata("raw", "visit", ("visit",))] elif not hasattr(options.visit, "__iter__"): options.visit = [options.visit] if "snap" in need: if options.snap is None: print >>sys.stderr, "Running over all snaps" options.snap = [x[0] for x in inButler.queryMetadata("raw", "snap", ("snap",))] elif not hasattr(options.snap, "__iter__"): options.snap = [options.snap] else: setattr(options, "snap", [0]) if "sensor" in need or "channel" in need: if options.raft is None: print >>sys.stderr, "Running over all rafts" options.raft = [x[0] for x in inButler.queryMetadata("raw", "raft", ("raft",))] elif not hasattr(options.raft, "__iter__"): options.raft = [options.raft] if "sensor" in need or "channel" in need: if options.sensor is None: print >>sys.stderr, "Running over all sensors" options.sensor = [x[0] for x in inButler.queryMetadata("raw", "sensor", ("sensor",))] elif not hasattr(options.sensor, "__iter__"): options.sensor = [options.sensor] if "channel" in need: if options.channel is None: print >>sys.stderr, "Running over all channels" options.channel = [x[0] for x in inButler.queryMetadata("raw", "channel", ("channel",))] elif not hasattr(options.channel, "__iter__"): options.channel = [options.channel] for visit in options.visit: if "sensor" in need or "channel" in need: if "snap" in need: for snap in options.snap: for raft in options.raft: for sensor in options.sensor: if "channel" in need: for channel in options.channel: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, snap=snap, raft=raft, sensor=sensor, channel=channel): print >>sys.stderr, \ "***** Processing " + \ "visit %d snap %d 
raft %s " + \ "sensor %s channel %s" % \ (visit, snap, raft, sensor, channel) processFunction(inButler=inButler, outButler=outButler, visit=visit, snap=snap, raft=raft, sensor=sensor, channel=channel) else: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, snap=snap, raft=raft, sensor=sensor): print >>sys.stderr, \ "***** Processing visit %d " + \ "snap %d raft %s sensor %s" % \ (visit, snap, raft, sensor) processFunction(inButler=inButler, outButler=outButler, visit=visit, snap=snap, raft=raft, sensor=sensor) else: # snap for raft in options.raft: for sensor in options.sensor: if "channel" in need: for channel in options.channel: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, raft=raft, sensor=sensor, channel=channel): print >>sys.stderr, \ "***** Processing visit %d " + \ "raft %s sensor %s channel %s" % \ (visit, raft, sensor, channel) processFunction(inButler=inButler, outButler=outButler, visit=visit, raft=raft, sensor=sensor, channel=channel) else: if options.force or \ not outButler.fileExists(outDatasetType, visit=visit, raft=raft, sensor=sensor): print >>sys.stderr, \ "***** Processing visit %d " + \ "raft %s sensor %s" % \ (visit, raft, sensor) processFunction(inButler=inButler, outButler=outButler, visit=visit, raft=raft, sensor=sensor) else: # raft, sensor if options.force or \ not outButler.fileExists(outDatasetType, visit=visit): print >>sys.stderr, "***** Processing visit %d" % (visit,) processFunction(inButler=inButler, outButler=outButler, visit=visit)
"sensor %s channel %s" % \
"sensor %s channel %s") % \
def lsstSimMain(processFunction, outDatasetType, need=(), defaultRoot="."): parser = OptionParser() parser.add_option("-i", "--input", dest="root", default=defaultRoot, help="input root") parser.add_option("-o", "--output", dest="outRoot", default=".", help="output root") parser.add_option("-f", "--force", action="store_true", default=False, help="execute even if output dataset exists") if "calib" in need: parser.add_option("-C", "--calibRoot", dest="calibRoot", help="calibration root") parser.add_option("-r", "--registry", help="registry") parser.add_option("-v", "--visit", action="append", type="int", help="visit number (can be repeated)") if "snap" in need: parser.add_option("-s", "--snap", action="append", type="int", help="snap number (can be repeated)") if "sensor" in need: parser.add_option("-r", "--raft", action="append", help="raft coords (can be repeated)") parser.add_option("-c", "--sensor", action="append", help="sensor coords (can be repeated)") if "channel" in need: parser.add_option("-a", "--channel", action="append", help="channel coords (can be repeated)") (options, args) = parser.parse_args() if options.registry is None: if os.path.exists(os.path.join(options.root, "registry.sqlite3")): options.registry = os.path.join(options.root, "registry.sqlite3") if "calib" in need: if os.path.exists("/lsst/DC3/data/obstest/ImSim"): options.calibRoot = "/lsst/DC3/data/obstest/ImSim" bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, calibRoot=options.calibRoot, registry=options.registry)) else: bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, registry=options.registry)) inButler = bf.create() obf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.outRoot, registry=options.registry)) outButler = obf.create() if options.visit is None: print >>sys.stderr, "Running over all input visits" options.visit = [x[0] for x in inButler.queryMetadata("raw", "visit", ("visit",))] elif not hasattr(options.visit, "__iter__"): options.visit = [options.visit] if "snap" in need: if options.snap is None: print >>sys.stderr, "Running over all snaps" options.snap = [x[0] for x in inButler.queryMetadata("raw", "snap", ("snap",))] elif not hasattr(options.snap, "__iter__"): options.snap = [options.snap] else: setattr(options, "snap", [0]) if "sensor" in need or "channel" in need: if options.raft is None: print >>sys.stderr, "Running over all rafts" options.raft = [x[0] for x in inButler.queryMetadata("raw", "raft", ("raft",))] elif not hasattr(options.raft, "__iter__"): options.raft = [options.raft] if "sensor" in need or "channel" in need: if options.sensor is None: print >>sys.stderr, "Running over all sensors" options.sensor = [x[0] for x in inButler.queryMetadata("raw", "sensor", ("sensor",))] elif not hasattr(options.sensor, "__iter__"): options.sensor = [options.sensor] if "channel" in need: if options.channel is None: print >>sys.stderr, "Running over all channels" options.channel = [x[0] for x in inButler.queryMetadata("raw", "channel", ("channel",))] elif not hasattr(options.channel, "__iter__"): options.channel = [options.channel] for visit in options.visit: if "sensor" in need or "channel" in need: if "snap" in need: for snap in options.snap: for raft in options.raft: for sensor in options.sensor: if "channel" in need: for channel in options.channel: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, snap=snap, raft=raft, sensor=sensor, channel=channel): print >>sys.stderr, \ "***** Processing " + \ "visit %d snap %d 
raft %s " + \ "sensor %s channel %s" % \ (visit, snap, raft, sensor, channel) processFunction(inButler=inButler, outButler=outButler, visit=visit, snap=snap, raft=raft, sensor=sensor, channel=channel) else: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, snap=snap, raft=raft, sensor=sensor): print >>sys.stderr, \ "***** Processing visit %d " + \ "snap %d raft %s sensor %s" % \ (visit, snap, raft, sensor) processFunction(inButler=inButler, outButler=outButler, visit=visit, snap=snap, raft=raft, sensor=sensor) else: # snap for raft in options.raft: for sensor in options.sensor: if "channel" in need: for channel in options.channel: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, raft=raft, sensor=sensor, channel=channel): print >>sys.stderr, \ "***** Processing visit %d " + \ "raft %s sensor %s channel %s" % \ (visit, raft, sensor, channel) processFunction(inButler=inButler, outButler=outButler, visit=visit, raft=raft, sensor=sensor, channel=channel) else: if options.force or \ not outButler.fileExists(outDatasetType, visit=visit, raft=raft, sensor=sensor): print >>sys.stderr, \ "***** Processing visit %d " + \ "raft %s sensor %s" % \ (visit, raft, sensor) processFunction(inButler=inButler, outButler=outButler, visit=visit, raft=raft, sensor=sensor) else: # raft, sensor if options.force or \ not outButler.fileExists(outDatasetType, visit=visit): print >>sys.stderr, "***** Processing visit %d" % (visit,) processFunction(inButler=inButler, outButler=outButler, visit=visit)
"***** Processing visit %d " + \ "snap %d raft %s sensor %s" % \
("***** Processing visit %d " + \ "snap %d raft %s sensor %s") % \
def lsstSimMain(processFunction, outDatasetType, need=(), defaultRoot="."): parser = OptionParser() parser.add_option("-i", "--input", dest="root", default=defaultRoot, help="input root") parser.add_option("-o", "--output", dest="outRoot", default=".", help="output root") parser.add_option("-f", "--force", action="store_true", default=False, help="execute even if output dataset exists") if "calib" in need: parser.add_option("-C", "--calibRoot", dest="calibRoot", help="calibration root") parser.add_option("-r", "--registry", help="registry") parser.add_option("-v", "--visit", action="append", type="int", help="visit number (can be repeated)") if "snap" in need: parser.add_option("-s", "--snap", action="append", type="int", help="snap number (can be repeated)") if "sensor" in need: parser.add_option("-r", "--raft", action="append", help="raft coords (can be repeated)") parser.add_option("-c", "--sensor", action="append", help="sensor coords (can be repeated)") if "channel" in need: parser.add_option("-a", "--channel", action="append", help="channel coords (can be repeated)") (options, args) = parser.parse_args() if options.registry is None: if os.path.exists(os.path.join(options.root, "registry.sqlite3")): options.registry = os.path.join(options.root, "registry.sqlite3") if "calib" in need: if os.path.exists("/lsst/DC3/data/obstest/ImSim"): options.calibRoot = "/lsst/DC3/data/obstest/ImSim" bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, calibRoot=options.calibRoot, registry=options.registry)) else: bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, registry=options.registry)) inButler = bf.create() obf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.outRoot, registry=options.registry)) outButler = obf.create() if options.visit is None: print >>sys.stderr, "Running over all input visits" options.visit = [x[0] for x in inButler.queryMetadata("raw", "visit", ("visit",))] elif not hasattr(options.visit, "__iter__"): options.visit = [options.visit] if "snap" in need: if options.snap is None: print >>sys.stderr, "Running over all snaps" options.snap = [x[0] for x in inButler.queryMetadata("raw", "snap", ("snap",))] elif not hasattr(options.snap, "__iter__"): options.snap = [options.snap] else: setattr(options, "snap", [0]) if "sensor" in need or "channel" in need: if options.raft is None: print >>sys.stderr, "Running over all rafts" options.raft = [x[0] for x in inButler.queryMetadata("raw", "raft", ("raft",))] elif not hasattr(options.raft, "__iter__"): options.raft = [options.raft] if "sensor" in need or "channel" in need: if options.sensor is None: print >>sys.stderr, "Running over all sensors" options.sensor = [x[0] for x in inButler.queryMetadata("raw", "sensor", ("sensor",))] elif not hasattr(options.sensor, "__iter__"): options.sensor = [options.sensor] if "channel" in need: if options.channel is None: print >>sys.stderr, "Running over all channels" options.channel = [x[0] for x in inButler.queryMetadata("raw", "channel", ("channel",))] elif not hasattr(options.channel, "__iter__"): options.channel = [options.channel] for visit in options.visit: if "sensor" in need or "channel" in need: if "snap" in need: for snap in options.snap: for raft in options.raft: for sensor in options.sensor: if "channel" in need: for channel in options.channel: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, snap=snap, raft=raft, sensor=sensor, channel=channel): print >>sys.stderr, \ "***** Processing " + \ "visit %d snap %d 
raft %s " + \ "sensor %s channel %s" % \ (visit, snap, raft, sensor, channel) processFunction(inButler=inButler, outButler=outButler, visit=visit, snap=snap, raft=raft, sensor=sensor, channel=channel) else: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, snap=snap, raft=raft, sensor=sensor): print >>sys.stderr, \ "***** Processing visit %d " + \ "snap %d raft %s sensor %s" % \ (visit, snap, raft, sensor) processFunction(inButler=inButler, outButler=outButler, visit=visit, snap=snap, raft=raft, sensor=sensor) else: # snap for raft in options.raft: for sensor in options.sensor: if "channel" in need: for channel in options.channel: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, raft=raft, sensor=sensor, channel=channel): print >>sys.stderr, \ "***** Processing visit %d " + \ "raft %s sensor %s channel %s" % \ (visit, raft, sensor, channel) processFunction(inButler=inButler, outButler=outButler, visit=visit, raft=raft, sensor=sensor, channel=channel) else: if options.force or \ not outButler.fileExists(outDatasetType, visit=visit, raft=raft, sensor=sensor): print >>sys.stderr, \ "***** Processing visit %d " + \ "raft %s sensor %s" % \ (visit, raft, sensor) processFunction(inButler=inButler, outButler=outButler, visit=visit, raft=raft, sensor=sensor) else: # raft, sensor if options.force or \ not outButler.fileExists(outDatasetType, visit=visit): print >>sys.stderr, "***** Processing visit %d" % (visit,) processFunction(inButler=inButler, outButler=outButler, visit=visit)
"***** Processing visit %d " + \ "raft %s sensor %s channel %s" % \
("***** Processing visit %d " + \ "raft %s sensor %s channel %s") % \
def lsstSimMain(processFunction, outDatasetType, need=(), defaultRoot="."): parser = OptionParser() parser.add_option("-i", "--input", dest="root", default=defaultRoot, help="input root") parser.add_option("-o", "--output", dest="outRoot", default=".", help="output root") parser.add_option("-f", "--force", action="store_true", default=False, help="execute even if output dataset exists") if "calib" in need: parser.add_option("-C", "--calibRoot", dest="calibRoot", help="calibration root") parser.add_option("-r", "--registry", help="registry") parser.add_option("-v", "--visit", action="append", type="int", help="visit number (can be repeated)") if "snap" in need: parser.add_option("-s", "--snap", action="append", type="int", help="snap number (can be repeated)") if "sensor" in need: parser.add_option("-r", "--raft", action="append", help="raft coords (can be repeated)") parser.add_option("-c", "--sensor", action="append", help="sensor coords (can be repeated)") if "channel" in need: parser.add_option("-a", "--channel", action="append", help="channel coords (can be repeated)") (options, args) = parser.parse_args() if options.registry is None: if os.path.exists(os.path.join(options.root, "registry.sqlite3")): options.registry = os.path.join(options.root, "registry.sqlite3") if "calib" in need: if os.path.exists("/lsst/DC3/data/obstest/ImSim"): options.calibRoot = "/lsst/DC3/data/obstest/ImSim" bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, calibRoot=options.calibRoot, registry=options.registry)) else: bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, registry=options.registry)) inButler = bf.create() obf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.outRoot, registry=options.registry)) outButler = obf.create() if options.visit is None: print >>sys.stderr, "Running over all input visits" options.visit = [x[0] for x in inButler.queryMetadata("raw", "visit", ("visit",))] elif not hasattr(options.visit, "__iter__"): options.visit = [options.visit] if "snap" in need: if options.snap is None: print >>sys.stderr, "Running over all snaps" options.snap = [x[0] for x in inButler.queryMetadata("raw", "snap", ("snap",))] elif not hasattr(options.snap, "__iter__"): options.snap = [options.snap] else: setattr(options, "snap", [0]) if "sensor" in need or "channel" in need: if options.raft is None: print >>sys.stderr, "Running over all rafts" options.raft = [x[0] for x in inButler.queryMetadata("raw", "raft", ("raft",))] elif not hasattr(options.raft, "__iter__"): options.raft = [options.raft] if "sensor" in need or "channel" in need: if options.sensor is None: print >>sys.stderr, "Running over all sensors" options.sensor = [x[0] for x in inButler.queryMetadata("raw", "sensor", ("sensor",))] elif not hasattr(options.sensor, "__iter__"): options.sensor = [options.sensor] if "channel" in need: if options.channel is None: print >>sys.stderr, "Running over all channels" options.channel = [x[0] for x in inButler.queryMetadata("raw", "channel", ("channel",))] elif not hasattr(options.channel, "__iter__"): options.channel = [options.channel] for visit in options.visit: if "sensor" in need or "channel" in need: if "snap" in need: for snap in options.snap: for raft in options.raft: for sensor in options.sensor: if "channel" in need: for channel in options.channel: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, snap=snap, raft=raft, sensor=sensor, channel=channel): print >>sys.stderr, \ "***** Processing " + \ "visit %d snap %d 
raft %s " + \ "sensor %s channel %s" % \ (visit, snap, raft, sensor, channel) processFunction(inButler=inButler, outButler=outButler, visit=visit, snap=snap, raft=raft, sensor=sensor, channel=channel) else: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, snap=snap, raft=raft, sensor=sensor): print >>sys.stderr, \ "***** Processing visit %d " + \ "snap %d raft %s sensor %s" % \ (visit, snap, raft, sensor) processFunction(inButler=inButler, outButler=outButler, visit=visit, snap=snap, raft=raft, sensor=sensor) else: # snap for raft in options.raft: for sensor in options.sensor: if "channel" in need: for channel in options.channel: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, raft=raft, sensor=sensor, channel=channel): print >>sys.stderr, \ "***** Processing visit %d " + \ "raft %s sensor %s channel %s" % \ (visit, raft, sensor, channel) processFunction(inButler=inButler, outButler=outButler, visit=visit, raft=raft, sensor=sensor, channel=channel) else: if options.force or \ not outButler.fileExists(outDatasetType, visit=visit, raft=raft, sensor=sensor): print >>sys.stderr, \ "***** Processing visit %d " + \ "raft %s sensor %s" % \ (visit, raft, sensor) processFunction(inButler=inButler, outButler=outButler, visit=visit, raft=raft, sensor=sensor) else: # raft, sensor if options.force or \ not outButler.fileExists(outDatasetType, visit=visit): print >>sys.stderr, "***** Processing visit %d" % (visit,) processFunction(inButler=inButler, outButler=outButler, visit=visit)
"***** Processing visit %d " + \ "raft %s sensor %s" % \
("***** Processing visit %d " + \ "raft %s sensor %s") % \
def lsstSimMain(processFunction, outDatasetType, need=(), defaultRoot="."): parser = OptionParser() parser.add_option("-i", "--input", dest="root", default=defaultRoot, help="input root") parser.add_option("-o", "--output", dest="outRoot", default=".", help="output root") parser.add_option("-f", "--force", action="store_true", default=False, help="execute even if output dataset exists") if "calib" in need: parser.add_option("-C", "--calibRoot", dest="calibRoot", help="calibration root") parser.add_option("-r", "--registry", help="registry") parser.add_option("-v", "--visit", action="append", type="int", help="visit number (can be repeated)") if "snap" in need: parser.add_option("-s", "--snap", action="append", type="int", help="snap number (can be repeated)") if "sensor" in need: parser.add_option("-r", "--raft", action="append", help="raft coords (can be repeated)") parser.add_option("-c", "--sensor", action="append", help="sensor coords (can be repeated)") if "channel" in need: parser.add_option("-a", "--channel", action="append", help="channel coords (can be repeated)") (options, args) = parser.parse_args() if options.registry is None: if os.path.exists(os.path.join(options.root, "registry.sqlite3")): options.registry = os.path.join(options.root, "registry.sqlite3") if "calib" in need: if os.path.exists("/lsst/DC3/data/obstest/ImSim"): options.calibRoot = "/lsst/DC3/data/obstest/ImSim" bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, calibRoot=options.calibRoot, registry=options.registry)) else: bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, registry=options.registry)) inButler = bf.create() obf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.outRoot, registry=options.registry)) outButler = obf.create() if options.visit is None: print >>sys.stderr, "Running over all input visits" options.visit = [x[0] for x in inButler.queryMetadata("raw", "visit", ("visit",))] elif not hasattr(options.visit, "__iter__"): options.visit = [options.visit] if "snap" in need: if options.snap is None: print >>sys.stderr, "Running over all snaps" options.snap = [x[0] for x in inButler.queryMetadata("raw", "snap", ("snap",))] elif not hasattr(options.snap, "__iter__"): options.snap = [options.snap] else: setattr(options, "snap", [0]) if "sensor" in need or "channel" in need: if options.raft is None: print >>sys.stderr, "Running over all rafts" options.raft = [x[0] for x in inButler.queryMetadata("raw", "raft", ("raft",))] elif not hasattr(options.raft, "__iter__"): options.raft = [options.raft] if "sensor" in need or "channel" in need: if options.sensor is None: print >>sys.stderr, "Running over all sensors" options.sensor = [x[0] for x in inButler.queryMetadata("raw", "sensor", ("sensor",))] elif not hasattr(options.sensor, "__iter__"): options.sensor = [options.sensor] if "channel" in need: if options.channel is None: print >>sys.stderr, "Running over all channels" options.channel = [x[0] for x in inButler.queryMetadata("raw", "channel", ("channel",))] elif not hasattr(options.channel, "__iter__"): options.channel = [options.channel] for visit in options.visit: if "sensor" in need or "channel" in need: if "snap" in need: for snap in options.snap: for raft in options.raft: for sensor in options.sensor: if "channel" in need: for channel in options.channel: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, snap=snap, raft=raft, sensor=sensor, channel=channel): print >>sys.stderr, \ "***** Processing " + \ "visit %d snap %d 
raft %s " + \ "sensor %s channel %s" % \ (visit, snap, raft, sensor, channel) processFunction(inButler=inButler, outButler=outButler, visit=visit, snap=snap, raft=raft, sensor=sensor, channel=channel) else: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, snap=snap, raft=raft, sensor=sensor): print >>sys.stderr, \ "***** Processing visit %d " + \ "snap %d raft %s sensor %s" % \ (visit, snap, raft, sensor) processFunction(inButler=inButler, outButler=outButler, visit=visit, snap=snap, raft=raft, sensor=sensor) else: # snap for raft in options.raft: for sensor in options.sensor: if "channel" in need: for channel in options.channel: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, raft=raft, sensor=sensor, channel=channel): print >>sys.stderr, \ "***** Processing visit %d " + \ "raft %s sensor %s channel %s" % \ (visit, raft, sensor, channel) processFunction(inButler=inButler, outButler=outButler, visit=visit, raft=raft, sensor=sensor, channel=channel) else: if options.force or \ not outButler.fileExists(outDatasetType, visit=visit, raft=raft, sensor=sensor): print >>sys.stderr, \ "***** Processing visit %d " + \ "raft %s sensor %s" % \ (visit, raft, sensor) processFunction(inButler=inButler, outButler=outButler, visit=visit, raft=raft, sensor=sensor) else: # raft, sensor if options.force or \ not outButler.fileExists(outDatasetType, visit=visit): print >>sys.stderr, "***** Processing visit %d" % (visit,) processFunction(inButler=inButler, outButler=outButler, visit=visit)
clip = sfmPipe(calexp, psf)
clip = sfmPipe(calexp, psf, apCorr)
def sfmProcess(root=None, outRoot=None, registry=None, inButler=None, outButler=None, **keys): inButler, outButler = lsstSimSetup(root, outRoot, registry, None, inButler, outButler) calexp = inButler.get("calexp", **keys) psf = inButler.get("psf", **keys) apCorr = inButler.get("apCorr", **keys) clip = sfmPipe(calexp, psf) outButler.put(clip['sourceSet_persistable'], "src", **keys)
def sfmPipe(calexp, psf):
def sfmPipe(calexp, psf, apCorr):
def sfmPipe(calexp, psf): clip = { 'scienceExposure': calexp, 'psf': psf } clip = runStage(measPipe.SourceDetectionStage, """#<?cfg paf policy?> inputKeys: { exposure: scienceExposure psf: psf } outputKeys: { positiveDetection: positiveFootprintSet } backgroundPolicy: { algorithm: NONE } """, clip) clip = runStage(measPipe.SourceMeasurementStage, """#<?cfg paf policy?> inputKeys: { exposure: scienceExposure psf: psf positiveDetection: positiveFootprintSet } outputKeys: { sources: sourceSet } """, clip) clip = runStage(measPipe.ComputeSourceSkyCoordsStage, """#<?cfg paf policy?> inputKeys: { apCorr: apCorr sourceSet: sourceSet } """, clip) clip = runStage(measPipe.ComputeSourceSkyCoordsStage, """#<?cfg paf policy?> inputKeys: { sources: sourceSet exposure: scienceExposure } """, clip)
'psf': psf
'psf': psf, 'apCorr': apCorr
def sfmPipe(calexp, psf): clip = { 'scienceExposure': calexp, 'psf': psf } clip = runStage(measPipe.SourceDetectionStage, """#<?cfg paf policy?> inputKeys: { exposure: scienceExposure psf: psf } outputKeys: { positiveDetection: positiveFootprintSet } backgroundPolicy: { algorithm: NONE } """, clip) clip = runStage(measPipe.SourceMeasurementStage, """#<?cfg paf policy?> inputKeys: { exposure: scienceExposure psf: psf positiveDetection: positiveFootprintSet } outputKeys: { sources: sourceSet } """, clip) clip = runStage(measPipe.ComputeSourceSkyCoordsStage, """#<?cfg paf policy?> inputKeys: { apCorr: apCorr sourceSet: sourceSet } """, clip) clip = runStage(measPipe.ComputeSourceSkyCoordsStage, """#<?cfg paf policy?> inputKeys: { sources: sourceSet exposure: scienceExposure } """, clip)
clip = runStage(measPipe.ComputeSourceSkyCoordsStage,
clip = runStage(measPipe.ApertureCorrectionApplyStage,
def sfmPipe(calexp, psf): clip = { 'scienceExposure': calexp, 'psf': psf } clip = runStage(measPipe.SourceDetectionStage, """#<?cfg paf policy?> inputKeys: { exposure: scienceExposure psf: psf } outputKeys: { positiveDetection: positiveFootprintSet } backgroundPolicy: { algorithm: NONE } """, clip) clip = runStage(measPipe.SourceMeasurementStage, """#<?cfg paf policy?> inputKeys: { exposure: scienceExposure psf: psf positiveDetection: positiveFootprintSet } outputKeys: { sources: sourceSet } """, clip) clip = runStage(measPipe.ComputeSourceSkyCoordsStage, """#<?cfg paf policy?> inputKeys: { apCorr: apCorr sourceSet: sourceSet } """, clip) clip = runStage(measPipe.ComputeSourceSkyCoordsStage, """#<?cfg paf policy?> inputKeys: { sources: sourceSet exposure: scienceExposure } """, clip)
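Taken together, the single-frame-measurement rows above pass the aperture correction down from the butler: sfmProcess fetches apCorr and hands it to sfmPipe, which places it on the clipboard where the corrected ApertureCorrectionApplyStage can read it. A compressed sketch of the resulting flow (stage internals elided; this is not the actual LSST code):
def sfmPipeSketch(calexp, psf, apCorr):
    # clipboard now carries the aperture correction next to the exposure and PSF
    clip = {'scienceExposure': calexp, 'psf': psf, 'apCorr': apCorr}
    # ... SourceDetectionStage and SourceMeasurementStage fill clip['sourceSet'] ...
    # ApertureCorrectionApplyStage (the class name these rows correct) then reads
    # 'apCorr' and 'sourceSet' from the clipboard
    return clip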