Columns: rem (string lengths 0 to 322k), add (string lengths 0 to 2.05M), context (string lengths 8 to 228k)
cnv_ls = [[1, (2323,2600)], [2,(50000,)], [3,(43214,78788)], [5, (43242,)], [5,(144,566)], [5,(150,500)], [5,(500,950)], [5, (43241, 43242)]]
cnv_ls = [[1, (2323,2600)], [2,(50000,)], [3,(43214,78788)], [5,(150,500)], [5,(500,950)], [5, (43241, 43242)]]
def getCNVDataFromFileInGWA(input_fname_ls, array_id, max_amp=-0.33, min_amp=-0.33, min_size=50, min_no_of_probes=None, report=False): """ 2009-10-31 get deletion (below max_amp) or duplication (above min_amp) from files (output by RunGADA.py) """ sys.stderr.write("Getting CNV calls for array %s, min_size %s, min_no_of_probes %s from %s ..."%\ (array_id, min_size, min_no_of_probes, repr(input_fname_ls))) gwr_name = "(a-id %s)"%(array_id) gwr = GenomeWideResult(name=gwr_name) gwr.data_obj_ls = [] #list and dictionary are crazy references. gwr.data_obj_id2index = {} genome_wide_result_id = id(gwr) amp_ls = [] array_id2array = {} counter = 0 real_counter = 0 no_of_segments = 0 input_handler = fileinput.input(input_fname_ls) header = input_handler.readline().strip().split('\t') col_name2index = getColName2IndexFromHeader(header) ecotype_id = None for line in input_handler: if line.find("array_id")!=-1: continue line = line.strip() row = line.split('\t') cnv_array_id = int(row[col_name2index['array_id']]) cnv_ecotype_id = int(row[col_name2index.get('ecotype_id', col_name2index['array_id'])]) counter += 1 if cnv_array_id==array_id: no_of_segments += 1 if ecotype_id is None: ecotype_id = cnv_ecotype_id start_probe = row[col_name2index['start_probe']].split('_') # split chr_pos start_probe = map(int, start_probe) start_probe_id = row[col_name2index.get('start_probe_id', col_name2index['start_probe'])] stop_probe = row[col_name2index['end_probe']].split('_') stop_probe = map(int, stop_probe) end_probe_id = row[col_name2index.get('end_probe_id', col_name2index['end_probe'])] no_of_probes = int(row[col_name2index['length']]) if min_no_of_probes is not None and no_of_probes<min_no_of_probes: continue amplitude = float(row[col_name2index['amplitude']]) segment_chromosome = start_probe[0] segment_start_pos = start_probe[1]-12 segment_stop_pos = stop_probe[1]+12 segment_length = abs(segment_stop_pos-segment_start_pos) if min_size is not None and segment_length<min_size: continue if amplitude<=max_amp or amplitude>=min_amp: real_counter += 1 data_obj = DataObject(chromosome=segment_chromosome, position=segment_start_pos, stop_position=segment_stop_pos, \ value=amplitude) data_obj.comment = 'start probe-id %s, end probe-id %s, no of probes %s'%\ (start_probe_id, end_probe_id, no_of_probes) data_obj.genome_wide_result_id = genome_wide_result_id gwr.add_one_data_obj(data_obj) if report and counter%10000==0: sys.stderr.write('%s%s\t%s\t%s'%('\x08'*80, counter, no_of_segments, real_counter)) sys.stderr.write("\n") if gwr.max_value<3: # insertion at y=3 gwr.max_value=3 if gwr.min_value>-1: # deletion at y = -1 gwr.min_value = -1 gwr.name = '%s '%ecotype_id + gwr.name setattr(gwr, 'ecotype_id', ecotype_id) sys.stderr.write(" %s segments. Done.\n"%(len(gwr.data_obj_ls))) return gwr
print "==", segmentKey1==segmentKey2
print "==", segmentKey1==segmentKey2 """
def getCNVDataFromFileInGWA(input_fname_ls, array_id, max_amp=-0.33, min_amp=-0.33, min_size=50, min_no_of_probes=None, report=False): """ 2009-10-31 get deletion (below max_amp) or duplication (above min_amp) from files (output by RunGADA.py) """ sys.stderr.write("Getting CNV calls for array %s, min_size %s, min_no_of_probes %s from %s ..."%\ (array_id, min_size, min_no_of_probes, repr(input_fname_ls))) gwr_name = "(a-id %s)"%(array_id) gwr = GenomeWideResult(name=gwr_name) gwr.data_obj_ls = [] #list and dictionary are crazy references. gwr.data_obj_id2index = {} genome_wide_result_id = id(gwr) amp_ls = [] array_id2array = {} counter = 0 real_counter = 0 no_of_segments = 0 input_handler = fileinput.input(input_fname_ls) header = input_handler.readline().strip().split('\t') col_name2index = getColName2IndexFromHeader(header) ecotype_id = None for line in input_handler: if line.find("array_id")!=-1: continue line = line.strip() row = line.split('\t') cnv_array_id = int(row[col_name2index['array_id']]) cnv_ecotype_id = int(row[col_name2index.get('ecotype_id', col_name2index['array_id'])]) counter += 1 if cnv_array_id==array_id: no_of_segments += 1 if ecotype_id is None: ecotype_id = cnv_ecotype_id start_probe = row[col_name2index['start_probe']].split('_') # split chr_pos start_probe = map(int, start_probe) start_probe_id = row[col_name2index.get('start_probe_id', col_name2index['start_probe'])] stop_probe = row[col_name2index['end_probe']].split('_') stop_probe = map(int, stop_probe) end_probe_id = row[col_name2index.get('end_probe_id', col_name2index['end_probe'])] no_of_probes = int(row[col_name2index['length']]) if min_no_of_probes is not None and no_of_probes<min_no_of_probes: continue amplitude = float(row[col_name2index['amplitude']]) segment_chromosome = start_probe[0] segment_start_pos = start_probe[1]-12 segment_stop_pos = stop_probe[1]+12 segment_length = abs(segment_stop_pos-segment_start_pos) if min_size is not None and segment_length<min_size: continue if amplitude<=max_amp or amplitude>=min_amp: real_counter += 1 data_obj = DataObject(chromosome=segment_chromosome, position=segment_start_pos, stop_position=segment_stop_pos, \ value=amplitude) data_obj.comment = 'start probe-id %s, end probe-id %s, no of probes %s'%\ (start_probe_id, end_probe_id, no_of_probes) data_obj.genome_wide_result_id = genome_wide_result_id gwr.add_one_data_obj(data_obj) if report and counter%10000==0: sys.stderr.write('%s%s\t%s\t%s'%('\x08'*80, counter, no_of_segments, real_counter)) sys.stderr.write("\n") if gwr.max_value<3: # insertion at y=3 gwr.max_value=3 if gwr.min_value>-1: # deletion at y = -1 gwr.min_value = -1 gwr.name = '%s '%ecotype_id + gwr.name setattr(gwr, 'ecotype_id', ecotype_id) sys.stderr.write(" %s segments. Done.\n"%(len(gwr.data_obj_ls))) return gwr
@classmethod
def __init__(self, **keywords):
    """
    2008-11-10
        upgrade option handling to ProcessOptions
    2008-4-2
    2008-02-28
        argument_default_dict is a dictionary of default arguments, the key is a tuple,
        ('argument_name', is_argument_required, argument_type)
        argument_type is optional
    """
    #argument dictionary
    #self.ad = process_function_arguments(keywords, argument_default_dict, error_doc=__doc__, class_to_have_attr=self)
    from pymodule import ProcessOptions
    self.ad = ProcessOptions.process_function_arguments(keywords, self.option_default_dict, error_doc=self.__doc__, class_to_have_attr=self)
get_phenotype_method_id_info = classmethod(get_phenotype_method_id_info)
@classmethod
def get_phenotype_method_id_info(cls, curs, phenotype_avg_table, phenotype_method_table ): """ 2009-2-2 curs could be either MySQLdb cursor or elixirdb.metadata.bind. do two selects in one 2008-4-2 """ sys.stderr.write("Getting phenotype_method_id info ... " ) phenotype_method_id2index = {} #index of the matrix method_id_name_ls = [] #as header for each phenotype phenotype_id_ls = [] rows = curs.execute("select m.id, m.short_name, m.transformation_description from %s m, (select distinct method_id from %s) p where m.id=p.method_id order by id"%\ (phenotype_method_table, phenotype_avg_table)) is_elixirdb = 1 if hasattr(curs, 'fetchall'): #2009-2-2 this curs is not elixirdb.metadata.bind rows = curs.fetchall() is_elixirdb = 0 phenotype_method_id2transformation_description = {} for row in rows: if is_elixirdb: method_id = row.id method_short_name = row.short_name transformation_description = row.transformation_description else: method_id, method_short_name, transformation_description = row[:3] """ curs.execute("select short_name, transformation_description from %s where id=%s"%(phenotype_method_table, method_id)) pm_rows = curs.fetchall() method_short_name = pm_rows[0][0] transformation_description = pm_rows[0][1] """ phenotype_id_ls.append(method_id) method_id_name_ls.append('%s_%s'%(method_id, method_short_name)) phenotype_method_id2index[method_id] = len(phenotype_method_id2index) if transformation_description=='None': transformation_description = None phenotype_method_id2transformation_description[method_id] = transformation_description return_data = PassingData(phenotype_method_id2index=phenotype_method_id2index, method_id_name_ls=method_id_name_ls,\ phenotype_id_ls=phenotype_id_ls,\ phenotype_method_id2transformation_description=phenotype_method_id2transformation_description) sys.stderr.write("Done\n") return return_data
get_ecotype_id2info = classmethod(get_ecotype_id2info)
@classmethod
def get_ecotype_id2info(cls, curs, phenotype_avg_table, ecotype_table): """ 2009-2-2 curs could be either MySQLdb cursor or elixirdb.metadata.bind. do two selects in one 2008-4-2 """ sys.stderr.write("Getting ecotype id info ... " ) ecotype_id2index = {} #index of the matrix ecotype_id_ls = [] ecotype_name_ls = [] rows = curs.execute("select e.id, e.nativename from %s e, (select distinct ecotype_id from %s) p where e.id=p.ecotype_id order by id"%\ (ecotype_table, phenotype_avg_table)) is_elixirdb = 1 if hasattr(curs, 'fetchall'): #2009-2-2 this curs is not elixirdb.metadata.bind rows = curs.fetchall() is_elixirdb = 0 for row in rows: if is_elixirdb: ecotype_id = row.id nativename = row.nativename else: ecotype_id, nativename = row[:2] """ curs.execute("select nativename from %s where id=%s"%(ecotype_table, ecotype_id)) nativename = curs.fetchall()[0][0] """ ecotype_name_ls.append(nativename) ecotype_id_ls.append(ecotype_id) ecotype_id2index[ecotype_id] = len(ecotype_id2index) sys.stderr.write("Done\n") return ecotype_id2index, ecotype_id_ls, ecotype_name_ls
get_matrix = classmethod(get_matrix)
@classmethod
def get_matrix(cls, curs, phenotype_avg_table, ecotype_id2index, phenotype_info, get_raw_data=0, \ phenotype_method_table='phenotype_method'): """ 2009-9-2 if value>-5e-7 and value<+5e-7: #beyond float resolution by a python float value = 0 without condition above, values like -5.32907e-15 would be taken as -5.32907e, -3.76545e-12 as -3.76545 2009-9-2 add phenotype_method_table to get stddev, min_value to do certain transformation involving these two variables 2009-2-2 curs could be either MySQLdb cursor or elixirdb.metadata.bind. average phenotype values among replicates in the same phenotype method 2008-11-10 add code to transform phenotype according to phenotype_info.phenotype_method_id2transformation_description add option get_raw_data, if True/1, no transformation. 2008-04-23 #some db entries (phenotype_avg.value) have nothing there. convert None to 'NA' 2008-04-09 no longer uses numpy matrix. just simple 2-d list. 2008-4-2 """ sys.stderr.write("Getting matrix ... " ) #data_matrix = numpy.zeros([len(ecotype_id2index), len(phenotype_method_id2index)], numpy.float) data_matrix = [[]]*len(ecotype_id2index) for i in range(len(ecotype_id2index)): data_matrix[i] = ['NA']*len(phenotype_info.phenotype_method_id2index) #data_matrix[:] = numpy.nan rows = curs.execute("select pa.ecotype_id, pa.method_id, pa.value, pm.min_value, pm.stddev from %s pa, %s pm where pm.id=pa.method_id"%\ (phenotype_avg_table, phenotype_method_table)) is_elixirdb = 1 if hasattr(curs, 'fetchall'): #2009-2-2 this curs is not elixirdb.metadata.bind rows = curs.fetchall() is_elixirdb = 0 for row in rows: if is_elixirdb: ecotype_id = row.ecotype_id phenotype_method_id = row.method_id value = row.value min_value = row.min_value stddev = row.stddev else: ecotype_id, phenotype_method_id, value, min_value, stddev = row if value==None: #some db entries have nothing there. convert None to 'NA' value = 'NA' elif not get_raw_data: #2008-11-10 transformation_description = phenotype_info.phenotype_method_id2transformation_description.get(phenotype_method_id) if value>-5e-7 and value<+5e-7: #beyond float resolution by a python float value = 0 if not transformation_description: pass elif transformation_description.find('Log(x)')!=-1: try: value = math.log10(value) except: sys.stderr.write("Ecotype ID %s, phenotype_method_id %s, value %s.\n"%(ecotype_id, phenotype_method_id, value)) sys.stderr.write('Except type: %s\n'%repr(sys.exc_info()[0])) traceback.print_exc() print sys.exc_info() #raise sys.exc_info()[0] sys.exit(2) elif transformation_description=="Log(SD/10+x-minVal)": #2009-9-1 new transformation if min_value is not None and stddev is not None: value = math.log10(stddev/10. + value-min_value) else: value = value elif transformation_description=='Log(5+x)': value = math.log10(5+value) elif transformation_description=='Log(0.5+x)': value = math.log10(0.5+value) elif transformation_description=='(x-3)': value = value-3 col_index = phenotype_info.phenotype_method_id2index[phenotype_method_id] data_matrix[ecotype_id2index[ecotype_id]][col_index] = value sys.stderr.write("Done\n") return data_matrix
getPhenotypeData = classmethod(getPhenotypeData)
def getPhenotypeData(cls, curs, phenotype_avg_table=None, phenotype_method_table=None, ecotype_table='stock.ecotype', get_raw_data=1):
    """
    2009-2-2
        wrap up all other 3 methods
    """
    phenotype_info = cls.get_phenotype_method_id_info(curs, phenotype_avg_table, phenotype_method_table)
    ecotype_id2index, ecotype_id_ls, ecotype_name_ls = cls.get_ecotype_id2info(curs, phenotype_avg_table, ecotype_table)
    data_matrix = cls.get_matrix(curs, phenotype_avg_table, ecotype_id2index, phenotype_info, get_raw_data)
    pheno_data = SNPData(col_id_ls=phenotype_info.phenotype_id_ls, row_id_ls=ecotype_id_ls, data_matrix=data_matrix)
    pheno_data.row_label_ls = ecotype_name_ls
    pheno_data.col_label_ls = phenotype_info.method_id_name_ls
    return pheno_data
write_data_matrix(pheno_data.data_matrix, self.output_fname, header, pheno_data.row_id_ls, pheno_data.row_label_ls)
write_data_matrix(pheno_data.data_matrix, self.output_fname, header, pheno_data.row_id_ls, pheno_data.row_label_ls, \ transform_to_numpy=False)
def run(self):
    import MySQLdb
    conn = MySQLdb.connect(db=self.dbname, host=self.hostname, user = self.db_user, passwd = self.db_passwd)
    curs = conn.cursor()
    pheno_data = self.getPhenotypeData(curs, self.phenotype_avg_table, self.phenotype_method_table, \
        self.ecotype_table, get_raw_data=self.get_raw_data)
    header = ['ecotype id', 'nativename'] + pheno_data.col_label_ls
    write_data_matrix(pheno_data.data_matrix, self.output_fname, header, pheno_data.row_id_ls, pheno_data.row_label_ls)
def output(self, data_matrix, probe_id_ls, chr_pos_ls, header, output_fname_prefix, split_genome_into_chromosomes=False): """
@classmethod def output(cls, data_matrix, probe_id_ls, chr_pos_ls, header, output_fname_prefix, split_genome_into_chromosomes=False): """ 2010-2-10 becomes classmethod
def output(self, data_matrix, probe_id_ls, chr_pos_ls, header, output_fname_prefix, split_genome_into_chromosomes=False): """ 2009-10-11 add argument split_genome_into_chromosomes 2009-5-18 split output into different chromosomes """ sys.stderr.write("Outputting ...") no_of_rows, no_of_cols = data_matrix.shape old_chr = None old_writer = None output_fname_prefix = os.path.splitext(output_fname_prefix)[0] for i in range(no_of_rows): new_chr = chr_pos_ls[i][0] if split_genome_into_chromosomes: if old_chr==None or old_chr!=new_chr: writer = csv.writer(open('%s_chr%s.tsv'%(output_fname_prefix, new_chr), 'w'), delimiter='\t') writer.writerow(header) old_chr = new_chr del old_writer #close the old file old_writer = writer elif old_writer is None: old_writer = csv.writer(open('%s.tsv'%(output_fname_prefix), 'w'), delimiter='\t') old_writer.writerow(header) data_row = [probe_id_ls[i]] for j in range(no_of_cols): data_row.append(data_matrix[i][j]) data_row.append(chr_pos_ls[i][0]) data_row.append(chr_pos_ls[i][1]) old_writer.writerow(data_row) del old_writer sys.stderr.write("Done.\n")
return redirect_to("/css/main.css")
response.headers['content-type'] = 'text/css; charset=utf-8'
return render("/css/main.css")
def index(self): return redirect_to("/css/main.css")
self.url = ozutil.check_url(self.url)
self.url = ozutil.check_url(idl.url())
def __init__(self, idl, config):
    update = idl.update()
    if idl.arch() != "i386":
        raise Exception, "Invalid arch " + arch + "for RHEL-2.1 guest"
    self.ks_file = ozutil.generate_full_auto_path("rhel-2.1-jeos.ks")
self.log.info("%dkB of %dkB" % (down_current/1024, down_total/1024))
if down_total == 0:
    return
current_mb = int(down_current) / 10485760
if current_mb > self.last_mb or down_current == down_total:
    self.last_mb = current_mb
    self.log.debug("%dkB of %dkB" % (down_current/1024, down_total/1024))
def progress(down_total, down_current, up_total, up_current):
    # FIXME: we should probably not print every single time this is
    # called; maybe every 1MB or so?
    self.log.info("%dkB of %dkB" % (down_current/1024, down_total/1024))
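The pair of snippets above replaces per-callback logging with a report only when the megabyte counter advances or the download finishes. A minimal standalone sketch of that throttling idea, assuming a plain print-based reporter instead of the class's logger:

last_mb = [-1]   # mutable holder standing in for self.last_mb

def progress(down_total, down_current):
    # report only when a new megabyte boundary is crossed or the download is done
    if down_total == 0:
        return
    current_mb = int(down_current) // 1048576
    if current_mb > last_mb[0] or down_current == down_total:
        last_mb[0] = current_mb
        print "%dkB of %dkB" % (down_current / 1024, down_total / 1024)

progress(4096 * 1024, 1024 * 1024)   # prints once: "1024kB of 4096kB"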
initrdline += self.url + "\n"
initrdline += " repo=" + self.url + "\n"
def modify_iso(self): self.log.debug("Putting the kickstart in place")
interfaceModel.setProp("type", "virtio")
interfaceModel.setProp("type", self.nicmodel)
def generate_define_xml(self, bootdev, want_install_disk=True): self.log.info("Generate/define XML for guest %s with bootdev %s" % (self.name, bootdev))
self.url = ozutil.check_url(self.url)
self.url = ozutil.check_url(idl.url())
def __init__(self, idl, config):
    update = idl.update()
    arch = idl.arch()
    self.ks_file = ozutil.generate_full_auto_path("rhel-3-jeos.ks")
disks = input_doc.xpathEval('/domain/devices/disk/source')
disks = input_doc.xpathEval('/domain/devices/disk')
def guestfs_handle_setup(self, libvirt_xml):
    input_doc = libxml2.parseMemory(libvirt_xml, len(libvirt_xml))
    namenode = input_doc.xpathEval('/domain/name')
    if len(namenode) != 1:
        raise Exception, "invalid libvirt XML with no name"
    input_name = namenode[0].getContent()
    disks = input_doc.xpathEval('/domain/devices/disk/source')
    if len(disks) != 1:
        raise Exception, "oz cannot handle a libvirt domain with more than 1 disk"
    input_disk = disks[0].prop('file')
input_disk = disks[0].prop('file')
source = disks[0].xpathEval('source')
if len(source) != 1:
    raise Exception, "invalid <disk> entry without a source"
input_disk = source[0].prop('file')
driver = disks[0].xpathEval('driver')
if len(driver) == 0:
    input_disk_type = 'raw'
elif len(driver) == 1:
    input_disk_type = driver[0].prop('type')
else:
    raise Exception, "invalid <disk> entry without a driver"
def guestfs_handle_setup(self, libvirt_xml): input_doc = libxml2.parseMemory(libvirt_xml, len(libvirt_xml)) namenode = input_doc.xpathEval('/domain/name') if len(namenode) != 1: raise Exception, "invalid libvirt XML with no name" input_name = namenode[0].getContent() disks = input_doc.xpathEval('/domain/devices/disk/source') if len(disks) != 1: raise Exception, "oz cannot handle a libvirt domain with more than 1 disk" input_disk = disks[0].prop('file')
g.add_drive(input_disk)
g.add_drive_opts(input_disk, format=input_disk_type)
def guestfs_handle_setup(self, libvirt_xml): input_doc = libxml2.parseMemory(libvirt_xml, len(libvirt_xml)) namenode = input_doc.xpathEval('/domain/name') if len(namenode) != 1: raise Exception, "invalid libvirt XML with no name" input_name = namenode[0].getContent() disks = input_doc.xpathEval('/domain/devices/disk/source') if len(disks) != 1: raise Exception, "oz cannot handle a libvirt domain with more than 1 disk" input_disk = disks[0].prop('file')
os = g.inspect_os()
roots = g.inspect_os()
if len(roots) == 0:
    raise Exception, "No operating systems found on the disk"
def guestfs_handle_setup(self, libvirt_xml): input_doc = libxml2.parseMemory(libvirt_xml, len(libvirt_xml)) namenode = input_doc.xpathEval('/domain/name') if len(namenode) != 1: raise Exception, "invalid libvirt XML with no name" input_name = namenode[0].getContent() disks = input_doc.xpathEval('/domain/devices/disk/source') if len(disks) != 1: raise Exception, "oz cannot handle a libvirt domain with more than 1 disk" input_disk = disks[0].prop('file')
mountpoints = g.inspect_get_mountpoints(os[0])
self.log.debug("Mounting /")
for point in mountpoints:
    if point[0] == '/':
        g.mount(point[1], '/')
        break
self.log.debug("Mount other filesystems")
for point in mountpoints:
    if point[0] != '/':
        g.mount(point[1], point[0])
for root in roots:
    self.log.debug("Root device: %s" % root)
    mps = g.inspect_get_mountpoints(root)
    def compare(a, b):
        if len(a[0]) > len(b[0]):
            return 1
        elif len(a[0]) == len(b[0]):
            return 0
        else:
            return -1
    mps.sort(compare)
    for mp_dev in mps:
        g.mount_options('', mp_dev[1], mp_dev[0])
def guestfs_handle_setup(self, libvirt_xml):
    input_doc = libxml2.parseMemory(libvirt_xml, len(libvirt_xml))
    namenode = input_doc.xpathEval('/domain/name')
    if len(namenode) != 1:
        raise Exception, "invalid libvirt XML with no name"
    input_name = namenode[0].getContent()
    disks = input_doc.xpathEval('/domain/devices/disk/source')
    if len(disks) != 1:
        raise Exception, "oz cannot handle a libvirt domain with more than 1 disk"
    input_disk = disks[0].prop('file')
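The replacement above sorts each root's mount points by path length before mounting, so that a parent such as / is mounted before children such as /usr or /usr/local. A small standalone sketch of why that ordering matters, using a hypothetical mount table and plain print instead of guestfs calls:

mountpoints = [("/usr/local", "/dev/sda3"), ("/", "/dev/sda1"), ("/usr", "/dev/sda2")]
# shortest path first: parents are mounted before directories nested inside them
for path, device in sorted(mountpoints, key=lambda mp: len(mp[0])):
    print "mount %s at %s" % (device, path)
# prints /dev/sda1 at /, then /dev/sda2 at /usr, then /dev/sda3 at /usr/local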
tarout = self.iso_contents + "/data.tar"
def copy_iso(self):
    self.log.info("Copying ISO contents for modification")
    if os.access(self.iso_contents, os.F_OK):
        shutil.rmtree(self.iso_contents)
    os.makedirs(self.iso_contents)
gfs.add_drive(self.orig_iso)
gfs.add_drive_opts(self.orig_iso, readonly=1, format='raw')
def copy_iso(self): self.log.info("Copying ISO contents for modification") if os.access(self.iso_contents, os.F_OK): shutil.rmtree(self.iso_contents) os.makedirs(self.iso_contents)
gfs.mount("/dev/sda", "/") self.log.debug("Getting data from ISO onto %s" % (tarout)) gfs.tar_out("/", tarout) self.log.debug("Cleaning up guestfs process")
gfs.mount_options('ro', "/dev/sda", "/")
rd,wr = os.pipe()
current = os.getcwd()
os.chdir(self.iso_contents)
tar = subprocess.Popen(["tar", "-x", "-v"], stdin=rd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.log.debug("Extracting ISO contents:")
gfs.tar_out("/", "/dev/fd/%d" % wr)
self.log.debug("%s" % tar.stdout.read())
os.close(rd)
os.close(wr)
os.chdir(current)
def copy_iso(self): self.log.info("Copying ISO contents for modification") if os.access(self.iso_contents, os.F_OK): shutil.rmtree(self.iso_contents) os.makedirs(self.iso_contents)
self.log.debug("Extracting tarball") tar = tarfile.open(tarout) tar.extractall(path=self.iso_contents) self.log.debug("Removing tarball") os.unlink(tarout)
def copy_iso(self): self.log.info("Copying ISO contents for modification") if os.access(self.iso_contents, os.F_OK): shutil.rmtree(self.iso_contents) os.makedirs(self.iso_contents)
def __init__(self, idl, config, nicmodel, haverepo, diskbus):
def __init__(self, idl, config, nicmodel, haverepo, diskbus, brokenisomethod):
def __init__(self, idl, config, nicmodel, haverepo, diskbus):
    update = idl.update()
    arch = idl.arch()
    self.ks_file = ozutil.generate_full_auto_path("fedora-" + update + "-jeos.ks")
    self.haverepo = haverepo
    self.installtype = idl.installtype()
eltoritodata = cdfile.read(count*2048)
eltoritodata = cdfile.read(count*512)
def checksum(data):
    s = 0
    for i in range(0, len(data), 2):
        w = ord(data[i]) + (ord(data[i+1]) << 8)
        s = numpy.uint16(numpy.uint16(s) + numpy.uint16(w))
    return s
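The checksum above folds the boot-catalog bytes into a 16-bit sum of little-endian words, using numpy.uint16 for the wrap-around. An equivalent plain-Python sketch of the same arithmetic (a hypothetical checksum16 helper that masks with 0xFFFF instead of using numpy):

def checksum16(data):
    # sum little-endian 16-bit words, truncating to 16 bits on every addition
    s = 0
    for i in range(0, len(data), 2):
        w = ord(data[i]) + (ord(data[i + 1]) << 8)
        s = (s + w) & 0xFFFF
    return s

print checksum16("\x01\x00\xff\xff")   # (1 + 65535) & 0xFFFF == 0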
initrdline = " append initrd=initrd.img ks=cdrom:/ks.cfg method="
initrdline = " append initrd=initrd.img ks=cdrom:/ks.cfg"
def modify_iso(self): self.log.debug("Putting the kickstart in place")
initrdline += "cdrom:/dev/cdrom\n"
initrdline += "\n"
def modify_iso(self): self.log.debug("Putting the kickstart in place")
if self.haverepo:
    initrdline += " repo="
else:
    initrdline += " method="
def modify_iso(self): self.log.debug("Putting the kickstart in place")
packages = cdl.newChild(None, "packages", None)
def output_cdl_xml(self, lines, services):
    doc = libxml2.newDoc("1.0")
    cdl = doc.newChild(None, "cdl", None)
    packages = cdl.newChild(None, "packages", None)
packages.newChild(None, "package", None) packages.setProp("name", line)
package = packages.newChild(None, "package", None)
package.setProp("name", line)
def output_cdl_xml(self, lines, services):
    doc = libxml2.newDoc("1.0")
    cdl = doc.newChild(None, "cdl", None)
    packages = cdl.newChild(None, "packages", None)
self.log.debug("Name: %s, UUID: %s, MAC: %s, distro: %s" % (self.name, self.uuid, self.macaddr, self.distro))
self.log.debug("Name: %s, UUID: %s" % (self.name, self.uuid)) self.log.debug("MAC: %s, distro: %s" % (self.macaddr, self.distro))
def __init__(self, distro, update, arch, nicmodel, clockoffset, mousetype, diskbus, config):
    if arch != "i386" and arch != "x86_64":
        raise Exception, "Unsupported guest arch " + arch
    self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
    self.uuid = uuid.uuid4()
    mac = [0x52, 0x54, 0x00, random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff)]
    self.macaddr = ':'.join(map(lambda x:"%02x" % x, mac))
    self.distro = distro
    self.update = update
    self.arch = arch
    self.name = self.distro + self.update + self.arch
"-o", output, inputdir])
"-o", self.output_iso, self.iso_contents])
def generate_new_iso(self):
    self.log.info("Generating new ISO")
    Guest.subprocess_check_output(["mkisofs", "-r", "-V", "Custom", "-cache-inodes", "-J", "-l",
                                   "-b", "isolinux/isolinux.bin", "-c", "isolinux/boot.cat",
                                   "-no-emul-boot", "-boot-load-size", "4", "-boot-info-table",
                                   "-v", "-v", "-o", output, inputdir])
def targetDev(self, doc, type, path, bus):
def targetDev(self, doc, devicetype, path, bus):
def targetDev(self, doc, type, path, bus):
    installNode = doc.createElement("disk")
    installNode.setAttribute("type", "file")
    installNode.setAttribute("device", type)
    sourceInstallNode = doc.createElement("source")
    sourceInstallNode.setAttribute("file", path)
    installNode.appendChild(sourceInstallNode)
    targetInstallNode = doc.createElement("target")
    targetInstallNode.setAttribute("dev", bus)
    installNode.appendChild(targetInstallNode)
    return installNode
installNode.setAttribute("device", type)
installNode.setAttribute("device", devicetype)
def targetDev(self, doc, type, path, bus):
    installNode = doc.createElement("disk")
    installNode.setAttribute("type", "file")
    installNode.setAttribute("device", type)
    sourceInstallNode = doc.createElement("source")
    sourceInstallNode.setAttribute("file", path)
    installNode.appendChild(sourceInstallNode)
    targetInstallNode = doc.createElement("target")
    targetInstallNode.setAttribute("dev", bus)
    installNode.appendChild(targetInstallNode)
    return installNode
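These two records rename the type parameter to devicetype and update its use; inside the old signature the name type shadowed the builtin. A tiny hypothetical illustration of the hazard:

def bad(type, value):
    # 'type' is now the string argument, so calling it raises TypeError
    return type(value)

def good(devicetype, value):
    # the builtin is still reachable once the parameter is renamed
    return type(value)

print good("cdrom", 42)     # <type 'int'>
# bad("cdrom", 42) would raise: 'str' object is not callable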
dl.ReplyToThread(game.threadId, ":redhammer: " + (msg % name))
dl.ReplyToThread(game.threadId, ":redhammer: " + (message % name))
def PostLynchedMessage(self, game, name):
    if not game.post_lynches:
        return
current_day = models.IntegerField()
living_count = models.IntegerField()
players_count = models.IntegerField()
current_day = models.IntegerField(default=1)
living_count = models.IntegerField(default=0)
players_count = models.IntegerField(default=0)
def twitter_in_bg(msg):
    consumer_key = "r11W5M2m2tdNtcknWSjNKw"
    consumer_secret = "zSd0vCV2mTcWVyKfAKiIcm6gdzozLEVMjXXpT51XV3c"
    oauth_token = "166303978-z0Dp7pAoKrgs2ZjN7rCmwVmd9zum4LaUNjH5fzhJ"
    oauth_token_secret = "YK7cOAF7HcSbvn43EPGBgSXyQ5RWIaruYAfTz3lNpwE"
    twitter = OAuthApi(consumer_key, consumer_secret, oauth_token, oauth_token_secret)
    twitter.UpdateStatus(msg)
inputs['disablesmilies'] = 'no'
del inputs['disablesmilies']
def ReplyToThread(self, thread, message):
    getUrl = "http://forums.somethingawful.com/newreply.php?action=newreply&threadid=%s" % thread
    postUrl = "http://forums.somethingawful.com/newreply.php?action=newreply"
a.disconnect()
a.connect("localhost", 6000, sys.argv[1])
def think(self): """ Performs a single step of thinking for our agent. Gets called on every iteration of our think loop. """
def __init__(self, world_model):
def __init__(self, world_model, body_model):
def __init__(self, world_model): self.world_model = world_model
msg = "(kick %.10f %.10f)"
msg = "(kick %.10f %.10f)" % (power, relative_direction)
def kick(self, power, relative_direction): """ Accelerates the ball with the given power in the given direction, relative to the current direction of the player's body. """
setattr(self, item, lambda *args: bork())
setattr(self, item, lambda *args: destroyed_method())
def destroyed_method():
    """
    Raises an error, no matter the arguments given.
    """
    m = ("Agent has been disconnected and no longer supports "
         "any method calls.")
    raise NotImplementedError(m)
super.__init__(self, distance, direction)
GameObject.__init__(self, distance, direction)
def __init__(self, distance, direction, position): """ Field objects have a position in addition to GameObject's members. """
super.__init__(self, distance, direction)
GameObject.__init__(self, distance, direction)
def __init__(self, distance, direction, line_id):
    self.line_id = line_id
    super.__init__(self, distance, direction)
super.__init__(self, distance, direction, position)
FieldObject.__init__(self, distance, direction, position)
def __init__(self, distance, direction, position, dist_change, dir_change, speed): """ Adds variables for velocity vector deltas. """
super.__init__(self, distance, direction, position)
FieldObject.__init__(self, distance, direction, position)
def __init__(self, distance, direction, position):
super.__init__(self, distance, direction, position, dist_change,
MobileObject.__init__(self, distance, direction, position, dist_change,
def __init__(self, distance, direction, position, dist_change, dir_change, speed): super.__init__(self, distance, direction, position, dist_change, dir_change, speed)
super.__init__(self, distance, direction, position, dist_change,
MobileObject.__init__(self, distance, direction, position, dist_change,
def __init__(self, distance, direction, position, dist_change, dir_change, speed, team, side, uniform_number, body_direction, face_direction, neck_direction): """ Adds player-specific information to a mobile object. """
super.__init__(self, distance, direction, position)
StationaryObject.__init__(self, distance, direction, position)
def __init__(self, distance, direction, position, marker_id): """ Adds a marker id for this field object. Every marker has a unique id. """
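The records in this group all make the same fix: in super.__init__(self, ...) the name super refers to the builtin super type itself rather than a bound proxy, so the call never reaches the parent initializer, and Python 2 old-style classes cannot use super() at all; each subclass therefore names its parent explicitly. A short runnable sketch using the GameObject and FieldObject names from the records above (the attribute values are illustrative):

class GameObject:                       # old-style Python 2 class
    def __init__(self, distance, direction):
        self.distance = distance
        self.direction = direction

class FieldObject(GameObject):
    def __init__(self, distance, direction, position):
        # explicit parent call; super() is unavailable for old-style classes
        GameObject.__init__(self, distance, direction)
        self.position = position

f = FieldObject(10.0, 45.0, (1, 2))
print f.distance, f.position            # 10.0 (1, 2)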
for agent in xrange(max(11, int(sys.argv[2]))):
for agent in xrange(min(11, int(sys.argv[2]))):
def think(self): """ Performs a single step of thinking for our agent. Gets called on every iteration of our think loop. """
m = "Server returned an error: '%s'" % msg[1:]
m = "Server returned an error: '%s'" % msg[1]
def _handle_error(self, msg): """ Deals with error messages by raising them as exceptions. """
self.msg_handler = handler.MessageHandler(self.world)
self.msg_handler = handler.MessageHandler(self.world, self.body)
def __init__(self, host, port, teamname, version=11): """ Gives us a connection to the server as one player on a team. This immediately connects the agent to the server and starts receiving and parsing the information it sends. """
self.act_handler.move(1000, 1000)
if not self.moved:
    self.act_handler.move(1000, 1000)
    self.moved = True
if self.world.ball is not None:
    if self.world.ball.distance <= 1:
        self.act_handler.kick(100, random.randint(0, 181) - 180)
        return
    elif -5 < self.world.ball.direction < 5:
        self.act_handler.dash(100)
        return
    else:
        self.act_handler.turn(self.world.ball.direction)
        return
else:
    self.act_handler.turn(10 + random.randint(0, 10))
    return
def think(self): """ Performs a single step of thinking for our agent. Gets called on every iteration of our think loop. """
parse_message(line.strip())
parse(line.strip())
def stringify(itemlist):
    """
    Takes a list generated by parsing a message and condenses the characters
    it contains into strings, seperating them by spaces and leaving them
    alongside the lists therin.
    """
    result = []
    s = ""
    for item in itemlist:
        if item == " ":
            if s != "":
                result.append(s)
                s = ""
        elif type(item) is type(""):
            s += item
        elif type(item) is type([]):
            if s != "":
                result.append(s)
                s = ""
            result.append(stringify(item))
    if s != "":
        result.append(s)
    return result
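A quick hypothetical input/output pair for stringify, showing how runs of characters collapse into tokens while nested lists keep their structure:

parsed = [['s', 'e', 'e', ' ', ['1', '0', ' ', '2', '0']]]
print stringify(parsed)   # [['see', ['10', '20']]]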
for i, (vertex1_label, vertex2_label, edge ) in enumerate( edges_from_osmdb( osmdb, vertices_namespace, slogs, profiledb ) ):
for i, (vertex1_label, vertex2_label, edge ) in enumerate( edges_from_osmdb( osmdb, vertex_namespace, slogs, profiledb ) ):
def gdb_import_osm(gdb, osmdb, vertex_namespace, slogs, profiledb=None):
    cursor = gdb.get_cursor()
    n_edges = osmdb.count_edges()
    # for each edge in the osmdb
    for i, (vertex1_label, vertex2_label, edge ) in enumerate( edges_from_osmdb( osmdb, vertices_namespace, slogs, profiledb ) ):
        if i%(n_edges//100+1)==0:
            sys.stdout.write( "%d/%d edges loaded\r\n"%(i, n_edges))
        gdb.add_vertex( vertex1_label, cursor )
        gdb.add_vertex( vertex2_label, cursor )
        gdb.add_edge( vertex1_label, vertex2_label, edge, cursor )
    gdb.commit()
    print "indexing vertices..."
    gdb.index()
c = gdb.get_cursor()
def gdb_load_gtfsdb(gdb, agency_namespace, gtfsdb, cursor, agency_id=None, maxtrips=None, sample_date=None, reporter=sys.stdout):
    # determine which service periods run on the given day, if a day is given
    if sample_date is not None:
        sample_date = datetime.date( *parse_gtfs_date( sample_date ) )
        acceptable_service_ids = gtfsdb.service_periods( sample_date )
        print "Importing only service periods operating on %s: %s"%(sample_date, acceptable_service_ids)
    else:
        acceptable_service_ids = None
    compiler = GTFSGraphCompiler( gtfsdb, agency_namespace, agency_id, reporter )
    c = gdb.get_cursor()
    v_added = set([])
    for fromv_label, tov_label, edge in compiler.gtfsdb_to_edges( maxtrips, service_ids=acceptable_service_ids ):
        if fromv_label not in v_added:
            gdb.add_vertex( fromv_label, c )
            v_added.add(fromv_label)
        if tov_label not in v_added:
            gdb.add_vertex( tov_label, c )
            v_added.add(tov_label)
        gdb.add_edge( fromv_label, tov_label, edge, c )
    gdb.commit()
gdb.add_vertex( fromv_label, c )
gdb.add_vertex( fromv_label, cursor )
def gdb_load_gtfsdb(gdb, agency_namespace, gtfsdb, cursor, agency_id=None, maxtrips=None, sample_date=None, reporter=sys.stdout): # determine which service periods run on the given day, if a day is given if sample_date is not None: sample_date = datetime.date( *parse_gtfs_date( sample_date ) ) acceptable_service_ids = gtfsdb.service_periods( sample_date ) print "Importing only service periods operating on %s: %s"%(sample_date, acceptable_service_ids) else: acceptable_service_ids = None compiler = GTFSGraphCompiler( gtfsdb, agency_namespace, agency_id, reporter ) c = gdb.get_cursor() v_added = set([]) for fromv_label, tov_label, edge in compiler.gtfsdb_to_edges( maxtrips, service_ids=acceptable_service_ids ): if fromv_label not in v_added: gdb.add_vertex( fromv_label, c ) v_added.add(fromv_label) if tov_label not in v_added: gdb.add_vertex( tov_label, c ) v_added.add(tov_label) gdb.add_edge( fromv_label, tov_label, edge, c ) gdb.commit()
gdb.add_vertex( tov_label, c )
gdb.add_vertex( tov_label, cursor )
def gdb_load_gtfsdb(gdb, agency_namespace, gtfsdb, cursor, agency_id=None, maxtrips=None, sample_date=None, reporter=sys.stdout): # determine which service periods run on the given day, if a day is given if sample_date is not None: sample_date = datetime.date( *parse_gtfs_date( sample_date ) ) acceptable_service_ids = gtfsdb.service_periods( sample_date ) print "Importing only service periods operating on %s: %s"%(sample_date, acceptable_service_ids) else: acceptable_service_ids = None compiler = GTFSGraphCompiler( gtfsdb, agency_namespace, agency_id, reporter ) c = gdb.get_cursor() v_added = set([]) for fromv_label, tov_label, edge in compiler.gtfsdb_to_edges( maxtrips, service_ids=acceptable_service_ids ): if fromv_label not in v_added: gdb.add_vertex( fromv_label, c ) v_added.add(fromv_label) if tov_label not in v_added: gdb.add_vertex( tov_label, c ) v_added.add(tov_label) gdb.add_edge( fromv_label, tov_label, edge, c ) gdb.commit()
gdb.add_edge( fromv_label, tov_label, edge, c ) gdb.commit()
gdb.add_edge( fromv_label, tov_label, edge, cursor )
def gdb_load_gtfsdb(gdb, agency_namespace, gtfsdb, cursor, agency_id=None, maxtrips=None, sample_date=None, reporter=sys.stdout): # determine which service periods run on the given day, if a day is given if sample_date is not None: sample_date = datetime.date( *parse_gtfs_date( sample_date ) ) acceptable_service_ids = gtfsdb.service_periods( sample_date ) print "Importing only service periods operating on %s: %s"%(sample_date, acceptable_service_ids) else: acceptable_service_ids = None compiler = GTFSGraphCompiler( gtfsdb, agency_namespace, agency_id, reporter ) c = gdb.get_cursor() v_added = set([]) for fromv_label, tov_label, edge in compiler.gtfsdb_to_edges( maxtrips, service_ids=acceptable_service_ids ): if fromv_label not in v_added: gdb.add_vertex( fromv_label, c ) v_added.add(fromv_label) if tov_label not in v_added: gdb.add_vertex( tov_label, c ) v_added.add(tov_label) gdb.add_edge( fromv_label, tov_label, edge, c ) gdb.commit()
if reporter and i%(n//100)==0: reporter.write( "%d/%d vertices dumped\n"%(i,n) )
if reporter and i%(nseg//100)==0: reporter.write( "%d/%d vertices dumped\n"%(i,n) )
def populate(self, graph, reporter=None): c = self.conn.cursor() n = len(graph.vertices) for i, vv in enumerate( graph.vertices ): if reporter and i%(n//100)==0: reporter.write( "%d/%d vertices dumped\n"%(i,n) ) c.execute( "INSERT INTO vertices VALUES (?)", (vv.label,) ) for ee in vv.outgoing: c.execute( "INSERT INTO edges VALUES (?, ?, ?, ?)", (ee.from_v.label, ee.to_v.label, cPickle.dumps( ee.payload.__class__ ), cPickle.dumps( ee.payload.__getstate__() ) ) ) if hasattr(ee.payload, "__resources__"): for name, resource in ee.payload.__resources__(): self.store( name, resource, c ) self.conn.commit() c.close() self.index()
id = self.index.nearest( (lon, lat), 1 )[0]
id = list(self.index.nearest( (lon, lat), 1 ))[0]
def nearest_node(self, lat, lon, range=0.005):
    c = self.get_cursor()
    if self.index:
        #print "YOUR'RE USING THE INDEX"
        id = self.index.nearest( (lon, lat), 1 )[0]
        #print "THE ID IS %d"%id
        c.execute( "SELECT id, lat, lon FROM nodes WHERE id = ?", (id,) )
    else:
        c.execute( "SELECT id, lat, lon FROM nodes WHERE endnode_refs > 1 AND lat > ? AND lat < ? AND lon > ? AND lon < ?", (lat-range, lat+range, lon-range, lon+range) )
    dists = [(nid, nlat, nlon, ((nlat-lat)**2+(nlon-lon)**2)**0.5) for nid, nlat, nlon in c]
    if len(dists)==0:
        return (None, None, None, None)
    return min( dists, key = lambda x:x[3] )
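The rem/add pair for this record wraps the spatial-index lookup in list(...) before indexing; if nearest() hands back a generator rather than a sequence, subscripting it directly fails. A self-contained sketch of that behaviour with a stand-in for the index (the names here are hypothetical):

def nearest(coords, num_results):
    # stand-in for a spatial index whose nearest() yields results lazily
    return (node_id for node_id in [42])

hits = nearest((0.0, 0.0), 1)
# hits[0] would raise TypeError because a generator is not subscriptable
print list(hits)[0]   # 42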
TestHeadwayAlight,
def glen(gen): return len(list(gen))
if not dryrun: superself.add_way( self.currElem, c )
if not dryrun and accept(self.currElem.tags): superself.add_way( self.currElem, c )
def endElement(self,name): if name=='node': if superself.n_nodes%5000==0: print "node %d"%superself.n_nodes superself.n_nodes += 1 if not dryrun: superself.add_node( self.currElem, c ) elif name=='way': if superself.n_ways%5000==0: print "way %d"%superself.n_ways superself.n_ways += 1 if not dryrun: superself.add_way( self.currElem, c )
log.debug(gtfs_header)
print(gtfs_header)
def load_gtfs_table_to_sqlite(fp, gtfs_basename, cc, header=None, verbose=False): """header is iterable of (fieldname, fieldtype, processing_function). For example, (("stop_sequence", "INTEGER", int),). "TEXT" is default fieldtype. Default processing_function is lambda x:x""" ur = UTF8TextFile( fp ) rd = csv.reader( ur ) # create map of field locations in gtfs header to field locations as specified by the table definition gtfs_header = [x.strip() for x in rd.next()] log.debug(gtfs_header) gtfs_field_indices = dict(zip(gtfs_header, range(len(gtfs_header)))) field_name_locations = [gtfs_field_indices[field_name] if field_name in gtfs_field_indices else None for field_name, field_type, field_converter in header] field_converters = [field_definition[2] for field_definition in header] field_operator = list(zip(field_name_locations, field_converters)) # populate stoptimes table insert_template = 'insert into %s (%s) values (%s)'%(gtfs_basename,",".join([x[0] for x in header]), ",".join(["?"]*len(header))) log.debug( insert_template ) for i, line in enumerate(rd): #log.debug( i%50, line ) if i%5000==0: log.debug(i) # carry on quietly if there's a blank line in the csv if line == []: continue _line = [] for i, converter in field_operator: if i is not None and line[i].strip() != "": if converter: _line.append( converter(line[i].strip()) ) else: _line.append( line[i].strip() ) else: _line.append( None ) cc.execute(insert_template, _line)
log.debug( insert_template )
print( insert_template )
def load_gtfs_table_to_sqlite(fp, gtfs_basename, cc, header=None, verbose=False): """header is iterable of (fieldname, fieldtype, processing_function). For example, (("stop_sequence", "INTEGER", int),). "TEXT" is default fieldtype. Default processing_function is lambda x:x""" ur = UTF8TextFile( fp ) rd = csv.reader( ur ) # create map of field locations in gtfs header to field locations as specified by the table definition gtfs_header = [x.strip() for x in rd.next()] log.debug(gtfs_header) gtfs_field_indices = dict(zip(gtfs_header, range(len(gtfs_header)))) field_name_locations = [gtfs_field_indices[field_name] if field_name in gtfs_field_indices else None for field_name, field_type, field_converter in header] field_converters = [field_definition[2] for field_definition in header] field_operator = list(zip(field_name_locations, field_converters)) # populate stoptimes table insert_template = 'insert into %s (%s) values (%s)'%(gtfs_basename,",".join([x[0] for x in header]), ",".join(["?"]*len(header))) log.debug( insert_template ) for i, line in enumerate(rd): #log.debug( i%50, line ) if i%5000==0: log.debug(i) # carry on quietly if there's a blank line in the csv if line == []: continue _line = [] for i, converter in field_operator: if i is not None and line[i].strip() != "": if converter: _line.append( converter(line[i].strip()) ) else: _line.append( line[i].strip() ) else: _line.append( None ) cc.execute(insert_template, _line)
if i%5000==0: log.debug(i)
if i%5000==0: print(i)
def load_gtfs_table_to_sqlite(fp, gtfs_basename, cc, header=None, verbose=False): """header is iterable of (fieldname, fieldtype, processing_function). For example, (("stop_sequence", "INTEGER", int),). "TEXT" is default fieldtype. Default processing_function is lambda x:x""" ur = UTF8TextFile( fp ) rd = csv.reader( ur ) # create map of field locations in gtfs header to field locations as specified by the table definition gtfs_header = [x.strip() for x in rd.next()] log.debug(gtfs_header) gtfs_field_indices = dict(zip(gtfs_header, range(len(gtfs_header)))) field_name_locations = [gtfs_field_indices[field_name] if field_name in gtfs_field_indices else None for field_name, field_type, field_converter in header] field_converters = [field_definition[2] for field_definition in header] field_operator = list(zip(field_name_locations, field_converters)) # populate stoptimes table insert_template = 'insert into %s (%s) values (%s)'%(gtfs_basename,",".join([x[0] for x in header]), ",".join(["?"]*len(header))) log.debug( insert_template ) for i, line in enumerate(rd): #log.debug( i%50, line ) if i%5000==0: log.debug(i) # carry on quietly if there's a blank line in the csv if line == []: continue _line = [] for i, converter in field_operator: if i is not None and line[i].strip() != "": if converter: _line.append( converter(line[i].strip()) ) else: _line.append( line[i].strip() ) else: _line.append( None ) cc.execute(insert_template, _line)
log.init("gtfsdb")
def __init__(self, sqlite_filename, overwrite=False):
    log.init("gtfsdb")
    self.dbname = sqlite_filename
    if overwrite:
        try:
            os.remove(sqlite_filename)
        except:
            pass
    self.conn = sqlite3.connect( sqlite_filename )
log.debug( "skipping table %s - not included in 'tables' list"%tablename )
print( "skipping table %s - not included in 'tables' list"%tablename )
for tablename, table_def in self.GTFS_DEF:
    if tables is not None and tablename not in tables:
        log.debug( "skipping table %s - not included in 'tables' list"%tablename )
        continue
log.debug( "creating table %s\n"%tablename )
print( "creating table %s\n"%tablename )
for tablename, table_def in self.GTFS_DEF:
    if tables is not None and tablename not in tables:
        log.debug( "skipping table %s - not included in 'tables' list"%tablename )
        continue
log.debug( "loading table %s\n"%tablename )
print( "loading table %s\n"%tablename )
create_table( c, tablename, table_def )
log.debug( "NOTICE: GTFS feed has no file %s.txt, cannot load\n"%tablename )
print( "NOTICE: GTFS feed has no file %s.txt, cannot load\n"%tablename )
create_table( c, tablename, table_def )
log.debug("usage: python gtfsdb.py gtfsdb_filename [query]")
print("usage: python gtfsdb.py gtfsdb_filename [query]")
def main_inspect_gtfsdb(): from sys import argv if len(argv) < 2: log.debug("usage: python gtfsdb.py gtfsdb_filename [query]") exit() gtfsdb_filename = argv[1] gtfsdb = GTFSDatabase( gtfsdb_filename ) if len(argv) == 2: for table_name, fields in gtfsdb.GTFS_DEF: log.debug("Table: %s"%table_name) for field_name, field_type, field_converter in fields: log.debug("\t%s %s"%(field_type, field_name)) exit() query = argv[2] for record in gtfsdb.execute( query ): log.debug(record) #for stop_id, stop_name, stop_lat, stop_lon in gtfsdb.stops(): # log.debug( stop_lat, stop_lon ) # gtfsdb.nearby_stops( stop_lat, stop_lon, 0.05 ) # break #bundles = gtfsdb.compile_trip_bundles() #for bundle in bundles: # for departure_set in bundle.iter_departures("WKDY"): # log.debug( departure_set ) # # #log.debug( len(bundle.trip_ids) ) # sys.stdout.flush() pass
log.debug("Table: %s"%table_name)
print("Table: %s"%table_name)
def main_inspect_gtfsdb(): from sys import argv if len(argv) < 2: log.debug("usage: python gtfsdb.py gtfsdb_filename [query]") exit() gtfsdb_filename = argv[1] gtfsdb = GTFSDatabase( gtfsdb_filename ) if len(argv) == 2: for table_name, fields in gtfsdb.GTFS_DEF: log.debug("Table: %s"%table_name) for field_name, field_type, field_converter in fields: log.debug("\t%s %s"%(field_type, field_name)) exit() query = argv[2] for record in gtfsdb.execute( query ): log.debug(record) #for stop_id, stop_name, stop_lat, stop_lon in gtfsdb.stops(): # log.debug( stop_lat, stop_lon ) # gtfsdb.nearby_stops( stop_lat, stop_lon, 0.05 ) # break #bundles = gtfsdb.compile_trip_bundles() #for bundle in bundles: # for departure_set in bundle.iter_departures("WKDY"): # log.debug( departure_set ) # # #log.debug( len(bundle.trip_ids) ) # sys.stdout.flush() pass
log.debug("\t%s %s"%(field_type, field_name))
print("\t%s %s"%(field_type, field_name))
def main_inspect_gtfsdb(): from sys import argv if len(argv) < 2: log.debug("usage: python gtfsdb.py gtfsdb_filename [query]") exit() gtfsdb_filename = argv[1] gtfsdb = GTFSDatabase( gtfsdb_filename ) if len(argv) == 2: for table_name, fields in gtfsdb.GTFS_DEF: log.debug("Table: %s"%table_name) for field_name, field_type, field_converter in fields: log.debug("\t%s %s"%(field_type, field_name)) exit() query = argv[2] for record in gtfsdb.execute( query ): log.debug(record) #for stop_id, stop_name, stop_lat, stop_lon in gtfsdb.stops(): # log.debug( stop_lat, stop_lon ) # gtfsdb.nearby_stops( stop_lat, stop_lon, 0.05 ) # break #bundles = gtfsdb.compile_trip_bundles() #for bundle in bundles: # for departure_set in bundle.iter_departures("WKDY"): # log.debug( departure_set ) # # #log.debug( len(bundle.trip_ids) ) # sys.stdout.flush() pass
log.debug(record)
print(record)
def main_inspect_gtfsdb(): from sys import argv if len(argv) < 2: log.debug("usage: python gtfsdb.py gtfsdb_filename [query]") exit() gtfsdb_filename = argv[1] gtfsdb = GTFSDatabase( gtfsdb_filename ) if len(argv) == 2: for table_name, fields in gtfsdb.GTFS_DEF: log.debug("Table: %s"%table_name) for field_name, field_type, field_converter in fields: log.debug("\t%s %s"%(field_type, field_name)) exit() query = argv[2] for record in gtfsdb.execute( query ): log.debug(record) #for stop_id, stop_name, stop_lat, stop_lon in gtfsdb.stops(): # log.debug( stop_lat, stop_lon ) # gtfsdb.nearby_stops( stop_lat, stop_lon, 0.05 ) # break #bundles = gtfsdb.compile_trip_bundles() #for bundle in bundles: # for departure_set in bundle.iter_departures("WKDY"): # log.debug( departure_set ) # # #log.debug( len(bundle.trip_ids) ) # sys.stdout.flush() pass
log.debug("Converts GTFS file to GTFS-DB, which is super handy\nusage: python process_gtfs.py gtfs_filename gtfsdb_filename")
print("Converts GTFS file to GTFS-DB, which is super handy\nusage: python process_gtfs.py gtfs_filename gtfsdb_filename")
def main_build_gtfsdb(): parser = OptionParser() parser.add_option("-t", "--table", dest="tables", action="append", default=[], help="copy over only the given tables") parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="make a bunch of noise" ) (options, args) = parser.parse_args() if len(options.tables)==0: options.tables=None if len(args) < 2: log.debug("Converts GTFS file to GTFS-DB, which is super handy\nusage: python process_gtfs.py gtfs_filename gtfsdb_filename") exit() gtfsdb_filename = args[1] gtfs_filename = args[0] gtfsdb = GTFSDatabase( gtfsdb_filename, overwrite=True ) gtfsdb.load_gtfs( gtfs_filename, options.tables, reporter=sys.stdout, verbose=options.verbose )
resource_count = list(c.execute( "SELECT count(*) FROM resources WHERE name = ?", (name,) ))[0][0]
resource_count = list(cc.execute( "SELECT count(*) FROM resources WHERE name = ?", (name,) ))[0][0]
def store(self, name, obj, c=None):
    cc = self.conn.cursor() if c is None else c
    resource_count = list(c.execute( "SELECT count(*) FROM resources WHERE name = ?", (name,) ))[0][0]
    if resource_count == 0:
        cc.execute( "INSERT INTO resources VALUES (?, ?)", (name, cPickle.dumps( obj )) )
        if not c:
            self.conn.commit()
    if not c:
        cc.close()
(lgs.cpSoul, c_void_p, [LGSTypes.CustomPayload]),
(lgs.cpSoul, py_object, [LGSTypes.CustomPayload]),
def _declare(fun, restype, argtypes):
    fun.argtypes = argtypes
    fun.restype = restype
    fun.safe = True
lgs.vecExpand( addressof(address), amount )
lgs.vecExpand( addressof(self), amount )
def expand(self, amount): lgs.vecExpand( addressof(address), amount )
lgs.vecAdd( addressof(address), element )
lgs.vecAdd( addressof(self), element )
def add(self, element): lgs.vecAdd( addressof(address), element )
if min_transfer_time == None:
if min_transfer_time in ("", None):
def gtfsdb_to_transfer_edges( self ): # load transfers if self.reporter: self.reporter.write( "Loading transfers to graph...\n" ) # keep track to avoid redundancies # this assumes that transfer relationships are bi-directional. # TODO this implementation is also incomplete - it's theoretically possible that # a transfers.txt table could contain "A,A,3,", which would mean you can't transfer # at A. seen = set([]) for stop_id1, stop_id2, conn_type, min_transfer_time in self.gtfsdb.execute( "SELECT * FROM transfers" ): s1 = "sta-%s"%stop_id1 s2 = "sta-%s"%stop_id2 # TODO - what is the semantics of this? see note above if s1 == s2: continue key = ".".join(sorted([s1,s2])) if key not in seen: seen.add(key) else: continue assert conn_type == None or type(conn_type) == int if conn_type in (0, None): # This is a recommended transfer point between two routes if min_transfer_time == None: yield (s1, s2, Link()) yield (s2, s1, Link()) else: yield (s1, s2, ElapseTime(min_transfer_time)) yield (s2, s1, ElapseTime(min_transfer_time)) elif conn_type == 1: # This is a timed transfer point between two routes yield (s1, s2, Link()) yield (s2, s1, Link()) elif conn_type == 2: # This transfer requires a minimum amount of time yield (s1, s2, ElapseTime(min_transfer_time)) yield (s2, s1, ElapseTime(min_transfer_time)) elif conn_type == 3: # Transfers are not possible between routes at this location. assert False, "Support for no-transfer (transfers.txt transfer_type=3) not implemented."
yield (s1, s2, ElapseTime(min_transfer_time)) yield (s2, s1, ElapseTime(min_transfer_time))
yield (s1, s2, ElapseTime(int(min_transfer_time))) yield (s2, s1, ElapseTime(int(min_transfer_time)))
def gtfsdb_to_transfer_edges( self ): # load transfers if self.reporter: self.reporter.write( "Loading transfers to graph...\n" ) # keep track to avoid redundancies # this assumes that transfer relationships are bi-directional. # TODO this implementation is also incomplete - it's theoretically possible that # a transfers.txt table could contain "A,A,3,", which would mean you can't transfer # at A. seen = set([]) for stop_id1, stop_id2, conn_type, min_transfer_time in self.gtfsdb.execute( "SELECT * FROM transfers" ): s1 = "sta-%s"%stop_id1 s2 = "sta-%s"%stop_id2 # TODO - what is the semantics of this? see note above if s1 == s2: continue key = ".".join(sorted([s1,s2])) if key not in seen: seen.add(key) else: continue assert conn_type == None or type(conn_type) == int if conn_type in (0, None): # This is a recommended transfer point between two routes if min_transfer_time == None: yield (s1, s2, Link()) yield (s2, s1, Link()) else: yield (s1, s2, ElapseTime(min_transfer_time)) yield (s2, s1, ElapseTime(min_transfer_time)) elif conn_type == 1: # This is a timed transfer point between two routes yield (s1, s2, Link()) yield (s2, s1, Link()) elif conn_type == 2: # This transfer requires a minimum amount of time yield (s1, s2, ElapseTime(min_transfer_time)) yield (s2, s1, ElapseTime(min_transfer_time)) elif conn_type == 3: # Transfers are not possible between routes at this location. assert False, "Support for no-transfer (transfers.txt transfer_type=3) not implemented."
yield (s1, s2, ElapseTime(min_transfer_time)) yield (s2, s1, ElapseTime(min_transfer_time))
yield (s1, s2, int(ElapseTime(min_transfer_time))) yield (s2, s1, int(ElapseTime(min_transfer_time)))
def gtfsdb_to_transfer_edges( self ): # load transfers if self.reporter: self.reporter.write( "Loading transfers to graph...\n" ) # keep track to avoid redundancies # this assumes that transfer relationships are bi-directional. # TODO this implementation is also incomplete - it's theoretically possible that # a transfers.txt table could contain "A,A,3,", which would mean you can't transfer # at A. seen = set([]) for stop_id1, stop_id2, conn_type, min_transfer_time in self.gtfsdb.execute( "SELECT * FROM transfers" ): s1 = "sta-%s"%stop_id1 s2 = "sta-%s"%stop_id2 # TODO - what is the semantics of this? see note above if s1 == s2: continue key = ".".join(sorted([s1,s2])) if key not in seen: seen.add(key) else: continue assert conn_type == None or type(conn_type) == int if conn_type in (0, None): # This is a recommended transfer point between two routes if min_transfer_time == None: yield (s1, s2, Link()) yield (s2, s1, Link()) else: yield (s1, s2, ElapseTime(min_transfer_time)) yield (s2, s1, ElapseTime(min_transfer_time)) elif conn_type == 1: # This is a timed transfer point between two routes yield (s1, s2, Link()) yield (s2, s1, Link()) elif conn_type == 2: # This transfer requires a minimum amount of time yield (s1, s2, ElapseTime(min_transfer_time)) yield (s2, s1, ElapseTime(min_transfer_time)) elif conn_type == 3: # Transfers are not possible between routes at this location. assert False, "Support for no-transfer (transfers.txt transfer_type=3) not implemented."
assert False, "Support for no-transfer (transfers.txt transfer_type=3) not implemented."
print "WARNING: Support for no-transfer (transfers.txt transfer_type=3) not implemented."
def gtfsdb_to_transfer_edges( self ): # load transfers if self.reporter: self.reporter.write( "Loading transfers to graph...\n" ) # keep track to avoid redundancies # this assumes that transfer relationships are bi-directional. # TODO this implementation is also incomplete - it's theoretically possible that # a transfers.txt table could contain "A,A,3,", which would mean you can't transfer # at A. seen = set([]) for stop_id1, stop_id2, conn_type, min_transfer_time in self.gtfsdb.execute( "SELECT * FROM transfers" ): s1 = "sta-%s"%stop_id1 s2 = "sta-%s"%stop_id2 # TODO - what is the semantics of this? see note above if s1 == s2: continue key = ".".join(sorted([s1,s2])) if key not in seen: seen.add(key) else: continue assert conn_type == None or type(conn_type) == int if conn_type in (0, None): # This is a recommended transfer point between two routes if min_transfer_time == None: yield (s1, s2, Link()) yield (s2, s1, Link()) else: yield (s1, s2, ElapseTime(min_transfer_time)) yield (s2, s1, ElapseTime(min_transfer_time)) elif conn_type == 1: # This is a timed transfer point between two routes yield (s1, s2, Link()) yield (s2, s1, Link()) elif conn_type == 2: # This transfer requires a minimum amount of time yield (s1, s2, ElapseTime(min_transfer_time)) yield (s2, s1, ElapseTime(min_transfer_time)) elif conn_type == 3: # Transfers are not possible between routes at this location. assert False, "Support for no-transfer (transfers.txt transfer_type=3) not implemented."
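The branches in the function above follow the transfer_type column of GTFS transfers.txt. A compact reference for the values the code distinguishes, per the GTFS specification:

TRANSFER_TYPES = {
    None: "recommended transfer point (blank field)",
    0:    "recommended transfer point",
    1:    "timed transfer point",
    2:    "transfer requires min_transfer_time seconds",
    3:    "transfers not possible between these stops",
}
print TRANSFER_TYPES[2]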
c.execute( "INSERT INTO edges VALUES (?, ?, ?, ?)", (from_v_label, to_v_label, cPickle.dumps( payload.__class__ ), cPickle.dumps( payload.__getstate__() ) ) )
epid = self.put_edge_payload( payload, c )
c.execute( "INSERT INTO edges VALUES (?, ?, ?)", (from_v_label, to_v_label, epid) )
def add_edge(self, from_v_label, to_v_label, payload, outside_c=None):
    c = outside_c or self.conn.cursor()

    c.execute( "INSERT INTO edges VALUES (?, ?, ?, ?)", (from_v_label, to_v_label, cPickle.dumps( payload.__class__ ), cPickle.dumps( payload.__getstate__() ) ) )

    if hasattr(payload, "__resources__"):
        for name, resource in payload.__resources__():
            self.store( name, resource )

    if outside_c is None:
        self.conn.commit()
        c.close()
if vertex_soul == NULL:
if vertex_soul is None:
def getVertex( self, i ):
    vertex_soul = lgs.pathGetVertex( addressof(self), i )

    # reinterpret the error code as an exception
    if vertex_soul == NULL:
        raise IndexError("%d is out of bounds"%i)

    return Vertex.from_pointer( vertex_soul )
if edge_soul == NULL:
if edge_soul is None:
def getEdge( self, i ):
    edge_soul = lgs.pathGetEdge( addressof(self), i )

    # reinterpret the error code as an exception
    if edge_soul == NULL:
        raise IndexError("%d is out of bounds"%i)

    return Edge.from_pointer( edge_soul )
return edge is not None and isinstance(edge.state, graphserver.core.TripAlight)
return edge is not None and isinstance(edge.payload, graphserver.core.TripAlight)
def applies_to(vertex1, edge, vertex2):
    return edge is not None and isinstance(edge.state, graphserver.core.TripAlight)
count = c_int()
count = c_long()
def vertices(self):
    self.check_destroyed()

    count = c_int()
    p_va = lgs.gVertices(self.soul, byref(count))
    verts = []
    arr = cast(p_va, POINTER(c_void_p)) # a bit of necessary voodoo
    for i in range(count.value):
        v = Vertex.from_pointer(arr[i])
        verts.append(v)
    del arr
    libc.free(p_va)
    return verts
main()
def main():
    usage = """usage: python dedupe.py <graphdb_filename>"""
    parser = OptionParser(usage=usage)

    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.print_help()
        exit(-1)

    graphdb_filename = args[0]

    gtfsdb = GTFSDatabase( graphdb_filename )

    query = """
    SELECT count(*), monday, tuesday, wednesday, thursday, friday, saturday, sunday, start_date, end_date
    FROM calendar
    GROUP BY monday, tuesday, wednesday, thursday, friday, saturday, sunday, start_date, end_date"""

    duped_periods = gtfsdb.execute( query )

    equivilants = []

    for count, m,t,w,th,f,s,su,start_date,end_date in duped_periods:
        # no need to check for dupes if there's only one
        if count==1:
            continue

        #print count, m, t, w, th, f, s, su, start_date, end_date

        # get service_ids for this dow/start_date/end_date combination
        service_ids = [x[0] for x in list( gtfsdb.execute( "SELECT service_id FROM calendar where monday=? and tuesday=? and wednesday=? and thursday=? and friday=? and saturday=? and sunday=? and start_date=? and end_date=?", (m,t,w,th,f,s,su,start_date,end_date) ) ) ]

        # group by service periods with the same set of exceptions
        exception_set_grouper = {}
        for service_id in service_ids:
            exception_set = list(gtfsdb.execute( "SELECT date, exception_type FROM calendar_dates WHERE service_id=?", (service_id,) ) )
            exception_set.sort()
            exception_set = tuple(exception_set)

            exception_set_grouper[exception_set] = exception_set_grouper.get(exception_set,[])
            exception_set_grouper[exception_set].append( service_id )

        # extend list of equivilants
        for i, exception_set_group in enumerate( exception_set_grouper.values() ):
            equivilants.append( ("%d%d%d%d%d%d%d-%s-%s-%d"%(m,t,w,th,f,s,su,start_date,end_date,i), exception_set_group) )

    for new_name, old_names in equivilants:
        for old_name in old_names:
            print old_name, new_name

            c = gtfsdb.conn.cursor()

            c.execute( "UPDATE calendar SET service_id=? WHERE service_id=?", (new_name, old_name) )
            c.execute( "UPDATE calendar_dates SET service_id=? WHERE service_id=?", (new_name, old_name) )
            c.execute( "UPDATE trips SET service_id=? WHERE service_id=?", (new_name, old_name) )

            gtfsdb.conn.commit()
            c.close()
(lgs.gAddVertices, None, [LGSTypes.Graph, c_char_p, c_int]),
(lgs.gAddVertices, None, [LGSTypes.Graph, POINTER(c_char_p), c_int]),
def _declare(fun, restype, argtypes):
    fun.argtypes = argtypes
    fun.restype = restype
    fun.safe = True
self.b *= 1.0 / sqrt(sqmag(self.b))
self.b.x *= 1.0 / sqrt(sqmag(self.b.x))
def normalize(self):
    self.b *= 1.0 / sqrt(sqmag(self.b))
img = hg.cvQueryFrame(self.cap)
img = hg.cvRetrieveFrame(self.cap)
def frame(self):
    if self.framepos == -1:
        raise Exception('call next before the first frame!')
    format = self.format
    img = hg.cvQueryFrame(self.cap)
    nchannels = 1 if format == FORMAT_GRAY else 3
    shape = \
        (img.height, img.width) if nchannels == 1 else \
        (img.height, img.width, nchannels)
    if format == FORMAT_BGR: # default format
        frame = np.ndarray(shape = shape, dtype = np.uint8, buffer = img.imageData)
        if self.own_data:
            frame = frame.copy()
        return frame
    size = cv.cvSize(img.width, img.height)
    img2 = cv.cvCreateImage(size, 8, nchannels)
    cvt_type = -1
    if format == FORMAT_GRAY:
        cvt_type = cv.CV_BGR2GRAY
    elif format == FORMAT_RGB:
        cvt_type = cv.CV_BGR2RGB
    elif format == FORMAT_HSV:
        cvt_type = cv.CV_BGR2HSV
    else:
        assert(0)
    cv.cvCvtColor(img, img2, cvt_type)
    frame = np.ndarray(shape = shape, dtype = np.uint8, buffer = img2.imageData)
    if self.own_data:
        frame = frame.copy()
    return frame
def __init__(self, *args):
def __init__(self, fname, format=FORMAT_RGB):
def __init__(self, *args):
    self.create_capture = hg.cvCreateFileCapture
    super(video_file, self).__init__(*args)
super(video_file, self).__init__(*args)
super(video_file, self).__init__(format, fname)
def __init__(self, *args):
    self.create_capture = hg.cvCreateFileCapture
    super(video_file, self).__init__(*args)
if pos < self.pos:
if pos < self.framepos:
def seek(self, pos):
    #if pos < self.pos:
    #    self._destr_cap()
    #    self._init_cap()
    if pos < self.pos:
        hg.cvSetCaptureProperty(self.cap, hg.CV_CAP_PROP_POS_FRAMES, 0.0)
        self.framepos = -1
    return super(video_file, self).seek(pos)
def correlate(input, kernel, output=None, accumulate=False):
    out_shape = tuple(np.subtract(input.shape, kernel.shape) + 1)
    if output is None:
        output = np.zeros(out_shape, input.dtype)
    assert (out_shape == output.shape), "shapes don't match"
    uin = input
    for d, kd in enumerate(kernel.shape):
        uin = unfold(uin, d, kd, 1)
    m2kdotmk(uin, kernel, output, accumulate)
    return output
pass

def gen_correlate_scipy(input, kernel, output=None, accumulate=False):
    y = sig_correlate(input, kernel, 'valid')
    if output is None:
        output = y
    elif accumulate:
        output += y
    else:
        output[:] = y
    return output

def gen_correlate_noscipy(input, kernel, output=None, accumulate=False):
    out_shape = tuple(np.subtract(input.shape, kernel.shape) + 1)
    if output is None:
        output = np.zeros(out_shape, input.dtype)
    assert (out_shape == output.shape), "shapes don't match"
    uin = input
    for d, kd in enumerate(kernel.shape):
        uin = unfold(uin, d, kd, 1)
    m2kdotmk(uin, kernel, output, accumulate)
    return output

if sig_correlate is None:
    correlate = gen_correlate_noscipy
else:
    correlate = gen_correlate_scipy
def correlate(input, kernel, output=None, accumulate=False):
    y = sig_correlate(input, kernel, 'valid')
    if output is None:
        output = y
    elif accumulate:
        output += y
    else:
        output[:] = y
    return output
if pos < self.framepos:
if pos <= self.framepos:
def seek(self, pos):
    #if pos < self.framepos:
    #    self._destr_cap()
    #    self._init_cap()
    if pos < self.framepos:
        hg.cvSetCaptureProperty(self.cap, hg.CV_CAP_PROP_POS_FRAMES, 0.0)
        self.framepos = -1
    return super(video_file, self).seek(pos)
self.tbl_jik = sp.asarray([(b,a,k) for (k,(a,b)) in e(conn_table)], int)
self.tbl_jik = sp.asarray([(b,a,k) for (k,(a,b)) in e(conn_table)], 'i')
def __init__(self, kernel_shape, conn_table):
    super(back_convolution, self).__init__(kernel_shape, conn_table)
def _step_direction(self, p):
def _perform_step(self, p, grad, coeff):
    states = p.states
    for (g, state) in zip(grad,states):
        state.x += coeff * g

def _step_direction(self, p, dostep = True):
def iterstats(self):
    return {'grad norm': self.cur_grad_norm}