rem: string, lengths 0 to 322k
add: string, lengths 0 to 2.05M
context: string, lengths 8 to 228k
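Each row below pairs a removed hunk (rem) and an added hunk (add) with the surrounding code (context). A minimal sketch of that record shape follows; the ChangeRecord class and the summarize helper are illustrative assumptions, only the three string fields come from the column listing above, and the example values are copied from the first row below.

from dataclasses import dataclass

@dataclass
class ChangeRecord:
    # Hypothetical container; only the three string fields are taken from the listing above.
    rem: str      # hunk removed by the change (may be empty)
    add: str      # hunk added by the change (may be empty)
    context: str  # surrounding code, typically the enclosing function

def summarize(record: ChangeRecord) -> str:
    # One-line size summary of a record.
    return "-%d/+%d chars (context %d chars)" % (
        len(record.rem), len(record.add), len(record.context))

# Example built from the first row below.
example = ChangeRecord(
    rem="self['sim_image'].set_from_file(self.Sim_Image)",
    add="self['sim_image'].set_from_file(self.sim_image)",
    context="def setup_view(self): ...",
)
print(summarize(example))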
self['sim_image'].set_from_file(self.Sim_Image)
self['sim_image'].set_from_file(self.sim_image)
def setup_view(self): self.get_top_widget().set_position(gtk.WIN_POS_CENTER_ON_PARENT) self['sim_image'].set_from_file(self.Sim_Image) self['payt_image'].set_from_file(self.Computer_Image) self['credit_card_image'].set_from_file(self.Modem_Image) self['voucher_image'].set_from_file(self.Betavine_Image)
logger.info("MSISDN from model cache %s: " % self.msisdn)
logger.info("MSISDN from model cache: %s" % self.msisdn)
def get_msisdn(self, cb): if self.msisdn: logger.info("MSISDN from model cache %s: " % self.msisdn) cb(self.msisdn) return
logger.info("MSISDN from gconf %s: " % msisdn)
logger.info("MSISDN from gconf: %s" % msisdn)
def get_imsi_cb(imsi): if imsi: msisdn = self.conf.get("sim/%s" % imsi, 'msisdn') if msisdn: logger.info("MSISDN from gconf %s: " % msisdn) self.msisdn = msisdn cb(self.msisdn) return
if 'ppp' in settings: uuid = settings['connection']['uuid'] ret[uuid] = ProfileModel(self, profile=profile, device_callable=self.device_callable, parent_model_callable=self.parent_model_callable)
uuid = settings['connection']['uuid'] ret[uuid] = ProfileModel(self, profile=profile, device_callable=self.device_callable, parent_model_callable=self.parent_model_callable)
def get_profiles(self): ret = {} for profile in self.manager.get_profiles(): settings = profile.get_settings() # filter out wlan profiles if 'ppp' in settings: uuid = settings['connection']['uuid'] ret[uuid] = ProfileModel(self, profile=profile, device_callable=self.device_callable, parent_model_callable=self.parent_model_callable) return ret
if other is None: return True return self.uuid != other.uuid
return not self.__eq__(other)
def __ne__(self, other): if other is None: return True return self.uuid != other.uuid
self.username = settings['gsm']['username']
self.username = settings['gsm'].get('username', '')
def _load_settings(self, settings): try: self.uuid = settings['connection']['uuid'] self.name = settings['connection']['id'] self.username = settings['gsm']['username'] self.apn = settings['gsm']['apn'] self.autoconnect = settings['connection'].get('autoconnect', False) self.static_dns = settings['ipv4'].get('ignore-auto-dns') if settings['ipv4'].get('dns', None): dns = settings['ipv4'].get('dns') self.primary_dns = dns[0] if len(dns) > 1: self.secondary_dns = dns[1]
if settings['gsm'].get('password', None):
if settings['gsm'].get('password') is not None:
def _load_settings(self, settings): try: self.uuid = settings['connection']['uuid'] self.name = settings['connection']['id'] self.username = settings['gsm']['username'] self.apn = settings['gsm']['apn'] self.autoconnect = settings['connection'].get('autoconnect', False) self.static_dns = settings['ipv4'].get('ignore-auto-dns') if settings['ipv4'].get('dns', None): dns = settings['ipv4'].get('dns') self.primary_dns = dns[0] if len(dns) > 1: self.secondary_dns = dns[1]
'band': self.band, 'username': self.username,
def save(self): props = { 'connection': { 'name': 'connection', 'id': self.name, 'type': 'gsm', 'uuid': self.uuid, 'autoconnect': self.autoconnect}, 'gsm': { 'name': 'gsm', 'band': self.band, 'username': self.username, 'number': '*99#', 'network-type': self.network_pref, 'apn': self.apn}, 'ppp': { 'name': 'ppp', 'refuse-pap': True, 'refuse-chap': True, 'refuse-eap': True, 'refuse-mschap': True, 'refuse-mschapv2': True}, 'serial': { 'name': 'serial', 'baud': 115200}, 'ipv4': { 'name': 'ipv4', 'addresses': [], 'method': 'auto', 'ignore-auto-dns': self.static_dns, 'routes': []}, }
'network-type': self.network_pref,
def save(self): props = { 'connection': { 'name': 'connection', 'id': self.name, 'type': 'gsm', 'uuid': self.uuid, 'autoconnect': self.autoconnect}, 'gsm': { 'name': 'gsm', 'band': self.band, 'username': self.username, 'number': '*99#', 'network-type': self.network_pref, 'apn': self.apn}, 'ppp': { 'name': 'ppp', 'refuse-pap': True, 'refuse-chap': True, 'refuse-eap': True, 'refuse-mschap': True, 'refuse-mschapv2': True}, 'serial': { 'name': 'serial', 'baud': 115200}, 'ipv4': { 'name': 'ipv4', 'addresses': [], 'method': 'auto', 'ignore-auto-dns': self.static_dns, 'routes': []}, }
if props['gsm']['band'] is None: del props['gsm']['band'] if props['gsm']['network-type'] is None: del props['gsm']['network-type']
def save(self): props = { 'connection': { 'name': 'connection', 'id': self.name, 'type': 'gsm', 'uuid': self.uuid, 'autoconnect': self.autoconnect}, 'gsm': { 'name': 'gsm', 'band': self.band, 'username': self.username, 'number': '*99#', 'network-type': self.network_pref, 'apn': self.apn}, 'ppp': { 'name': 'ppp', 'refuse-pap': True, 'refuse-chap': True, 'refuse-eap': True, 'refuse-mschap': True, 'refuse-mschapv2': True}, 'serial': { 'name': 'serial', 'baud': 115200}, 'ipv4': { 'name': 'ipv4', 'addresses': [], 'method': 'auto', 'ignore-auto-dns': self.static_dns, 'routes': []}, }
logger.info("main-controller: property_device_value_change - device : " + self.model.device)
logger.info("main-controller: property_device_value_change")
def property_device_value_change(self, model, old, new): if self.model.device is not None: sm = self.model.device.connect_to_signal("DeviceEnabled", self.on_device_enabled_cb) self.signal_matches.append(sm) # connect to SIG_SMS_COMP and display SMS sm = self.model.device.connect_to_signal(SIG_SMS_COMP, self.on_sms_received_cb) self.signal_matches.append(sm) else: while self.signal_matches: sm = self.signal_matches.pop() sm.remove() self.view['connect_button'].set_sensitive(False) self.view['topup_tool_button'].set_sensitive(False) logger.info("main-controller: property_device_value_change - device : " + self.model.device)
if 1:
if 0:
def nick_debug(s): if 1: with open(BCM_FAST_LOG, 'a', 0) as f: f.write("%s\n" % s)
logger.info("main.py: controller - property_profile value changed - begining method")
logger.info("main.py: controller - property_profile value changed" " - begining method")
def property_profile_required_value_change(self, model, old, new): logger.info("main.py: controller - property_profile value changed - begining method") if new: logger.info("main.py: controller - property_profile value changed - calling 'ask_for_new_profile' ") self.ask_for_new_profile()
logger.info("main.py: controller - property_profile value changed - calling 'ask_for_new_profile' ")
logger.info("main.py: controller - property_profile value " "changed - calling 'ask_for_new_profile' ")
def property_profile_required_value_change(self, model, old, new): logger.info("main.py: controller - property_profile value changed - begining method") if new: logger.info("main.py: controller - property_profile value changed - calling 'ask_for_new_profile' ") self.ask_for_new_profile()
self.view.set_disconnected(device_present=False)
self.view.set_disconnected()
def property_status_value_change(self, model, old, new): if new == _('Initialising'): self.view.set_initialising(True) elif new == _('No device'): self.view.set_disconnected(device_present=False) elif new in [_('Registered'), _('Roaming')]: self.view.set_initialising(False)
self['button2'].set_sensitive(sensitive)
self['voucher_button'].set_sensitive(sensitive)
def enable_send_button(self, sensitive): self['button2'].set_sensitive(sensitive)
if song['file'] in pl: client.play(pl.index(song['file']))
if(pl): idx = client.playlist().index(client.currentsong()['file']) client.addid(song['file'], idx+1) client.play(idx+1)
def playSong(key): client.connect(mpdHost, mpdPort) client.password(mpdPass) songs = songLookup[key] song = songs[random.randint(0, len(songs)-1)] pl = client.playlist() if song['file'] in pl: client.play(pl.index(song['file'])) else: client.add(song['file']) client.play(len(pl)) #length one higher now, with added song client.close() client.disconnect()
client.play(len(pl))
client.play()
def playSong(key): client.connect(mpdHost, mpdPort) client.password(mpdPass) songs = songLookup[key] song = songs[random.randint(0, len(songs)-1)] pl = client.playlist() if song['file'] in pl: client.play(pl.index(song['file'])) else: client.add(song['file']) client.play(len(pl)) #length one higher now, with added song client.close() client.disconnect()
first_frame=df.nframes-1, num_frames=1) sch0 = schedule[0]
first_frame=df.nframes-1, num_frames=1)[0]
def playSong(key): client.connect(mpdHost, mpdPort) client.password(mpdPass) songs = songLookup[key] song = songs[random.randint(0, len(songs)-1)] pl = client.playlist() if song['file'] in pl: client.play(pl.index(song['file'])) else: client.add(song['file']) client.play(len(pl)) #length one higher now, with added song client.close() client.disconnect()
while lst > sch0[0]: try: sch0 = schedule.pop(0) except IndexError: print "Schedule out of commands! Do something! Forget about music!" sys.exit(1) print "Passed threshold", sch0[0], "<", lst, "for region", sch0[1] newsong = True if newsong: for region in songLookup.keys: if sch0[1].find(region) >= 0: print "New song!", sch0[1], "matches", region playSong(region) break else: print "Could not find song match for", sch0[1] time.sleep(5)
try: while lst > schedule[1][0]: schedule.pop(0) print "Passed threshold", schedule[0][0], "<", lst, \ "for region", schedule[0][1] newsong = True if newsong: for region in songLookup.iterkeys(): if schedule[0][1].find(region) >= 0: print "New song!", schedule[0][1], "matches", region playSong(region) break else: print "Could not find song match for", schedule[0][1] time.sleep(5) except IndexError: print "Schedule out of commands! Do something! Forget about music!" sys.exit(1)
def playSong(key): client.connect(mpdHost, mpdPort) client.password(mpdPass) songs = songLookup[key] song = songs[random.randint(0, len(songs)-1)] pl = client.playlist() if song['file'] in pl: client.play(pl.index(song['file'])) else: client.add(song['file']) client.play(len(pl)) #length one higher now, with added song client.close() client.disconnect()
if(pl): idx = client.playlist().index(client.currentsong()['file'])
cur = client.currentsong() if(pl and cur): idx = client.playlist().index(cur['file'])
def playSong(key): client.connect(mpdHost, mpdPort) client.password(mpdPass) songs = songLookup[key] song = songs[random.randint(0, len(songs)-1)] pl = client.playlist() if(pl): idx = client.playlist().index(client.currentsong()['file']) client.addid(song['file'], idx+1) client.play(idx+1) #length one higher now, with added song else: client.add(song['file']) client.play() client.close() client.disconnect()
client.play()
client.play(len(pl))
def playSong(key): client.connect(mpdHost, mpdPort) client.password(mpdPass) songs = songLookup[key] song = songs[random.randint(0, len(songs)-1)] pl = client.playlist() if(pl): idx = client.playlist().index(client.currentsong()['file']) client.addid(song['file'], idx+1) client.play(idx+1) #length one higher now, with added song else: client.add(song['file']) client.play() client.close() client.disconnect()
value = str(value) + ' ' + DateTime().localZone()
value = str(value) + ' +00:00'
def retrieveData(self): sfbc = getToolByName(self.context, 'portal_salesforcebaseconnector') sfa = self.getRelevantSFAdapter() if sfa is None: return {} sObjectType = sfa.getSFObjectType() econtext = getExprContext(sfa) econtext.setGlobal('sanitize_soql', sanitize_soql) updateMatchExpression = sfa.getUpdateMatchExpression(expression_context = econtext) mappings = sfa.getFieldMap()
try: self.sse_supported.index (TARGET [2]) if check_cflags ("-mfpmath=sse", "CFLAGS"): add_config_h ("TUNE_SSE") except: pass
def startup (self): global CXXFLAGS, TARGET
cpu = platform.processor () if (not cpu) or (len (cpu) == 0): cpu = platform.machine () TARGET.append (cpu.replace ("-", "_"))
TARGET.append (platform.machine ())
def detect_platform (): global HOST, TARGET, DEVNULL if sys.platform [:3] == "win": HOST = ["windows"] TARGET = ["windows"] DEVNULL = "nul" elif sys.platform [:6] == "darwin": HOST = ["mac"] TARGET = ["mac"] DEVNULL = "/dev/null" else: # Default to POSIX HOST = ["posix"] TARGET = ["posix"] DEVNULL = "/dev/null" arch = platform.machine () # Python 2.5 on windows returns empty string here if (arch == "") or \ (len (arch) >= 4 and arch [0] == "i" and arch [2:4] == "86" and arch [4:] == ""): arch = "x86" HOST.append (arch) TARGET.append (arch) cpu = platform.processor () if (not cpu) or (len (cpu) == 0): cpu = platform.machine () TARGET.append (cpu.replace ("-", "_")) # HOST contains ["platform", "arch"] # TARGET contains ["platform", "arch", "tune"]
if var == "CFLAGS":
if var.endswith ("CFLAGS"):
def check_cflags (cflags, var, xcflags = ""): """Check if compiler supports certain flags. This is done by creating a dummy source file and trying to compile it using the requested flags. :Parameters: `cflags` : str The compiler flags to check for `var` : str The name of the variable to append the flags to in the case if the flags are supported (e.g. "CFLAGS", "CXXFLAGS" etc) `xcflags` : str Additional flags to use during test compilation. These won't be appended to var. :Returns: True if the flags are supported, False if not. """ check_started ("Checking if compiler supports " + cflags) if var == "CFLAGS": srcf = "conftest.c" else: srcf = "conftest.cpp" write_file (srcf, """
add_config_h ("HAVE_" + make_identifier (hdr))
add_config_h ("HAVE_" + make_identifier (hdr.replace (".", "_")))
def check_header (hdr, cflags = None, reqtext = None): """Check if a header file is available to the compiler. This is done by creating a dummy source file and trying to compile it. :Parameters: `hdr` : str The name of the header file to check for `reqtext` : str The message to print in the fatal error message when the header file is not available. If it is None, the missing file is not considered a fatal error. :Returns: True if header file exists, False if not. """ rc = False check_started ("Checking for header file " + hdr) write_file ("conftest.c", """
"gcc": lambda: compiler_gcc (), "msvc": lambda: compiler_msvc (),
"GCC": lambda: compiler_gcc (), "MSVC": lambda: compiler_msvc (),
def linklib (self, library, path = None): tmp = "" if path: tmp = "-libpath:" + path.replace ('/', '\\\\') + " " return tmp + ".lib ".join (library.split ()) + ".lib"
global HOST, TARGET, EXE, TOOLKIT
global HOST, TARGET, EXE, TOOLKIT, COMPILER
def start (): global HOST, TARGET, EXE, TOOLKIT global PREFIX, BINDIR, LIBDIR, SYSCONFDIR, DATADIR, DOCDIR global INCLUDEDIR, LIBEXECDIR, SHAREDLIBS detect_platform () # Read environment variables first for e in ENVARS: globals () [e [0]] = os.getenv (e [0], e [1]) # Parse command-line skip_opt = False for optidx in range (1, len (sys.argv)): if skip_opt: skip_opt = False continue opt = sys.argv [optidx] optarg = None opt_ok = False if opt [:2] == "--": opt = opt [2:] opt_short = False for o in OPTIONS: if o [1] and o [1] == opt [:len (o [1])]: optarg = opt [len (o [1]):] if optarg [:1] == '=' or len (opt) == len (o [1]): opt_ok = True optarg = optarg [1:] break elif opt [:1] == "-": opt = opt [1:] opt_short = True for o in OPTIONS: if o [0] and o [0] == opt: opt_ok = True break if not opt_ok: print "Unknown command-line option: '" + opt + "'" sys.exit (1) # Check if option needs argument if o [2] and opt_short: if optidx >= len (sys.argv): print "Option '" + opt + "' needs an argument" sys.exit (1) skip_opt = True optarg = sys.argv [optidx + 1] if not o [2] and optarg: print "Option '" + opt + "' does not accept an argument" sys.exit (1) exec o [3] # Print the host and target platforms print "Compiling on host " + ".".join (HOST) + " for target " + \ ".".join (TARGET [:2]) + " (tune for " + TARGET [2] + ")" # Now set target-specific defaults if TARGET [0] == "windows": EXE = ".exe" else: EXE = "" add_config_h ("CONF_PACKAGE", '"' + PROJ + '"') add_config_mak ("CONF_PACKAGE", PROJ) add_config_h ("CONF_VERSION", '"' + VERSION + '"') add_config_mak ("CONF_VERSION", VERSION) add_config_h ("PLATFORM_" + TARGET [0].upper ()) add_config_h ("ARCH_" + TARGET [1].upper ()) add_config_h ("TUNE_" + TARGET [2].upper ()) add_config_mak ("HOST", HOST [0]) add_config_mak ("TARGET", TARGET [0]) add_config_mak ("ARCH", TARGET [1]) add_config_mak ("TUNE", TARGET [2]) if BINDIR is None: BINDIR = PREFIX + "/bin" if SYSCONFDIR is None: SYSCONFDIR = PREFIX + "/etc/" + PROJ if DATADIR is None: DATADIR = PREFIX + "/share/" + PROJ if DOCDIR is None: DOCDIR = PREFIX + "/share/doc/" + PROJ + "-" + VERSION # http://www.pathname.com/fhs/pub/fhs-2.3.html#LIB64 if LIBDIR is None: if TARGET [1] [-2:] == "64": LIBDIR = PREFIX + "/lib64" # Debian doesn't follow LFS in this regard try: os.stat (LIBDIR) except: LIBDIR = PREFIX + "/lib" else: LIBDIR = PREFIX + "/lib" if INCLUDEDIR is None: INCLUDEDIR = PREFIX + "/include" if LIBEXECDIR is None: LIBEXECDIR = PREFIX + "/libexec/" + PROJ # Instantiate the compiler-dependent class TOOLKIT = COMPILERS.get (COMPILER); if not TOOLKIT: print "Unsupported compiler: " + COMPILER sys.exit (1) TOOLKIT = TOOLKIT () TOOLKIT.startup () add_config_h ("CONF_COMPILER_" + COMPILER) if PREFIX != "": add_config_h ("CONF_PREFIX", '"' + PREFIX + '"') add_config_mak ("CONF_PREFIX", PREFIX + '/') if BINDIR != "": add_config_h ("CONF_BINDIR", '"' + BINDIR + '"') add_config_mak ("CONF_BINDIR", BINDIR + '/') if SYSCONFDIR != "": add_config_h ("CONF_SYSCONFDIR", '"' + SYSCONFDIR + '"') add_config_mak ("CONF_SYSCONFDIR", SYSCONFDIR + '/') if DATADIR != "": add_config_h ("CONF_DATADIR", '"' + DATADIR + '"') add_config_mak ("CONF_DATADIR", DATADIR + '/') if LIBDIR != "": add_config_h ("CONF_LIBDIR", '"' + LIBDIR + '"') add_config_mak ("CONF_LIBDIR", LIBDIR + '/') if INCLUDEDIR != "": add_config_h ("CONF_INCLUDEDIR", '"' + INCLUDEDIR + '"') add_config_mak ("CONF_INCLUDEDIR", INCLUDEDIR + '/') if DOCDIR != "": add_config_h ("CONF_DOCDIR", '"' + DOCDIR + '"') add_config_mak ("CONF_DOCDIR", DOCDIR + '/') if 
LIBEXECDIR != "": add_config_h ("CONF_LIBEXECDIR", '"' + LIBEXECDIR + '"') add_config_mak ("CONF_LIBEXECDIR", LIBEXECDIR + '/') if SHAREDLIBS: add_config_h ("CONF_SHAREDLIBS", "1") add_config_mak ("SHAREDLIBS", "1") else: add_config_h ("CONF_SHAREDLIBS", "0") add_config_mak ("SHAREDLIBS", "")
cpu = platform.processor ().replace ("-", "_") TARGET.append (cpu)
cpu = platform.processor () if (not cpu) or (len (cpu) == 0): cpu = platform.machine () TARGET.append (cpu.replace ("-", "_"))
def detect_platform (): global HOST, TARGET, DEVNULL if sys.platform [:3] == "win": HOST = ["windows"] TARGET = ["windows"] DEVNULL = "nul" elif sys.platform [:6] == "darwin": HOST = ["mac"] TARGET = ["mac"] DEVNULL = "/dev/null" else: # Default to POSIX HOST = ["posix"] TARGET = ["posix"] DEVNULL = "/dev/null" arch = platform.machine () # Python 2.5 on windows returns empty string here if (arch == "") or \ (len (arch) >= 4 and arch [0] == "i" and arch [2:4] == "86" and arch [4:] == ""): arch = "x86" HOST.append (arch) TARGET.append (arch) cpu = platform.processor ().replace ("-", "_") TARGET.append (cpu) # HOST contains ["platform", "arch"] # TARGET contains ["platform", "arch", "tune"]
global CONFIG_H
global CONFIG_H, _CONFIG_H
def add_config_h (macro, val = "1"): global CONFIG_H macro = macro.strip () CONFIG_H [macro] = val.strip () _CONFIG_H.append (macro)
global CONFIG_MAK
global CONFIG_MAK, _CONFIG_MAK
def add_config_mak (macro, val = "1"): global CONFIG_MAK macro = macro.strip () CONFIG_MAK [macro] = val.strip () _CONFIG_MAK.append (macro)
line = fd.readline ().strip ()
lines = fd.readlines ()
def check_program (name, prog, ver_regex, req_version, failifnot = False): check_started ("Checking for " + name + " >= " + req_version) rc = False version = None try: fd = os.popen (prog + " 2>&1") line = fd.readline ().strip () fd.close () if VERBOSE: print "\n# '" + prog + "' returned '" + line + "'" m = re.match (ver_regex, line) if not m: raise version = m.group (1) if not compare_version (version, req_version): raise check_finished (version + ", OK") rc = True except: if version: check_finished (version + " < " + req_version + ", FAILED") else: check_finished ("FAILED") if not rc and failifnot: print "\n" + name + " version " + req_version + " and above is required to build this project" sys.exit (1) return rc
if VERBOSE: print "\n m = re.match (ver_regex, line)
for line in lines: line = line.strip () if VERBOSE: print "\n# '" + prog + "' returned '" + line + "'" m = re.match (ver_regex, line) if m: version = m.group (1) if not compare_version (version, req_version): raise break
def check_program (name, prog, ver_regex, req_version, failifnot = False): check_started ("Checking for " + name + " >= " + req_version) rc = False version = None try: fd = os.popen (prog + " 2>&1") line = fd.readline ().strip () fd.close () if VERBOSE: print "\n# '" + prog + "' returned '" + line + "'" m = re.match (ver_regex, line) if not m: raise version = m.group (1) if not compare_version (version, req_version): raise check_finished (version + ", OK") rc = True except: if version: check_finished (version + " < " + req_version + ", FAILED") else: check_finished ("FAILED") if not rc and failifnot: print "\n" + name + " version " + req_version + " and above is required to build this project" sys.exit (1) return rc
raise version = m.group (1) if not compare_version (version, req_version):
def check_program (name, prog, ver_regex, req_version, failifnot = False): check_started ("Checking for " + name + " >= " + req_version) rc = False version = None try: fd = os.popen (prog + " 2>&1") line = fd.readline ().strip () fd.close () if VERBOSE: print "\n# '" + prog + "' returned '" + line + "'" m = re.match (ver_regex, line) if not m: raise version = m.group (1) if not compare_version (version, req_version): raise check_finished (version + ", OK") rc = True except: if version: check_finished (version + " < " + req_version + ", FAILED") else: check_finished ("FAILED") if not rc and failifnot: print "\n" + name + " version " + req_version + " and above is required to build this project" sys.exit (1) return rc
update_file (outfile, content)
return update_file (outfile, content)
def substmacros (infile, outfile = None, macros = None): if not outfile: if infile.endswith (".in"): outfile = infile [:-3] else: abort_configure (
self._chunks = []
def _read_member_header(self): """Fills self._chlen and self._chunks by the read header data. """ header = _read_gzip_header(self._fileobj) offset = self._fileobj.tell() if "RA" not in header["extra_field"]: raise IOError("Not an idzip file: %r" % self.name)
_read_member_header()
self._read_member_header()
def _readchunk(self, chunk_index): """Reads the specified chunk or throws EOFError. """ while chunk_index >= len(self._chunks): self._reach_member_end() _read_member_header()
extra += deobj.decompress(self.fileobj.read(3))
extra += deobj.decompress(self._fileobj.read(3))
def _reach_member_end(self): """Seeks the _fileobj at the end of the last known member. """ offset, comp_len = self._chunks[-1] self._fileobj.seek(offset + comp_len) # The zlib stream could end with an empty block. deobj = zlib.decompressobj(-zlib.MAX_WBITS) extra = "" while deobj.unused_data == "" and not extra: extra += deobj.decompress(self.fileobj.read(3))
comp_lenghts_pos = _prepare_header(output, in_size, basename, mtime)
comp_lengths_pos = _prepare_header(output, in_size, basename, mtime)
def _compress_member(input, in_size, output, basename, mtime): comp_lenghts_pos = _prepare_header(output, in_size, basename, mtime) comp_lengths = _compress_data(input, in_size, output) end_pos = output.tell() output.seek(comp_lenghts_pos) for comp_len in comp_lengths: _write16(output, comp_len) output.seek(end_pos)
output.seek(comp_lenghts_pos)
output.seek(comp_lengths_pos)
def _compress_member(input, in_size, output, basename, mtime): comp_lenghts_pos = _prepare_header(output, in_size, basename, mtime) comp_lengths = _compress_data(input, in_size, output) end_pos = output.tell() output.seek(comp_lenghts_pos) for comp_len in comp_lengths: _write16(output, comp_len) output.seek(end_pos)
comp_lenghts_pos = _write_extra_fields(output, in_size)
comp_lengths_pos = _write_extra_fields(output, in_size)
def _prepare_header(output, in_size, basename, mtime): """Returns a prepared gzip header StringIO. The gzip header is defined in RFC 1952. """ output.write("\x1f\x8b\x08") # Gzip-deflate identification flags = FEXTRA if basename: flags |= FNAME output.write(chr(flags)) # The mtime will be undefined if it does not fit. if mtime > 0xffffffffL: mtime = 0 _write32(output, mtime) deflate_flags = "\0" if COMPRESSION_LEVEL == zlib.Z_BEST_COMPRESSION: deflate_flags = "\x02" # slowest compression algorithm output.write(deflate_flags) output.write(chr(OS_CODE_UNIX)) comp_lenghts_pos = _write_extra_fields(output, in_size) if basename: output.write(basename + '\0') # original basename return comp_lenghts_pos
return comp_lenghts_pos
return comp_lengths_pos
def _prepare_header(output, in_size, basename, mtime): """Returns a prepared gzip header StringIO. The gzip header is defined in RFC 1952. """ output.write("\x1f\x8b\x08") # Gzip-deflate identification flags = FEXTRA if basename: flags |= FNAME output.write(chr(flags)) # The mtime will be undefined if it does not fit. if mtime > 0xffffffffL: mtime = 0 _write32(output, mtime) deflate_flags = "\0" if COMPRESSION_LEVEL == zlib.Z_BEST_COMPRESSION: deflate_flags = "\x02" # slowest compression algorithm output.write(deflate_flags) output.write(chr(OS_CODE_UNIX)) comp_lenghts_pos = _write_extra_fields(output, in_size) if basename: output.write(basename + '\0') # original basename return comp_lenghts_pos
comp_lenghts_pos = output.tell()
comp_lengths_pos = output.tell()
def _write_extra_fields(output, in_size): """Writes the dictzip extra field. It will be initiated with zeros in chunk lengths. See man dictzip. """ num_chunks = in_size // CHUNK_LENGTH if in_size % CHUNK_LENGTH != 0: num_chunks += 1 field_length = 3*2 + 2 * num_chunks extra_length = 2*2 + field_length assert extra_length <= 0xffff _write16(output, extra_length) # XLEN # Dictzip extra field (Random Access) output.write("RA") _write16(output, field_length) _write16(output, 1) # version _write16(output, CHUNK_LENGTH) _write16(output, num_chunks) comp_lenghts_pos = output.tell() output.write("\0\0" * num_chunks) return comp_lenghts_pos
return comp_lenghts_pos
return comp_lengths_pos
def _write_extra_fields(output, in_size): """Writes the dictzip extra field. It will be initiated with zeros in chunk lengths. See man dictzip. """ num_chunks = in_size // CHUNK_LENGTH if in_size % CHUNK_LENGTH != 0: num_chunks += 1 field_length = 3*2 + 2 * num_chunks extra_length = 2*2 + field_length assert extra_length <= 0xffff _write16(output, extra_length) # XLEN # Dictzip extra field (Random Access) output.write("RA") _write16(output, field_length) _write16(output, 1) # version _write16(output, CHUNK_LENGTH) _write16(output, num_chunks) comp_lenghts_pos = output.tell() output.write("\0\0" * num_chunks) return comp_lenghts_pos
output = open(filename + ".gz", "wb")
output = open(filename + SUFFIX, "wb")
def main(): args = sys.argv[1:] if len(args) == 0: print >>sys.stderr, __doc__ sys.exit(1) for filename in args: input = open(filename, "rb") inputinfo = os.fstat(input.fileno()) basename = os.path.basename(filename) output = open(filename + ".gz", "wb") compressor.compress(input, inputinfo.st_size, output, basename, int(inputinfo.st_mtime)) output.close() input.close()
self.cache_name = os.path.join(self._CondorDAGNode__job.cache_dir, "%s.cache" % self.get_name())
self.cache_name = os.path.join(self.cache_dir, "%s.cache" % self.get_name()) self.add_var_opt("input-cache", self.cache_name)
def set_name(self, *args): pipeline.CondorDAGNode.set_name(self, *args) self.cache_name = os.path.join(self._CondorDAGNode__job.cache_dir, "%s.cache" % self.get_name())
for c in cache: filename = c.path() pipeline.CondorDAGNode.add_file_arg(self, filename) self.add_output_file(filename)
def add_input_cache(self, cache): if self.output_cache: raise AttributeError, "cannot change attributes after computing output cache" self.input_cache.extend(cache) for c in cache: filename = c.path() pipeline.CondorDAGNode.add_file_arg(self, filename) self.add_output_file(filename)
cache_entry.url = "file://localhost" + os.path.abspath(filename)
def set_output(self, description): if self.output_cache: raise AttributeError, "cannot change attributes after computing output cache" cache_entry = power.make_cache_entry(self.input_cache, description, "") filename = os.path.join(self.output_dir, "%s-STRING_LIKELIHOOD_%s-%d-%d.xml.gz" % (cache_entry.observatory, cache_entry.description, int(cache_entry.segment[0]), int(abs(cache_entry.segment)))) self.add_var_opt("output", filename) cache_entry.url = "file://localhost" + os.path.abspath(filename) del self.output_cache[:] self.output_cache.append(cache_entry) return filename
for arg in self.get_args(): if "--add-from-cache" in arg: f = file(self.cache_name, "w") for c in self.input_cache: print >>f, str(c) pipeline.CondorDAGNode.write_input_files(self, *args) break
f = file(self.cache_name, "w") for c in self.input_cache: print >>f, str(c) pipeline.CondorDAGNode.write_input_files(self, *args)
def write_input_files(self, *args): # oh. my. god. this is fscked. for arg in self.get_args(): if "--add-from-cache" in arg: f = file(self.cache_name, "w") for c in self.input_cache: print >>f, str(c) pipeline.CondorDAGNode.write_input_files(self, *args) break
period = float(binjjob.get_opts()["time-step"]) / math.pi
period = float(binjjob.get_opts()["time-step"])
def make_binj_fragment(dag, seg, tag, offset, flow = None, fhigh = None): # one injection every time-step / pi seconds period = float(binjjob.get_opts()["time-step"]) / math.pi # adjust start time to be commensurate with injection period start = seg[0] - seg[0] % period + period * offset node = BurstInjNode(binjjob) node.set_start(start) node.set_end(seg[1]) if flow is not None: node.set_name("lalapps_binj_%s_%d_%d" % (tag, int(start), int(flow))) else: node.set_name("lalapps_binj_%s_%d" % (tag, int(start))) node.set_user_tag(tag) if flow is not None: node.add_macro("macroflow", flow) if fhigh is not None: node.add_macro("macrofhigh", fhigh) node.add_macro("macroseed", int(time.time() + start)) dag.add_node(node) return set([node])
class InspiralAnalysisJob(pipeline.CondorDAGJob, pipeline.AnalysisJob):
class InspiralAnalysisJob(pipeline.AnalysisJob, pipeline.CondorDAGJob):
def __init__(self, args=None): self.args = args
class InspiralAnalysisNode(pipeline.CondorDAGNode, pipeline.AnalysisNode):
class InspiralAnalysisNode(pipeline.AnalysisNode, pipeline.CondorDAGNode):
def __init__(self, cp, dax = False): """ @cp: a ConfigParser object from which the options are read. """ exec_name = 'inspinjfind' sections = ['inspinjfind'] extension = 'xml' InspiralAnalysisJob.__init__(self, cp, sections, exec_name, extension, dax) self.add_condor_cmd('getenv', 'True') # overwrite standard log file names self.set_stdout_file('logs/' + exec_name + '-$(cluster)-$(process).out') self.set_stderr_file('logs/' + exec_name + '-$(cluster)-$(process).err')
filename = "%s-STRING_LIKELIHOOD_%s-%d-%d.xml.gz" % (cache_entry.observatory, cache_entry.description, int(cache_entry.segment[0]), int(abs(cache_entry.segment)))
filename = os.path.join(self.output_dir, "%s-STRING_LIKELIHOOD_%s-%d-%d.xml.gz" % (cache_entry.observatory, cache_entry.description, int(cache_entry.segment[0]), int(abs(cache_entry.segment))))
def set_output(self, description): if self.output_cache: raise AttributeError, "cannot change attributes after computing output cache" cache_entry = power.make_cache_entry(self.input_cache, description, "") filename = "%s-STRING_LIKELIHOOD_%s-%d-%d.xml.gz" % (cache_entry.observatory, cache_entry.description, int(cache_entry.segment[0]), int(abs(cache_entry.segment))) self.add_var_opt("output", filename) cache_entry.url = "file://localhost" + os.path.abspath(filename) del self.output_cache[:] self.output_cache.append(cache_entry) return filename
self.triggers_dir = power.get_triggers_dir(config_parser)
self.output_dir = power.get_triggers_dir(config_parser)
def __init__(self,config_parser): """ config_parser = ConfigParser object from which options are read. """ pipeline.CondorDAGJob.__init__(self, power.get_universe(config_parser), power.get_executable(config_parser, "lalapps_StringSearch")) pipeline.AnalysisJob.__init__(self, config_parser) self.add_ini_opts(config_parser, "lalapps_StringSearch") self.set_stdout_file(os.path.join(power.get_out_dir(config_parser), "lalapps_StringSearch-$(cluster)-$(process).out")) self.set_stderr_file(os.path.join(power.get_out_dir(config_parser), "lalapps_StringSearch-$(cluster)-$(process).err")) self.set_sub_file("lalapps_StringSearch.sub") self.add_condor_cmd("Requirements", "Memory > 1100")
self.triggers_dir = self.job().triggers_dir
self.output_dir = os.path.join(os.getcwd(), self.job().output_dir)
def __init__(self,job): """ job = A CondorDAGJob that can run an instance of lalapps_StringSearch. """ pipeline.CondorDAGNode.__init__(self,job) pipeline.AnalysisNode.__init__(self) self.__usertag = job.get_config('pipeline','user_tag') self.output_cache = [] self.triggers_dir = self.job().triggers_dir
self.set_output(os.path.join(self.triggers_dir, "%s-STRINGSEARCH_%s-%d-%d.xml.gz" % (self.get_ifo(), self.__usertag, int(self.get_start()), int(self.get_end()) - int(self.get_start()))))
self.set_output(os.path.join(self.output_dir, "%s-STRINGSEARCH_%s-%d-%d.xml.gz" % (self.get_ifo(), self.__usertag, int(self.get_start()), int(self.get_end()) - int(self.get_start()))))
def get_output(self): """ Returns the file name of output from the ring code. This must be kept synchronized with the name of the output file in ring.c. """ if self._AnalysisNode__output is None: if None in (self.get_start(), self.get_end(), self.get_ifo(), self.__usertag): raise ValueError, "start time, end time, ifo, or user tag has not been set" seg = segments.segment(LIGOTimeGPS(self.get_start()), LIGOTimeGPS(self.get_end())) self.set_output(os.path.join(self.triggers_dir, "%s-STRINGSEARCH_%s-%d-%d.xml.gz" % (self.get_ifo(), self.__usertag, int(self.get_start()), int(self.get_end()) - int(self.get_start()))))
hwInjNode.set_output_file(os.path.join(hw_inj_dir, outfilename))
hwInjNode.set_output_file(outfilename)
def hwinj_page_setup(cp,ifos,veto_categories,hw_inj_dir): """ run ligolw_cbc_hardware injection page, soring the input and output in the subdirectory hardware_injection_summary """ hwInjNodes = [] hwinj_length = cp.getint("input","gps-end-time") - cp.getint("input","gps-start-time") hwInjJob = inspiral.HWinjPageJob(cp) veto_categories.append(None) for veto in veto_categories: if cp.get("pipeline","user-tag"): usertag = cp.get("pipeline", "user-tag") + "_" + "FULL_DATA" else: usertag = "FULL_DATA" if veto: usertag += "_CAT_" + str(veto) + "_VETO" cacheFile = hipe_cache( ifos, usertag, cp.getint("input", "gps-start-time"), cp.getint("input", "gps-end-time") ) if not os.path.isfile(os.path.join("full_data", cacheFile)): print>>sys.stderr, "WARNING: Cache file FULL_DATA/" + cacheFile print>>sys.stderr, "does not exist! This might cause later failures." outfilename = os.path.join(hw_inj_dir, ''.join(ifos) + '-HWINJ_SUMMARY') if veto: outfilename += '_CAT_' + str(veto) outfilename += '-' + cp.get("input","gps-start-time") + '-' + str(hwinj_length) + '.html' hwInjNode = inspiral.HWinjPageNode(hwInjJob) hwInjNode.set_start(cp.get("input","gps-start-time")) hwInjNode.set_end(cp.get("input","gps-end-time")) hwInjNode.set_input_cache(os.path.join('full_data', cacheFile)) hwInjNode.set_cache_string('*COIRE_SECOND*') hwInjNode.set_source_xml(os.path.join(hw_inj_dir,cp.get("hardware-injections", "hwinj-def-file"))) hwInjNode.set_segment_dir(hw_inj_dir) hwInjNode.set_output_file(os.path.join(hw_inj_dir, outfilename)) hwInjNode.add_var_opt('analyze-injections','') for ifo in ifos: hwInjNode.add_var_opt(ifo.lower()+'-injections','') hwInjNodes.append(hwInjNode) return hwInjNodes
self.set_stderr_file('logs/' + exec_namee + '-$(cluster)-$(process).err')
self.set_stderr_file('logs/' + exec_name + '-$(cluster)-$(process).err')
def __init__(self, cp, dax = False): """ cp: ConfigParser object from which options are read. """ exec_name = "mvsc_get_doubles" universe = "vanilla" executable = cp.get('condor',exec_name) pipeline.CondorDAGJob.__init__(self, universe, executable) pipeline.AnalysisJob.__init__(self, cp, dax) self.add_condor_cmd('getenv','True') self.add_condor_cmd('environment',"KMP_LIBRARY=serial;MKL_SERIAL=yes") self.set_stdout_file('logs/' + exec_name + '-$(cluster)-$(process).out') self.set_stderr_file('logs/' + exec_namee + '-$(cluster)-$(process).err') self.set_sub_file(exec_name + '.sub')
for i in range(number):
for i in range(self.number):
def finalize(self): """ finalize the mvsc_get_doubles node """ self.add_var_opt("instruments", self.instruments) self.add_var_opt("trainingstr", self.trainingstr) self.add_var_opt("testingstr", self.testingstr) self.add_var_opt("zerolagstr", self.zerolagstr) for database in self.databases: self.add_file_arg(database) ifos = self.instruments.strip().split(',') ifos.sort() self.out_file_group = {} for i in range(number): trainingname = ''.join(ifos) + '_set' + str(i) + '_' + str(self.trainingstr) + '.pat' testingname = ''.join(ifos) + '_set' + str(i) + '_' + str(self.testingstr) + '.pat' infoname = ''.join(ifos) + '_set' + str(i) + '_' + str(self.testingstr) + '_info.pat' self.out_file_group[i] = ((trainingname), (testingname)) self.add_output_file(trainingname) self.add_output_file(testingname) self.add_output_file(infoname) self.zerolag_file = [''.join(ifos) + '_' + str(self.zerolagstr) + '.pat'] self.add_output_file(''.join(ifos) + '_' + str(self.zerolagstr) + '.pat') self.add_output_file(''.join(ifos) + '_' + str(self.zerolagstr) + '_info.pat')
self.set_stderr_file('logs/' + exec_namee + '-$(cluster)-$(process).err')
self.set_stderr_file('logs/' + exec_name + '-$(cluster)-$(process).err')
def __init__(self, cp, dax = False): """ cp: ConfigParser object from which options are read. """ exec_name = "mvsc_train_forest" universe = "vanilla" executable = cp.get('condor',exec_name) pipeline.CondorDAGJob.__init__(self, universe, executable) pipeline.AnalysisJob.__init__(self, cp, dax) self.add_condor_cmd('getenv','True') self.add_condor_cmd('environment',"KMP_LIBRARY=serial;MKL_SERIAL=yes") self.set_stdout_file('logs/' + exec_name + '-$(cluster)-$(process).out') self.set_stderr_file('logs/' + exec_namee + '-$(cluster)-$(process).err') self.set_sub_file(exec_name + '.sub')
self.set_stderr_file('logs/' + exec_namee + '-$(cluster)-$(process).err')
self.set_stderr_file('logs/' + exec_name + '-$(cluster)-$(process).err')
def __init__(self, cp, dax = False):
self.add_file_arg("-A -a 1 %s %s %s" % (self.trainedforest, self.file_to_rank, self.ranked_file))
self.add_file_arg("-A -a 4 %s %s %s" % (self.trainedforest, self.file_to_rank, self.ranked_file))
def finalize(self): """ finalize the MvscUseForestNode """ self.ranked_file = self.file_to_rank.replace('.pat','.dat') self.add_file_arg("-A -a 1 %s %s %s" % (self.trainedforest, self.file_to_rank, self.ranked_file)) self.add_output_file(self.ranked_file)
class MvscUpdateSqlJob(pipeline.CondorDAGJob):
class MvscUpdateSqlJob(pipeline.AnalysisJob, pipeline.CondorDAGJob):
def finalize(self): """ finalize the MvscUseForestNode """ self.ranked_file = self.file_to_rank.replace('.pat','.dat') self.add_file_arg("-A -a 1 %s %s %s" % (self.trainedforest, self.file_to_rank, self.ranked_file)) self.add_output_file(self.ranked_file)
self.set_stderr_file('logs/' + exec_namee + '-$(cluster)-$(process).err')
self.set_stderr_file('logs/' + exec_name + '-$(cluster)-$(process).err')
def __init__(self, cp, dax = False): """ cp: ConfigParser object from which options are read. """ exec_name = "mvsc_update_sql" universe = "vanilla" executable = cp.get('condor',exec_name) pipeline.CondorDAGJob.__init__(self, universe, executable) pipeline.AnalysisJob.__init__(self, cp, dax) self.add_condor_cmd('getenv','True') self.add_condor_cmd('environment',"KMP_LIBRARY=serial;MKL_SERIAL=yes") self.set_stdout_file('logs/' + exec_name + '-$(cluster)-$(process).out') self.set_stderr_file('logs/' + exec_namee + '-$(cluster)-$(process).err') self.set_sub_file(exec_name + '.sub')
class MvscUpdateSqlNode(pipeline.CondorDAGNode):
class MvscUpdateSqlNode(pipeline.AnalysisNode, pipeline.CondorDAGNode):
def __init__(self, cp, dax = False): """ cp: ConfigParser object from which options are read. """ exec_name = "mvsc_update_sql" universe = "vanilla" executable = cp.get('condor',exec_name) pipeline.CondorDAGJob.__init__(self, universe, executable) pipeline.AnalysisJob.__init__(self, cp, dax) self.add_condor_cmd('getenv','True') self.add_condor_cmd('environment',"KMP_LIBRARY=serial;MKL_SERIAL=yes") self.set_stdout_file('logs/' + exec_name + '-$(cluster)-$(process).out') self.set_stderr_file('logs/' + exec_namee + '-$(cluster)-$(process).err') self.set_sub_file(exec_name + '.sub')
if (int(seg[0]) - seg[0]) / seg[0] > 1e-15 or (int(seg[1]) - seg[1]) / seg[1] > 1e-15:
if abs((int(seg[0]) - seg[0]) / seg[0]) > 1e-14 or abs((int(seg[1]) - seg[1]) / seg[1]) > 1e-14:
def clip_segment(seg, pad, short_segment_duration): # clip segment to the length required by lalapps_StringSearch. if # # duration = segment length - padding # # then # # duration / short_segment_duration - 0.5 # # must be an odd integer, therefore # # 2 * duration + short_segment_duration # # must be divisble by (4 * short_segment_duration) duration = float(abs(seg)) - 2 * pad extra = (2 * duration + short_segment_duration) % (4 * short_segment_duration) extra /= 2 # clip segment seg = segments.segment(seg[0], seg[1] - extra) # bounds must be integers if (int(seg[0]) - seg[0]) / seg[0] > 1e-15 or (int(seg[1]) - seg[1]) / seg[1] > 1e-15: raise ValueError, "segment %s does not have integer boundaries" % str(seg) seg = segments.segment(int(seg[0]), int(seg[1])) # done return seg
libraries=['boost_python-mt']
libraries=['boost_python']
def pkgconfig(*packages, **kw): import commands flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'} for token in commands.getoutput("pkg-config --libs --cflags %s" % ' '.join(packages)).split(): kw.setdefault(flag_map.get(token[:2]), []).append(token[2:]) return kw
posidx=find(wt>maxwt+log(randoms))
posidx=[i for i in range(0,size(weights)) if wt[i]>maxwt+log(randoms[i]) ]
def nest2pos(samps,weights): randoms=rand(size(samps,0)) wt=weights+samps[:,-1] maxwt=max(wt) posidx=find(wt>maxwt+log(randoms)) pos=samps[posidx,:] return pos
hipeJob.set_pegasus_exec_dir(os.path.join( local_exec_dir, '/'.join(os.getcwd().split('/')[-2:])))
def test_and_add_hipe_arg(hipeCommand, hipe_arg): if config.has_option("hipe-arguments",hipe_arg): hipeCommand += "--" + hipe_arg + " " + \ config.get("hipe-arguments",hipe_arg) return(hipeCommand)
hipeJob.set_pegasus_exec_dir(os.path.join( local_exec_dir, '/'.join(os.getcwd().split('/')[-2:]), usertag))
def test_and_add_hipe_arg(hipeCommand, hipe_arg): if config.has_option("hipe-arguments",hipe_arg): hipeCommand += "--" + hipe_arg + " " + \ config.get("hipe-arguments",hipe_arg) return(hipeCommand)
plotcp.set("pipeline","bank-suffix",bankSuffix)
def plot_setup(plotDir, config, logPath, stage, injectionSuffix, zerolagSuffix, slideSuffix, bankSuffix, cacheFile, injdirType, tag = None, ifos = None, cat = 3): """ run lalapps_plot_hipe and add job to dag plotDir = directory in which to run inspiral hipe config = config file logPath = location where log files will be written stage = which stage to run (first, second or both) injectionSuffix = the string to restrict to for injections zerolagSuffix = the string to restrict to for zero lag slideSuffix = the string to restrict to for time slides bankSuffix = the string to restrict to for bank plots cacheFile = the input cache file for plotting tag = extra tag for naming """ # make the directory for running hipe mkdir(plotDir) plotcp = copy.deepcopy(config) # set details for the common section plotcp.add_section("common") plotcp.set("common","gps-start-time", plotcp.get("input","gps-start-time") ) plotcp.set("common","gps-end-time", plotcp.get("input","gps-end-time") ) plotcp.set("common","output-path", ".") plotcp.set("common","enable-output","") plotSections = ["common", "pipeline", "condor",\ "plotinspiral", "plotinspiral-meta", \ "plotthinca", "plotthinca-meta", \ "plotnumtemplates", "plotnumtemplates-meta", \ "plotinjnum", "plotinjnum-meta", \ "plotethinca", "plotethinca-meta", \ "plotinspmissed", "plotinspmissed-meta", \ "plotinspinj", "plotinspinj-meta", \ "plotsnrchi", "plotsnrchi-meta", \ "plotinspfound", \ "plotinspiralrange", "plotinspiralrange-meta", \ "ploteffdistcut", "ploteffdistcut-meta", \ "plotinspfound", "plotcoincmissed"] for seg in plotcp.sections(): if not seg in plotSections: plotcp.remove_section(seg) plotcp.remove_option("condor","hipe") plotcp.remove_option("condor","plot") plotcp.remove_option("condor","follow") # XXX Can't yet run the plotting codes in standard universe if plotcp.get("condor","universe") == "standard": plotcp.set("condor","universe","vanilla") # set the various suffixes in pipeline plotcp.set("pipeline","injection-suffix",injectionSuffix) plotcp.set("pipeline","inj-suffix",injectionSuffix) plotcp.set("pipeline","found-suffix",injectionSuffix) plotcp.set("pipeline","missed-suffix",injectionSuffix) plotcp.set("pipeline","bank-suffix",bankSuffix) plotcp.set("pipeline","trigbank-suffix",bankSuffix) plotcp.set("pipeline","zerolag-suffix",zerolagSuffix) plotcp.set("pipeline","trig-suffix",zerolagSuffix) plotcp.set("pipeline","coinc-suffix",zerolagSuffix) plotcp.set("pipeline","slide-suffix",slideSuffix) numSlides = slide_sanity(config, ("PLAYGROUND" in slideSuffix )) plotcp.set("pipeline","num-slides", numSlides) # Adding followup options to plotinspmissed analysisstart = plotcp.get("common","gps-start-time") analysisend = plotcp.get("common","gps-end-time") analysisduration = int(analysisend) - int(analysisstart) inspmissedVetoDir = "../segments" for ifo in ifos: if cat == 2: plotcp.set("plotinspmissed","followup-vetofile-" + ifo.lower(), inspmissedVetoDir + "/" + ifo + "-CATEGORY_" + str(cat) + "_VETO_SEGS-" + analysisstart + "-" + str(analysisduration) + ".txt") else: plotcp.set("plotinspmissed","followup-vetofile-" + ifo.lower(), inspmissedVetoDir + "/" + ifo + "-COMBINED_CAT_" + str(cat) + "_VETO_SEGS-" + analysisstart + "-" + str(analysisduration) + ".txt") # Adding followup option to plotinspfound and plotinspmissed plotcp.set("plotinspfound","followup-tag",injdirType) plotcp.set("plotinspmissed","followup-tag",injdirType) # Remove options if no slide or zero lag files are available. 
if "NONE_AVAILABLE" in slideSuffix: if plotcp.has_option('plotsnrchi-meta','slide-program-tag'): remove_plot_meta_option(plotcp,'slide','plotsnrchi') if plotcp.has_option('ploteffdistcut-meta','slide-program-tag'): remove_plot_meta_option(plotcp,'slide','ploteffdistcut') if plotcp.has_option('plotethinca-meta','slide-program-tag'): remove_plot_meta_option(plotcp,'slide','plotethinca') if "NONE_AVAILABLE" in zerolagSuffix: if plotcp.has_option('plotsnrchi-meta','trig-program-tag'): remove_plot_meta_option(plotcp,'trig','plotsnrchi') # set the user-tag if plotcp.get("pipeline","user-tag"): usertag = plotcp.get("pipeline","user-tag") plotcp.set("pipeline","input-user-tag",usertag) usertag += plotDir.upper() else: usertag = plotDir.upper() plotcp.set("pipeline","input-user-tag","") if tag: usertag += "_" + tag plotcp.set("pipeline","user-tag",usertag) plotcp.set("common","cache-file",cacheFile) # return to the directory, write ini file and run hipe os.chdir(plotDir) iniFile = "plot_hipe_" iniFile += plotDir if tag: iniFile += "_" + tag.lower() iniFile += ".ini" plotcp.write(file(iniFile,"w")) print "Running plot hipe in directory " + plotDir print "Using zero lag sieve: " + zerolagSuffix print "Using time slide sieve: " + slideSuffix print "Using injection sieve: " + injectionSuffix print "Using bank sieve: " + bankSuffix print # work out the hipe call: plotCommand = config.get("condor","plot") plotCommand += " --log-path " + logPath plotCommand += " --config-file " + iniFile plotCommand += " --priority 10" for item in config.items("ifo-details"): plotCommand += " --" + item[0] + " " + item[1] for item in config.items("plot-arguments"): plotCommand += " --" + item[0] + " " + item[1] if stage == "first" or stage == "both": plotCommand += " --first-stage" if stage == "second" or stage == "both": plotCommand += " --second-stage" # run lalapps_inspiral_hipe make_external_call(plotCommand) # make hipe job/node plotDag = iniFile.rstrip("ini") + usertag + ".dag" plotJob = pipeline.CondorDAGManJob(plotDag, plotDir) plotNode = pipeline.CondorDAGNode(plotJob) plotNode.set_user_tag(usertag) # return to the original directory os.chdir("..") return plotNode
def set_glob(self, file_glob): """ Sets the glob name """ self.add_var_opt('glob',file_glob) def set_input(self, input_file): """ Sets the input file name """ self.add_var_opt('input',input_file)
def get_ifo_tag(self): """ Returns the IFO tag string """ return self.__ifo_tag
self.daq = nds.daq(self.host, self.port)
self.daq = nds.daq(host, port)
def __init__(self, host, port): self.daq = nds.daq(self.host, self.port) self.channels = self.daq.recv_channel_list() self.channelLeaves, self.channelTree = make_channel_tree(self.channels) self.rates = tuple(sorted(set(int(c.rate) for c in self.channels))) self.channel_types = tuple(c for c in nds.channel_type.values.values() if c != nds.channel_type.unknown) self.selected_rates = frozenset(self.rates) self.selected_channel_types = frozenset(self.channel_types)
stdin, out, err = os.popen3(command) pid, status = os.wait() if status != 0:
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=isinstance(command, str)) out, err = p.communicate() if p.returncode != 0:
def make_external_call(command, show_stdout=False, show_command=False): """ Run a program on the shell and print informative messages on failure. """ if show_command: print command stdin, out, err = os.popen3(command) pid, status = os.wait() if status != 0: print >>sys.stderr, "External call failed." print >>sys.stderr, " status: %d" % status print >>sys.stderr, " stdout: %s" % out.read() print >>sys.stderr, " stderr: %s" % err.read() print >>sys.stderr, " command: %s" % command sys.exit(status) if show_stdout: print out.read() stdin.close() out.close() err.close()
print >>sys.stderr, " status: %d" % status print >>sys.stderr, " stdout: %s" % out.read() print >>sys.stderr, " stderr: %s" % err.read() print >>sys.stderr, " command: %s" % command sys.exit(status)
print >>sys.stderr, " stdout: %s" % out print >>sys.stderr, " stderr: %s" % err raise subprocess.CalledProcessError(p.returncode, command)
def make_external_call(command, show_stdout=False, show_command=False): """ Run a program on the shell and print informative messages on failure. """ if show_command: print command stdin, out, err = os.popen3(command) pid, status = os.wait() if status != 0: print >>sys.stderr, "External call failed." print >>sys.stderr, " status: %d" % status print >>sys.stderr, " stdout: %s" % out.read() print >>sys.stderr, " stderr: %s" % err.read() print >>sys.stderr, " command: %s" % command sys.exit(status) if show_stdout: print out.read() stdin.close() out.close() err.close()
print out.read() stdin.close() out.close() err.close()
print out
def make_external_call(command, show_stdout=False, show_command=False): """ Run a program on the shell and print informative messages on failure. """ if show_command: print command stdin, out, err = os.popen3(command) pid, status = os.wait() if status != 0: print >>sys.stderr, "External call failed." print >>sys.stderr, " status: %d" % status print >>sys.stderr, " stdout: %s" % out.read() print >>sys.stderr, " stderr: %s" % err.read() print >>sys.stderr, " command: %s" % command sys.exit(status) if show_stdout: print out.read() stdin.close() out.close() err.close()
self.set_stdout_file(os.path.join(get_out_dir(config_parser), "lalapps_binj-$(macrochannelname)-$(macrogpsstarttime)-$(macrogpsendtime)-$(cluster)-$(process).out")) self.set_stderr_file(os.path.join(get_out_dir(config_parser), "lalapps_binj-$(macrochannelname)-$(macrogpsstarttime)-$(macrogpsendtime)-$(cluster)-$(process).err"))
self.set_stdout_file(os.path.join(get_out_dir(config_parser), "lalapps_binj-$(macrogpsstarttime)-$(macrogpsendtime)-$(cluster)-$(process).out")) self.set_stderr_file(os.path.join(get_out_dir(config_parser), "lalapps_binj-$(macrogpsstarttime)-$(macrogpsendtime)-$(cluster)-$(process).err"))
def __init__(self, config_parser): """ config_parser = ConfigParser object """ pipeline.CondorDAGJob.__init__(self, get_universe(config_parser), get_executable(config_parser, "lalapps_binj")) pipeline.AnalysisJob.__init__(self, config_parser)
lladdjob = pipeline.LigolwAddJob(get_out_dir(config_parser), config_parser)
lladdjob = pipeline.LigolwAddJob(os.path.join(get_out_dir(config_parser)), config_parser)
def init_job_types(config_parser, job_types = ("datafind", "rm", "binj", "power", "lladd", "binjfind", "bucluster", "bucut", "burca", "burca2", "sqlite", "burcatailor")): """ Construct definitions of the submit files. """ global datafindjob, rmjob, binjjob, powerjob, lladdjob, binjfindjob, buclusterjob, llb2mjob, bucutjob, burcajob, burca2job, sqlitejob, burcatailorjob # ligo_data_find if "datafind" in job_types: datafindjob = pipeline.LSCDataFindJob(os.path.join(os.getcwd(), get_cache_dir(config_parser)), os.path.join(os.getcwd(), get_out_dir(config_parser)), config_parser) # rm if "rm" in job_types: rmjob = RMJob(config_parser) # lalapps_binj if "binj" in job_types: binjjob = BurstInjJob(config_parser) # lalapps_power if "power" in job_types: powerjob = PowerJob(config_parser) # ligolw_add if "lladd" in job_types: lladdjob = pipeline.LigolwAddJob(get_out_dir(config_parser), config_parser) lladdjob.cache_dir = get_cache_dir(config_parser) # ligolw_binjfind if "binjfind" in job_types: binjfindjob = BinjfindJob(config_parser) # ligolw_bucut if "bucut" in job_types: bucutjob = BucutJob(config_parser) # ligolw_bucluster if "bucluster" in job_types: buclusterjob = BuclusterJob(config_parser) # ligolw_burca if "burca" in job_types: burcajob = BurcaJob(config_parser) # ligolw_burca if "burca2" in job_types: burca2job = Burca2Job(config_parser) # ligolw_sqlite if "sqlite" in job_types: sqlitejob = SQLiteJob(config_parser) # ligolw_burca_tailor if "burcatailor" in job_types: burcatailorjob = BurcaTailorJob(config_parser)
for cache_entry, parent in input_cache[:binjfindjob.files_per_binjfind]:
for parent in set(parent for cache_entry, parent in input_cache[:binjfindjob.files_per_binjfind]):
def make_binjfind_fragment(dag, parents, tag, verbose = False): input_cache = collect_output_caches(parents) nodes = set() while input_cache: node = BinjfindNode(binjfindjob) node.add_input_cache([cache_entry for (cache_entry, parent) in input_cache[:binjfindjob.files_per_binjfind]]) for cache_entry, parent in input_cache[:binjfindjob.files_per_binjfind]: node.add_parent(parent) del input_cache[:binjfindjob.files_per_binjfind] seg = cache_span(node.get_input_cache()) node.set_name("ligolw_binjfind_%s_%d_%d" % (tag, int(seg[0]), int(abs(seg)))) node.add_macro("macrocomment", tag) dag.add_node(node) nodes.add(node) return nodes
for cache_entry, parent in input_cache[:buclusterjob.files_per_bucluster]:
for parent in set(parent for cache_entry, parent in input_cache[:buclusterjob.files_per_bucluster]):
def make_bucluster_fragment(dag, parents, tag, verbose = False): input_cache = collect_output_caches(parents) nodes = set() while input_cache: node = BuclusterNode(buclusterjob) node.add_input_cache([cache_entry for (cache_entry, parent) in input_cache[:buclusterjob.files_per_bucluster]]) for cache_entry, parent in input_cache[:buclusterjob.files_per_bucluster]: node.add_parent(parent) del input_cache[:buclusterjob.files_per_bucluster] seg = cache_span(node.get_input_cache()) node.set_name("ligolw_bucluster_%s_%d_%d" % (tag, int(seg[0]), int(abs(seg)))) node.add_macro("macrocomment", tag) node.set_retry(3) dag.add_node(node) nodes.add(node) return nodes
for cache_entry, parent in input_cache[:bucutjob.files_per_bucut]:
for parent in set(parent for cache_entry, parent in input_cache[:bucutjob.files_per_bucut]):
def make_bucut_fragment(dag, parents, tag, verbose = False): input_cache = collect_output_caches(parents) nodes = set() while input_cache: node = BucutNode(bucutjob) node.add_input_cache([cache_entry for (cache_entry, parent) in input_cache[:bucutjob.files_per_bucut]]) for cache_entry, parent in input_cache[:bucutjob.files_per_bucut]: node.add_parent(parent) del input_cache[:bucutjob.files_per_bucut] seg = cache_span(node.get_input_cache()) node.set_name("ligolw_bucut_%s_%d_%d" % (tag, int(seg[0]), int(abs(seg)))) node.add_macro("macrocomment", tag) dag.add_node(node) nodes.add(node) return nodes
for cache_entry, parent in input_cache[:burcajob.files_per_burca]:
for parent in set(parent for cache_entry, parent in input_cache[:burcajob.files_per_burca]):
def make_burca_fragment(dag, parents, tag, coincidence_segments = None, verbose = False): input_cache = collect_output_caches(parents) if coincidence_segments is not None: # doesn't make sense to supply this keyword argument for # more than one input file assert len(input_cache) == 1 nodes = set() while input_cache: node = BurcaNode(burcajob) node.add_input_cache([cache_entry for (cache_entry, parent) in input_cache[:burcajob.files_per_burca]]) for cache_entry, parent in input_cache[:burcajob.files_per_burca]: node.add_parent(parent) del input_cache[:burcajob.files_per_burca] seg = cache_span(node.get_input_cache()) node.set_name("ligolw_burca_%s_%d_%d" % (tag, int(seg[0]), int(abs(seg)))) if coincidence_segments is not None: node.set_coincidence_segments(coincidence_segments) node.add_macro("macrocomment", tag) dag.add_node(node) nodes.add(node) return nodes
self.add_var_opt('eventnum',str(event))
self.add_var_arg('--eventnum '+str(event))
def set_event_number(self,event): """ Set the event number in the injection XML. """ if event is not None: self.__event=int(event) self.add_var_opt('eventnum',str(event))
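A standalone sketch of the guarded setter shown above, with a hypothetical InjectionNode class standing in for the pipeline node; this is not the glue.pipeline API, only an illustration of the None guard and the '--eventnum' argument form used on the add side:

class InjectionNode(object):
    # Hypothetical stand-in for a pipeline node that accumulates command-line arguments.
    def __init__(self):
        self.arguments = []
        self.event = None

    def add_var_arg(self, arg):
        self.arguments.append(arg)

    def set_event_number(self, event):
        # only add the option when an event number was actually supplied
        if event is not None:
            self.event = int(event)
            self.add_var_arg('--eventnum ' + str(self.event))


node = InjectionNode()
node.set_event_number(3)
assert node.arguments == ['--eventnum 3']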
def init_job_types(config_parser, job_types = ("string", "meas_likelihoodjob", "calc_likelihood")):
runsqlitejob = None def init_job_types(config_parser, job_types = ("string", "meas_likelihoodjob", "calc_likelihood", "runsqlite")):
def compute_segment_lists(seglists, offset_vectors, min_segment_length, pad): # don't modify original seglists = seglists.copy() # ignore offset vectors referencing instruments we don't have offset_vectors = [offset_vector for offset_vector in offset_vectors if set(offset_vector.keys()).issubset(set(seglists.keys()))] # cull too-short single-instrument segments from the input # segmentlist dictionary; this can significantly increase # the speed of the llwapp.get_coincident_segmentlistdict() # function when the input segmentlists have had many data # quality holes poked out of them remove_too_short_segments(seglists, min_segment_length, pad) # extract the segments that are coincident under the time # slides new = llwapp.get_coincident_segmentlistdict(seglists, offset_vectors) # round to integer boundaries because lalapps_StringSearch can't accept # non-integer start/stop times # FIXME: fix that in lalapps_StringSearch for seglist in new.values(): for i in range(len(seglist)): seglist[i] = segments.segment(int(math.floor(seglist[i][0])), int(math.ceil(seglist[i][1]))) # intersect with original segments to ensure we haven't expanded beyond # original bounds new &= seglists # again remove too-short segments remove_too_short_segments(new, min_segment_length, pad) # done return new
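A minimal sketch of the outward-rounding step in compute_segment_lists(), using plain (start, stop) tuples rather than the segments library:

import math

def round_segment_outward(seg):
    # expand a (start, stop) pair to integer boundaries so that executables
    # which only accept integer GPS times never see a shrunken segment
    start, stop = seg
    return (int(math.floor(start)), int(math.ceil(stop)))

# example: (100.2, 105.7) -> (100, 106)
assert round_segment_outward((100.2, 105.7)) == (100, 106)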
global stringjob, meas_likelihoodjob, calc_likelihoodjob
global stringjob, meas_likelihoodjob, calc_likelihoodjob, runsqlitejob
def init_job_types(config_parser, job_types = ("string", "meas_likelihoodjob", "calc_likelihood")): """ Construct definitions of the submit files. """ global stringjob, meas_likelihoodjob, calc_likelihoodjob # lalapps_StringSearch if "string" in job_types: stringjob = StringJob(config_parser) # lalapps_string_meas_likelihood if "meas_likelihood" in job_types: meas_likelihoodjob = MeasLikelihoodJob(config_parser) # lalapps_string_calc_likelihood if "calc_likelihood" in job_types: calc_likelihoodjob = CalcLikelihoodJob(config_parser)
this_input_cache = input_cache[:files_per_meas_likelihood]
node.add_input_cache([cache_entry for cache_entry, parent in input_cache[:files_per_meas_likelihood]]) for parent in set(parent for cache_entry, parent in input_cache[:files_per_meas_likelihood]): node.add_parent(parent)
def make_meas_likelihood_fragment(dag, parents, tag, files_per_meas_likelihood = None): if files_per_meas_likelihood is None: files_per_meas_likelihood = meas_likelihoodjob.files_per_meas_likelihood nodes = set() input_cache = power.collect_output_caches(parents) while input_cache: node = MeasLikelihoodNode(meas_likelihoodjob) this_input_cache = input_cache[:files_per_meas_likelihood] del input_cache[:files_per_meas_likelihood] for cache_entry, parent in this_input_cache: node.add_input_cache([cache_entry]) node.add_parent(parent) seg = power.cache_span(node.get_input_cache()) node.set_name("lalapps_string_meas_likelihood_%s_%d_%d" % (tag, int(seg[0]), int(abs(seg)))) node.set_output(tag) dag.add_node(node) nodes.add(node) return nodes
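A standalone sketch of the slice-then-delete chunking idiom used in the while loop above, assuming the input cache is a plain list:

def chunk_cache(input_cache, files_per_node):
    # consume the cache in groups of at most files_per_node entries,
    # mirroring the input_cache[:n] / del input_cache[:n] idiom above
    input_cache = list(input_cache)  # work on a copy
    while input_cache:
        yield input_cache[:files_per_node]
        del input_cache[:files_per_node]

# example: five entries, two per node -> chunks of sizes 2, 2, 1
assert list(chunk_cache(range(5), 2)) == [[0, 1], [2, 3], [4]]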
for cache_entry, parent in this_input_cache: node.add_input_cache([cache_entry]) node.add_parent(parent)
def make_meas_likelihood_fragment(dag, parents, tag, files_per_meas_likelihood = None): if files_per_meas_likelihood is None: files_per_meas_likelihood = meas_likelihoodjob.files_per_meas_likelihood nodes = set() input_cache = power.collect_output_caches(parents) while input_cache: node = MeasLikelihoodNode(meas_likelihoodjob) this_input_cache = input_cache[:files_per_meas_likelihood] del input_cache[:files_per_meas_likelihood] for cache_entry, parent in this_input_cache: node.add_input_cache([cache_entry]) node.add_parent(parent) seg = power.cache_span(node.get_input_cache()) node.set_name("lalapps_string_meas_likelihood_%s_%d_%d" % (tag, int(seg[0]), int(abs(seg)))) node.set_output(tag) dag.add_node(node) nodes.add(node) return nodes
self.add_opt("use-expected-loudest-event",'')
self.add_var_opt("use-expected-loudest-event",'')
def __init__(self, job, database, output_cache = None, output_tag = "SEARCH_VOLUME", bootstrap_iterations=10000, veto_segments_name="vetoes", use_expected_loudest_event = False): """ @database: the pipedown database containing the injection triggers @output_cache: name prefix for the cache file to be written out by the program @output_tag: a string label for the output files @use_expected_loudest_event: disables the use of the loudest-event FAR; 1./livetime is used instead """ pipeline.SqliteNode.__init__(self, job) self.add_var_arg(database)
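A minimal sketch of how an option stored with an empty value can be rendered as a bare flag; render_option() is an illustrative helper, not how the pipeline classes above actually format their arguments:

def render_option(name, value):
    # options stored with an empty value are rendered as bare flags
    if value == '':
        return '--%s' % name
    return '--%s %s' % (name, value)

assert render_option("use-expected-loudest-event", "") == "--use-expected-loudest-event"
assert render_option("veto-segments-name", "vetoes") == "--veto-segments-name vetoes"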
inspiral_hipe_file = open(hipe_cache(ifos, usertag, \ hipecp.getint("input", "gps-start-time"), \ hipecp.getint("input", "gps-end-time")), "a") symlinkedCache.tofile(inspiral_hipe_file) inspiral_hipe_file.close()
def test_and_add_hipe_arg(hipeCommand, hipe_arg): if config.has_option("hipe-arguments",hipe_arg): hipeCommand += "--" + hipe_arg + " " + \ config.get("hipe-arguments",hipe_arg) return(hipeCommand)
def get_input_from_cache(self, cache): """ Retrieves """ self.add_var_arg(filename)
def get_input_from_cache(self, cache): """ Retrieves """ self.add_var_arg(filename)
self.__sim_type = None
self.__sim_tag = None
def __init__(self, job): """ @job: a PrintLCJob """ pipeline.SqliteNode.__init__(self, job) self.__extract_to_xml = None self.__extract_to_database = None self.__exclude_coincs = None self.__include_only_coincs = None self.__sim_type = None self.__output_format = None self.__columns = None
def __init__(self, *args): pipeline.LigolwAddNode.__init__(self, *args)
def __init__(self, job, remove_input, *args): pipeline.LigolwAddNode.__init__(self, job, *args)
def __init__(self, *args): pipeline.LigolwAddNode.__init__(self, *args) self.input_cache = [] self.output_cache = [] self.cache_dir = os.path.join(os.getcwd(), self.job().cache_dir)
for c in cache: self.add_var_arg("--remove-input-except %s" % c.path())
if self.remove_input: for c in cache: self.add_var_arg("--remove-input-except %s" % c.path())
def add_preserve_cache(self, cache): for c in cache: self.add_var_arg("--remove-input-except %s" % c.path())
def make_lladd_fragment(dag, parents, tag, segment = None, input_cache = None, preserve_cache = None, extra_input_cache = None): node = LigolwAddNode(lladdjob)
def make_lladd_fragment(dag, parents, tag, segment = None, input_cache = None, remove_input = False, preserve_cache = None, extra_input_cache = None): node = LigolwAddNode(lladdjob, remove_input = remove_input)
def make_lladd_fragment(dag, parents, tag, segment = None, input_cache = None, preserve_cache = None, extra_input_cache = None): node = LigolwAddNode(lladdjob) # link to parents for parent in parents: node.add_parent(parent) # build input cache if input_cache is None: # default is to use all output files from parents for parent in parents: node.add_input_cache(parent.get_output_cache()) else: # but calling code can provide its own collection node.add_input_cache(input_cache) if extra_input_cache is not None: # sometimes it helps to add some extra node.add_input_cache(extra_input_cache) if preserve_cache is not None: node.add_preserve_cache(preserve_cache) # construct names for the node and output file, and override the # segment if needed [cache_entry] = node.get_output_cache() if segment is None: segment = cache_entry.segment node.set_name("lladd_%s_%s_%d_%d" % (tag, cache_entry.observatory, int(segment[0]), int(abs(segment)))) node.set_output("%s-%s-%d-%d.xml.gz" % (cache_entry.observatory, tag, int(segment[0]), int(abs(segment))), segment = segment) node.set_retry(3) dag.add_node(node) return set([node])
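A minimal sketch of threading the optional remove_input flag from the fragment function into the node constructor, with hypothetical stand-in classes rather than the real LigolwAddNode:

class AddNode(object):
    # Hypothetical stand-in for a ligolw_add node that records its arguments.
    def __init__(self, job, remove_input=False):
        self.job = job
        self.remove_input = remove_input
        self.arguments = []

    def add_preserve_cache(self, cache):
        # only emit --remove-input-except options when inputs will be removed
        if self.remove_input:
            for path in cache:
                self.arguments.append("--remove-input-except %s" % path)


def make_add_fragment(job, remove_input=False, preserve_cache=None):
    node = AddNode(job, remove_input=remove_input)
    if preserve_cache is not None:
        node.add_preserve_cache(preserve_cache)
    return node


# example: the flag is threaded from the fragment call down to the node
node = make_add_fragment("lladdjob", remove_input=True, preserve_cache=["keep.xml"])
assert node.arguments == ["--remove-input-except keep.xml"]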
return make_lladd_fragment(dag, nodes, tag)
return make_lladd_fragment(dag, nodes, tag, remove_input = True)
def make_multibinj_fragment(dag, seg, tag): flow = float(powerjob.get_opts()["low-freq-cutoff"]) fhigh = flow + float(powerjob.get_opts()["bandwidth"]) nodes = make_binj_fragment(dag, seg, tag, 0.0, flow, fhigh) return make_lladd_fragment(dag, nodes, tag)
infile = '%s/%s.in' % (src_dir, basename)
infile = '%s.in' % basename
def generate_git_version_info(): # info object info = git_info() git_path = check_call_out(('which', 'git')) # determine basic info about the commit # %H -- full git hash id # %ct -- commit time # %an, %ae -- author name, email # %cn, %ce -- committer name, email git_id, git_udate, git_author_name, git_author_email, \ git_committer_name, git_committer_email = \ check_call_out((git_path, 'log', '-1', '--pretty=format:%H,%ct,%an,%ae,%cn,%ce')).split(",") git_date = time.strftime('%Y-%m-%d %H:%M:%S +0000', time.gmtime(float(git_udate))) git_author = '%s <%s>' % (git_author_name, git_author_email) git_committer = '%s <%s>' % (git_committer_name, git_committer_email) # determine branch branch_match = check_call_out((git_path, 'rev-parse', '--symbolic-full-name', 'HEAD')) if branch_match == "HEAD": git_branch = None else: git_branch = os.path.basename(branch_match) # determine tag status, git_tag = call_out((git_path, 'describe', '--exact-match', '--tags', git_id)) if status != 0: git_tag = None # refresh index check_call_out((git_path, 'update-index', '-q', '--refresh')) # check working copy for changes status_output = subprocess.call((git_path, 'diff-files', '--quiet')) if status_output != 0: git_status = 'UNCLEAN: Modified working tree' else: # check index for changes status_output = subprocess.call((git_path, 'diff-index', '--cached', '--quiet', 'HEAD')) if status_output != 0: git_status = 'UNCLEAN: Modified index' else: git_status = 'CLEAN: All modifications committed' # determine version strings info.id = git_id info.date = git_date info.branch = git_branch info.tag = git_tag info.author = git_author info.committer = git_committer info.status = git_status return info
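A simplified, standalone sketch covering only the commit-id and date part of generate_git_version_info() above; error handling and the remaining fields are omitted:

import subprocess
import time

def git_id_and_date(path='.'):
    # ask git for the full commit hash and committer timestamp of HEAD,
    # then format the timestamp the same way as the function above
    out = subprocess.check_output(
        ['git', 'log', '-1', '--pretty=format:%H,%ct'], cwd=path)
    git_id, git_udate = out.decode().split(',')
    git_date = time.strftime('%Y-%m-%d %H:%M:%S +0000',
                             time.gmtime(float(git_udate)))
    return git_id, git_date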
oldVal=None
def __init__(self,cp,block_id,dagDir,channel=''): self.dagDirectory=dagDir self.__executable = cp.get('condor','clustertool') self.__universe= cp .get('condor','clustertool_universe') pipeline.CondorDAGJob.__init__(self,self.__universe,self.__executable) pipeline.AnalysisJob.__init__(self,cp) self.add_condor_cmd('getenv','True') self.block_id=blockID=block_id #layerID='RESULTS_'+str(blockID) layerID='RESULTS_'+channel+'_'+str(blockID) #Do not set channel name information here. This puts all #threshold output files into same place self.initialDir=layerPath=determineLayerPath(cp,blockID,layerID) blockPath=determineBlockPath(cp,blockID,channel) #Setup needed directories for this job to write in! buildDir(blockPath) buildDir(layerPath) buildDir(blockPath+'/logs') self.set_stdout_file(os.path.normpath(blockPath+'/logs/tracksearchThreshold-$(cluster)-$(process).out')) self.set_stderr_file(os.path.normpath(blockPath+'/logs/tracksearchThreshold-$(cluster)-$(process).err')) filename="/tracksearchThreshold--"+str(channel)+".sub" self.set_sub_file(os.path.normpath(self.dagDirectory+filename)) #Load in the cluster configuration sections! #Add the candidateUtils.py equivalent library to dag for proper #execution! self.candUtil=str(cp.get('pylibraryfiles','pyutilfile')) if ((self.__universe == 'scheduler') or (self.__universe == 'local')): self.add_condor_cmd('environment','PYTHONPATH=$PYTHONPATH:'+os.path.abspath(os.path.dirname(self.candUtil))) else: self.add_condor_cmd('should_transfer_files','yes') self.add_condor_cmd('when_to_transfer_output','on_exit') self.add_condor_cmd('transfer_input_files',self.candUtil) self.add_condor_cmd('initialdir',self.initialDir) #Setp escaping possible quotes in threshold string! optionTextList=[str('expression-threshold'),str('percentile-cut')] for optionText in optionTextList: oldVal=None if cp.has_option('candidatethreshold',optionText): oldVal=cp.get('candidatethreshold',optionText) newVal=str(oldVal) #New shell escape for latest condor 7.2.4 if newVal.__contains__('"'): newVal=str(newVal).replace('"','""') cp.set('candidatethreshold',optionText,newVal) for sec in ['candidatethreshold']: self.add_ini_opts(cp,sec) if oldVal != None: cp.set('candidatethreshold',optionText,oldVal)
for sec in ['candidatethreshold']: self.add_ini_opts(cp,sec)
oldValList.append((optionText,oldVal)) for sec in ['candidatethreshold']: self.add_ini_opts(cp,sec) for myOpt,oldVal in oldValList:
def __init__(self,cp,block_id,dagDir,channel=''): self.dagDirectory=dagDir self.__executable = cp.get('condor','clustertool') self.__universe= cp .get('condor','clustertool_universe') pipeline.CondorDAGJob.__init__(self,self.__universe,self.__executable) pipeline.AnalysisJob.__init__(self,cp) self.add_condor_cmd('getenv','True') self.block_id=blockID=block_id #layerID='RESULTS_'+str(blockID) layerID='RESULTS_'+channel+'_'+str(blockID) #Do not set channel name information here. This puts all #threshold output files into same place self.initialDir=layerPath=determineLayerPath(cp,blockID,layerID) blockPath=determineBlockPath(cp,blockID,channel) #Setup needed directories for this job to write in! buildDir(blockPath) buildDir(layerPath) buildDir(blockPath+'/logs') self.set_stdout_file(os.path.normpath(blockPath+'/logs/tracksearchThreshold-$(cluster)-$(process).out')) self.set_stderr_file(os.path.normpath(blockPath+'/logs/tracksearchThreshold-$(cluster)-$(process).err')) filename="/tracksearchThreshold--"+str(channel)+".sub" self.set_sub_file(os.path.normpath(self.dagDirectory+filename)) #Load in the cluster configuration sections! #Add the candidateUtils.py equivalent library to dag for proper #execution! self.candUtil=str(cp.get('pylibraryfiles','pyutilfile')) if ((self.__universe == 'scheduler') or (self.__universe == 'local')): self.add_condor_cmd('environment','PYTHONPATH=$PYTHONPATH:'+os.path.abspath(os.path.dirname(self.candUtil))) else: self.add_condor_cmd('should_transfer_files','yes') self.add_condor_cmd('when_to_transfer_output','on_exit') self.add_condor_cmd('transfer_input_files',self.candUtil) self.add_condor_cmd('initialdir',self.initialDir) #Setp escaping possible quotes in threshold string! optionTextList=[str('expression-threshold'),str('percentile-cut')] for optionText in optionTextList: oldVal=None if cp.has_option('candidatethreshold',optionText): oldVal=cp.get('candidatethreshold',optionText) newVal=str(oldVal) #New shell escape for latest condor 7.2.4 if newVal.__contains__('"'): newVal=str(newVal).replace('"','""') cp.set('candidatethreshold',optionText,newVal) for sec in ['candidatethreshold']: self.add_ini_opts(cp,sec) if oldVal != None: cp.set('candidatethreshold',optionText,oldVal)
cp.set('candidatethreshold',optionText,oldVal)
cp.set('candidatethreshold',myOpt,oldVal)
def __init__(self,cp,block_id,dagDir,channel=''): self.dagDirectory=dagDir self.__executable = cp.get('condor','clustertool') self.__universe= cp .get('condor','clustertool_universe') pipeline.CondorDAGJob.__init__(self,self.__universe,self.__executable) pipeline.AnalysisJob.__init__(self,cp) self.add_condor_cmd('getenv','True') self.block_id=blockID=block_id #layerID='RESULTS_'+str(blockID) layerID='RESULTS_'+channel+'_'+str(blockID) #Do not set channel name information here. This puts all #threshold output files into same place self.initialDir=layerPath=determineLayerPath(cp,blockID,layerID) blockPath=determineBlockPath(cp,blockID,channel) #Setup needed directories for this job to write in! buildDir(blockPath) buildDir(layerPath) buildDir(blockPath+'/logs') self.set_stdout_file(os.path.normpath(blockPath+'/logs/tracksearchThreshold-$(cluster)-$(process).out')) self.set_stderr_file(os.path.normpath(blockPath+'/logs/tracksearchThreshold-$(cluster)-$(process).err')) filename="/tracksearchThreshold--"+str(channel)+".sub" self.set_sub_file(os.path.normpath(self.dagDirectory+filename)) #Load in the cluster configuration sections! #Add the candidateUtils.py equivalent library to dag for proper #execution! self.candUtil=str(cp.get('pylibraryfiles','pyutilfile')) if ((self.__universe == 'scheduler') or (self.__universe == 'local')): self.add_condor_cmd('environment','PYTHONPATH=$PYTHONPATH:'+os.path.abspath(os.path.dirname(self.candUtil))) else: self.add_condor_cmd('should_transfer_files','yes') self.add_condor_cmd('when_to_transfer_output','on_exit') self.add_condor_cmd('transfer_input_files',self.candUtil) self.add_condor_cmd('initialdir',self.initialDir) #Setp escaping possible quotes in threshold string! optionTextList=[str('expression-threshold'),str('percentile-cut')] for optionText in optionTextList: oldVal=None if cp.has_option('candidatethreshold',optionText): oldVal=cp.get('candidatethreshold',optionText) newVal=str(oldVal) #New shell escape for latest condor 7.2.4 if newVal.__contains__('"'): newVal=str(newVal).replace('"','""') cp.set('candidatethreshold',optionText,newVal) for sec in ['candidatethreshold']: self.add_ini_opts(cp,sec) if oldVal != None: cp.set('candidatethreshold',optionText,oldVal)
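A standalone sketch of the quote-doubling escape and the save/restore of ConfigParser option values done inline in the constructor above; the helper names and section/option names are illustrative:

def escape_for_condor(value):
    # newer Condor versions expect embedded double quotes to be doubled
    return value.replace('"', '""')


def with_escaped_options(cp, section, options, callback):
    # temporarily escape the named ConfigParser options, run the callback,
    # then restore the original values (the save/restore done inline above)
    saved = []
    for opt in options:
        if cp.has_option(section, opt):
            old = cp.get(section, opt)
            saved.append((opt, old))
            cp.set(section, opt, escape_for_condor(old))
    try:
        callback(cp)
    finally:
        for opt, old in saved:
            cp.set(section, opt, old)

# usage sketch (cp is any ConfigParser instance):
# with_escaped_options(cp, 'candidatethreshold',
#                      ['expression-threshold', 'percentile-cut'],
#                      lambda c: job.add_ini_opts(c, 'candidatethreshold'))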