Columns: rem (strings, 0 to 322k chars), add (strings, 0 to 2.05M chars), context (strings, 8 to 228k chars)
parentq = self.session.query(Job.job_id).filter(Job.name == parentName)
parentid = parentq.one()[0]
edge = Edge()
edge.parent_id = parentid
edge.child_id = job.job_id
edge.commit_to_db(self.session)
parentq = self.session.query(Job.job_id).filter(Job.name == parentName).filter(Job.wf_id == job.wf_id)
for parentid in parentq.all():
    edge = Edge()
    edge.parent_id = parentid[0]
    edge.child_id = job.job_id
    edge.commit_to_db(self.session)
def job(self, linedata): """ @type linedata: dict @param linedata: One line of BP data dict-ified. Handles a job insert event. """ job = self.linedataToObject(linedata, Job()) # get wf_id job.wf_id = self.wf_uuidToId(job.wf_uuid) if job.wf_id == None: er = 'No wf_id associated with wf_uuid %s - can not insert job %s' \ % (job.wf_uuid, job) self.log.error('job', msg=er) return if job.name.startswith('merge_'): job.clustered = True else: job.clustered = False del job.ts, job.event self.log.debug('job', msg=job) # See if this is the initial entry or an update if not self.job_id_cache.has_key((job.wf_id, job.job_submit_seq)): jcheck = self.session.query(Job).filter(Job.wf_id == job.wf_id).filter(Job.job_submit_seq == job.job_submit_seq).first() if not jcheck: # A job entry does not exist so insert job.commit_to_db(self.session) self.job_id_cache[(job.wf_id, job.job_submit_seq)] = job.job_id self.log.debug('job', msg='Inserting new jobid: %s' % job.job_id) else: # A job entry EXISTS but not cached probably due to # an interrupted run. Bulletproofing. Raises a warning # as this is a non-optimal state. self.job_id_cache[(jcheck.wf_id, jcheck.job_submit_seq)] = jcheck.job_id job.job_id = jcheck.job_id job.merge_to_db(self.session) self.log.warn('job', msg='Updating non-cached job: %s' % job) else: jid = self.jobIdFromUnique(job) self.log.debug('job', msg='Updating jobid: %s' % jid) job.job_id = jid # set PK from cache for merge job.merge_to_db(self.session) # process edge information query = self.session.query(Edge).filter(Edge.child_id == job.job_id) if query.count() == 0: self.log.debug('job', msg='Finding edges for job %s (job_id: %s)' % (job.name, job.job_id)) edgeq = self.session.query(EdgeStatic.parent).filter(EdgeStatic.wf_uuid == job.wf_uuid).filter(EdgeStatic.child == job.name) for parent in edgeq.all(): parentName = parent[0] parentq = self.session.query(Job.job_id).filter(Job.name == parentName) parentid = parentq.one()[0] edge = Edge() edge.parent_id = parentid edge.child_id = job.job_id edge.commit_to_db(self.session) pass
try:
    if short == True:
        dt = timestamp[:-5]
        tz = timestamp[-5:]
    else:
        dt = timestamp[:-6]
        tz = timestamp[-6:]
        tz = tz[:-3] + tz[-2:]

    if short == False:
        if dt.find('.'):
            dt = dt[:dt.find('.')]
        my_time = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
    else:
        my_time = datetime.datetime.strptime(dt, "%Y%m%dT%H%M%S")

    my_hour = int(tz[:-2])
    my_min = int(tz[-2:])
    my_offset = datetime.timedelta(hours=my_hour, minutes=my_min)
    my_time = my_time - my_offset
    return int(calendar.timegm(my_time.timetuple()))
except:
    logger.warn("ERROR: Converting timestamp %s to epoch format" % timestamp)
    return None
try:
    m = parse_iso8601.search(timestamp)
    if m is None:
        logger.warn("ERROR: Unable to match \"%s\" to ISO 8601" % timestamp)
        return None
    else:
        dt = "%04d-%02d-%02d %02d:%02d:%02d" % (int(m.group(1)), int(m.group(2)), int(m.group(3)),
                                                int(m.group(4)), int(m.group(5)), int(m.group(6)))
        tz = m.group(8)
        my_time = datetime.datetime(*(time.strptime(dt, "%Y-%m-%d %H:%M:%S")[0:6]))
        if tz.upper() != 'Z':
            my_offset = datetime.timedelta(hours=int(m.group(9)), minutes=int(m.group(10)))
            my_time = my_time - my_offset
        return int(calendar.timegm(my_time.timetuple()))
except:
    logger.warn("ERROR: Unable to parse timestamp \"%s\"" % timestamp)
    return None
def epochdate(timestamp, short=False): """ This function converts an ISO timestamp into seconds since epoch Set short to False when the timestamp is in the YYYY-MM-DDTHH:MM:SSZZZ:ZZ format Set short to True when the timestamp is in the YYYYMMDDTHHMMSSZZZZZ format """ try: # Split date/time and timezone information if short == True: dt = timestamp[:-5] tz = timestamp[-5:] else: dt = timestamp[:-6] tz = timestamp[-6:] tz = tz[:-3] + tz[-2:] # Convert date/time to datetime format if short == False: # Delete microseconds, if any if dt.find('.'): dt = dt[:dt.find('.')] my_time = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S") else: my_time = datetime.datetime.strptime(dt, "%Y%m%dT%H%M%S") # Split timezone in hours and minutes my_hour = int(tz[:-2]) my_min = int(tz[-2:]) # Calculate offset my_offset = datetime.timedelta(hours=my_hour, minutes=my_min) # Subtract offset my_time = my_time - my_offset # Turn my_time into Epoch format return int(calendar.timegm(my_time.timetuple())) except: logger.warn("ERROR: Converting timestamp %s to epoch format" % timestamp) return None
self.workflow_run_wall_time ='-'
self.workflow_cpu_time ='-'
def __init__(self): self.submit_dir ='-' self.workflow_run_time = '-' self.workflow_run_wall_time ='-' self.total_jobs = '-' self.succeeded_jobs ='-' self.failed_jobs ='-' self.unsubmitted_jobs ='-' self.unknown_jobs ='-' self.total_succeeded_tasks ='-' self.total_failed_tasks ='-' self.job_statistics_dict ={} self.transformation_statistics_dict ={}
workflow_info +=(("Total workflow execution time :" + str(self.workflow_run_time)).ljust(job_run_statistics_size))
workflow_info +=(("Workflow execution wall time :" + str(self.workflow_run_time)).ljust(job_run_statistics_size))
def get_formatted_workflow_info(self): workflow_info = '' workflow_info +=("#" + self.submit_dir) workflow_info +=( "\n") workflow_info +=(("Total workflow execution time :" + str(self.workflow_run_time)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("Total workflow execution wall time :" + str(self.workflow_run_wall_time)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("Total jobs run :" + str(self.total_jobs)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("# jobs succeeded :" + str(self.succeeded_jobs)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("# jobs failed :" + str(self.failed_jobs)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("# jobs unsubmitted :" + str(self.unsubmitted_jobs)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("# jobs unknown :" + str(self.unknown_jobs)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("# Total tasks succeeded :" + str(self.total_succeeded_tasks)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("# Total tasks failed :" + str(self.total_failed_tasks)).ljust(job_run_statistics_size)) workflow_info +=("\n") return workflow_info
workflow_info +=(("Total workflow execution wall time :" + str(self.workflow_run_wall_time)).ljust(job_run_statistics_size))
workflow_info +=(("Total cpu time consumed :" + str(self.workflow_cpu_time)).ljust(job_run_statistics_size))
def get_formatted_workflow_info(self): workflow_info = '' workflow_info +=("#" + self.submit_dir) workflow_info +=( "\n") workflow_info +=(("Total workflow execution time :" + str(self.workflow_run_time)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("Total workflow execution wall time :" + str(self.workflow_run_wall_time)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("Total jobs run :" + str(self.total_jobs)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("# jobs succeeded :" + str(self.succeeded_jobs)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("# jobs failed :" + str(self.failed_jobs)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("# jobs unsubmitted :" + str(self.unsubmitted_jobs)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("# jobs unknown :" + str(self.unknown_jobs)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("# Total tasks succeeded :" + str(self.total_succeeded_tasks)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("# Total tasks failed :" + str(self.total_failed_tasks)).ljust(job_run_statistics_size)) workflow_info +=("\n") return workflow_info
workflow_run_wall_time =0
workflow_cpu_time =0
def populate_workflow_details(workflow): """ populates the workflow statistics information Param: the workflow reference """ workflow_stat = WorkflowStatistics() transformation_stats_dict ={} job_stats_dict ={} total_succeeded_tasks =0 total_failed_tasks =0 failed_jobs =0 succeeded_jobs =0 unknown_jobs =0 unsubmitted_jobs =0 workflow_run_time = None workflow_run_wall_time =0 if not workflow.is_running : workflow_run_time = convert_to_seconds(workflow.total_time) for job in workflow.jobs: for task in job.tasks: if (task.task_submit_seq > 0): workflow_run_wall_time +=task.duration if (task.exitcode == 0): total_succeeded_tasks +=1 else : total_failed_tasks +=1 # populating statistics details dagman_start_time = workflow.start_events[0].timestamp # for root jobs,dagman_start_time is required, assumption start_event[0] is not none for job in workflow.jobs: if job_stats_dict.has_key(job.name): job_stat = job_stats_dict[job.name] else: job_stat = JobStatistics() job_stats_dict[job.name] = job_stat populate_job_details(job ,job_stat , dagman_start_time) tasks = job.tasks for task in tasks: if (task.task_submit_seq > 0): if transformation_stats_dict.has_key(task.transformation): trans_stats = transformation_stats_dict[task.transformation] else: trans_stats = TransformationStatistics() transformation_stats_dict[task.transformation] = trans_stats populate_transformation_statistics(task ,trans_stats) #Calculating total jobs total_jobs = len(job_stats_dict) #Calculating failed and successful jobs for job_stat in job_stats_dict.values(): if job_stat.is_success: succeeded_jobs +=1 elif job_stat.is_failure: failed_jobs +=1 elif job_stat.state is none: unsubmitted_jobs +=1 else: unknown_jobs +=1 # Assigning value to the workflow object workflow_stat.submit_dir = workflow.submit_dir if workflow_run_time is not None: workflow_stat.workflow_run_time =workflow_run_time workflow_stat.workflow_run_wall_time = workflow_run_wall_time workflow_stat.total_jobs = total_jobs workflow_stat.succeeded_jobs = succeeded_jobs workflow_stat.failed_jobs = failed_jobs workflow_stat.unsubmitted_jobs = unsubmitted_jobs workflow_stat.unknown_jobs = unknown_jobs workflow_stat.total_succeeded_tasks = total_succeeded_tasks workflow_stat.total_failed_tasks =total_failed_tasks workflow_stat.job_statistics_dict = job_stats_dict workflow_stat.transformation_statistics_dict = transformation_stats_dict return workflow_stat
workflow_run_wall_time +=task.duration
workflow_cpu_time +=task.duration
def populate_workflow_details(workflow): """ populates the workflow statistics information Param: the workflow reference """ workflow_stat = WorkflowStatistics() transformation_stats_dict ={} job_stats_dict ={} total_succeeded_tasks =0 total_failed_tasks =0 failed_jobs =0 succeeded_jobs =0 unknown_jobs =0 unsubmitted_jobs =0 workflow_run_time = None workflow_run_wall_time =0 if not workflow.is_running : workflow_run_time = convert_to_seconds(workflow.total_time) for job in workflow.jobs: for task in job.tasks: if (task.task_submit_seq > 0): workflow_run_wall_time +=task.duration if (task.exitcode == 0): total_succeeded_tasks +=1 else : total_failed_tasks +=1 # populating statistics details dagman_start_time = workflow.start_events[0].timestamp # for root jobs,dagman_start_time is required, assumption start_event[0] is not none for job in workflow.jobs: if job_stats_dict.has_key(job.name): job_stat = job_stats_dict[job.name] else: job_stat = JobStatistics() job_stats_dict[job.name] = job_stat populate_job_details(job ,job_stat , dagman_start_time) tasks = job.tasks for task in tasks: if (task.task_submit_seq > 0): if transformation_stats_dict.has_key(task.transformation): trans_stats = transformation_stats_dict[task.transformation] else: trans_stats = TransformationStatistics() transformation_stats_dict[task.transformation] = trans_stats populate_transformation_statistics(task ,trans_stats) #Calculating total jobs total_jobs = len(job_stats_dict) #Calculating failed and successful jobs for job_stat in job_stats_dict.values(): if job_stat.is_success: succeeded_jobs +=1 elif job_stat.is_failure: failed_jobs +=1 elif job_stat.state is none: unsubmitted_jobs +=1 else: unknown_jobs +=1 # Assigning value to the workflow object workflow_stat.submit_dir = workflow.submit_dir if workflow_run_time is not None: workflow_stat.workflow_run_time =workflow_run_time workflow_stat.workflow_run_wall_time = workflow_run_wall_time workflow_stat.total_jobs = total_jobs workflow_stat.succeeded_jobs = succeeded_jobs workflow_stat.failed_jobs = failed_jobs workflow_stat.unsubmitted_jobs = unsubmitted_jobs workflow_stat.unknown_jobs = unknown_jobs workflow_stat.total_succeeded_tasks = total_succeeded_tasks workflow_stat.total_failed_tasks =total_failed_tasks workflow_stat.job_statistics_dict = job_stats_dict workflow_stat.transformation_statistics_dict = transformation_stats_dict return workflow_stat
workflow_stat.workflow_run_wall_time = workflow_run_wall_time
workflow_stat.workflow_cpu_time = workflow_cpu_time
def populate_workflow_details(workflow): """ populates the workflow statistics information Param: the workflow reference """ workflow_stat = WorkflowStatistics() transformation_stats_dict ={} job_stats_dict ={} total_succeeded_tasks =0 total_failed_tasks =0 failed_jobs =0 succeeded_jobs =0 unknown_jobs =0 unsubmitted_jobs =0 workflow_run_time = None workflow_run_wall_time =0 if not workflow.is_running : workflow_run_time = convert_to_seconds(workflow.total_time) for job in workflow.jobs: for task in job.tasks: if (task.task_submit_seq > 0): workflow_run_wall_time +=task.duration if (task.exitcode == 0): total_succeeded_tasks +=1 else : total_failed_tasks +=1 # populating statistics details dagman_start_time = workflow.start_events[0].timestamp # for root jobs,dagman_start_time is required, assumption start_event[0] is not none for job in workflow.jobs: if job_stats_dict.has_key(job.name): job_stat = job_stats_dict[job.name] else: job_stat = JobStatistics() job_stats_dict[job.name] = job_stat populate_job_details(job ,job_stat , dagman_start_time) tasks = job.tasks for task in tasks: if (task.task_submit_seq > 0): if transformation_stats_dict.has_key(task.transformation): trans_stats = transformation_stats_dict[task.transformation] else: trans_stats = TransformationStatistics() transformation_stats_dict[task.transformation] = trans_stats populate_transformation_statistics(task ,trans_stats) #Calculating total jobs total_jobs = len(job_stats_dict) #Calculating failed and successful jobs for job_stat in job_stats_dict.values(): if job_stat.is_success: succeeded_jobs +=1 elif job_stat.is_failure: failed_jobs +=1 elif job_stat.state is none: unsubmitted_jobs +=1 else: unknown_jobs +=1 # Assigning value to the workflow object workflow_stat.submit_dir = workflow.submit_dir if workflow_run_time is not None: workflow_stat.workflow_run_time =workflow_run_time workflow_stat.workflow_run_wall_time = workflow_run_wall_time workflow_stat.total_jobs = total_jobs workflow_stat.succeeded_jobs = succeeded_jobs workflow_stat.failed_jobs = failed_jobs workflow_stat.unsubmitted_jobs = unsubmitted_jobs workflow_stat.unknown_jobs = unknown_jobs workflow_stat.total_succeeded_tasks = total_succeeded_tasks workflow_stat.total_failed_tasks =total_failed_tasks workflow_stat.job_statistics_dict = job_stats_dict workflow_stat.transformation_statistics_dict = transformation_stats_dict return workflow_stat
all_stat.workflow_run_wall_time = workflow_stat_list[0].workflow_run_wall_time
all_stat.workflow_cpu_time = workflow_stat_list[0].workflow_cpu_time
def print_workflow_details(workflow_stat_list): # print workflow statistics all_stat = WorkflowStatistics() wf_stats_file = os.path.join(output_dir, "workflow") try: fh = open(wf_stats_file, "w") all_stat.submit_dir = "All" all_stat.workflow_run_time =workflow_stat_list[0].workflow_run_time all_stat.workflow_run_wall_time = workflow_stat_list[0].workflow_run_wall_time all_stat.total_jobs = 0 all_stat.succeeded_jobs =0 all_stat.failed_jobs = 0 all_stat.unsubmitted_jobs =0 all_stat.unknown_jobs =0 all_stat.total_succeeded_tasks =0 all_stat.total_failed_tasks =0 #print job run statistics for workflow_stat in workflow_stat_list: wf_info = workflow_stat.get_formatted_workflow_info() all_stat.total_jobs += workflow_stat.total_jobs all_stat.succeeded_jobs += workflow_stat.succeeded_jobs all_stat.failed_jobs += workflow_stat.failed_jobs all_stat.unsubmitted_jobs += workflow_stat.unsubmitted_jobs all_stat.unknown_jobs += workflow_stat.unknown_jobs all_stat.total_succeeded_tasks += workflow_stat.total_succeeded_tasks all_stat.total_failed_tasks +=workflow_stat.total_failed_tasks fh.write(wf_info) fh.write( "\n") wf_info = all_stat.get_formatted_workflow_info() fh.write(wf_info) fh.write("\n") except IOError: logger.error("Unable to write to file " + wf_stats_file) sys.exit(1) else: fh.close() # print job statistics jobs_stats_file = os.path.join(output_dir, "jobs") try: fh = open(jobs_stats_file, "w") # print legends job_stats_legend = formatted_job_stats_legends() fh.write(job_stats_legend) fh.write( "\n") #print job statistics for workflow_stat in workflow_stat_list: job_info = workflow_stat.get_formatted_job_info() fh.write(job_info) fh.write( "\n") except IOError: logger.error("Unable to write to file " + jobs_stats_file) sys.exit(1) else: fh.close() # printing transformation stats all_trans_stats_dict = {} transformation_stats_file = os.path.join(output_dir, "breakdown.txt") try: fh = open(transformation_stats_file, "w") for workflow_stat in workflow_stat_list: trans_info = workflow_stat.get_formatted_transformation_info() fh.write(trans_info) fh.write( "\n") wk_trans_stat = workflow_stat.transformation_statistics_dict for trans_name,trans_stats in wk_trans_stat.items(): if all_trans_stats_dict.has_key(trans_name): stats = all_trans_stats_dict[trans_name] stats.count +=trans_stats.count stats.total_runtime += trans_stats.total_runtime if stats.min > trans_stats.min : stats.min = trans_stats.min if stats.max < trans_stats.max : stats.max = trans_stats.max stats.sum_of_squares += trans_stats.sum_of_squares stats.successcount +=trans_stats.successcount stats.failcount +=trans_stats.failcount else: stats = TransformationStatistics() all_trans_stats_dict[trans_name] = stats stats.count =trans_stats.count stats.total_runtime = trans_stats.total_runtime stats.min = trans_stats.min stats.max = trans_stats.max stats.sum_of_squares = trans_stats.sum_of_squares stats.successcount =trans_stats.successcount stats.failcount =trans_stats.failcount all_stat.transformation_statistics_dict = all_trans_stats_dict trans_info = all_stat.get_formatted_transformation_info() fh.write(trans_info) fh.write( "\n") except IOError: logger.error("Unable to write to file " + transformation_stats_file) sys.exit(1) else: fh.close() logger.info("Workflow run statistics at " + wf_stats_file) logger.info("Job statistics at " + jobs_stats_file) logger.info("Logical transformation statistics at " + transformation_stats_file) return
parse_iso8601 = re.compile(r'(\d{4})-?(\d{2})-?(\d{2})[ tT]?(\d{2}):?(\d{2}):?(\d{2})([.,]\d+)?([zZ]|[-+](\d{2}):?(\d{2}))')
def isodate(now=int(time.time()), utc=False, short=False):
    """
    This function converts seconds since epoch into ISO timestamp
    """
    if utc:
        my_time = time.gmtime(now)
    else:
        # FIXME: Zone offset is wrong on CentOS 5.5 with python 2.4
        my_time = time.localtime(now)

    if short:
        if utc:
            return time.strftime("%Y%m%dT%H%M%SZ", my_time)
        else:
            return time.strftime("%Y%m%dT%H%M%S%z", my_time)
    else:
        if utc:
            return time.strftime("%Y-%m-%dT%H:%M:%SZ", my_time)
        else:
            return time.strftime("%Y-%m-%dT%H:%M:%S%z", my_time)
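As a hedged illustration (the sample timestamp is invented), the parse_iso8601 pattern introduced above captures the date and time fields in groups 1-6, the zone designator in group 8, and the numeric offset in groups 9-10:

import re

parse_iso8601 = re.compile(r'(\d{4})-?(\d{2})-?(\d{2})[ tT]?(\d{2}):?(\d{2}):?(\d{2})([.,]\d+)?([zZ]|[-+](\d{2}):?(\d{2}))')

m = parse_iso8601.search("2011-05-03T10:15:30-05:00")   # sample value only
print m.group(1), m.group(2), m.group(3)    # 2011 05 03
print m.group(4), m.group(5), m.group(6)    # 10 15 30
print m.group(8), m.group(9), m.group(10)   # -05:00 05 00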
logger.warn("ERROR: Unable to match \"%s\" to ISO 8601" % timestamp)
logger.warn("unable to match \"%s\" to ISO 8601" % timestamp)
def epochdate(timestamp, short=False):
    """
    This function converts an ISO timestamp into seconds since epoch
    Set short to False when the timestamp is in the YYYY-MM-DDTHH:MM:SSZZZ:ZZ format
    Set short to True when the timestamp is in the YYYYMMDDTHHMMSSZZZZZ format
    """
    try:
        # Split date/time and timezone information
        m = parse_iso8601.search(timestamp)
        if m is None:
            logger.warn("ERROR: Unable to match \"%s\" to ISO 8601" % timestamp)
            return None
        else:
            dt = "%04d-%02d-%02d %02d:%02d:%02d" % (int(m.group(1)), int(m.group(2)), int(m.group(3)),
                                                    int(m.group(4)), int(m.group(5)), int(m.group(6)))
            tz = m.group(8)
            # my_time = datetime.datetime.strptime(dt, "%Y-%m-%d %H:%M:%S")
            my_time = datetime.datetime(*(time.strptime(dt, "%Y-%m-%d %H:%M:%S")[0:6]))
            if tz.upper() != 'Z':
                # no zulu time, has zone offset
                my_offset = datetime.timedelta(hours=int(m.group(9)), minutes=int(m.group(10)))
                # adjust for time zone offset
                if tz[0] == '-':
                    my_time = my_time + my_offset
                else:
                    my_time = my_time - my_offset
            # Turn my_time into Epoch format
            return int(calendar.timegm(my_time.timetuple()))
    except:
        logger.warn("ERROR: Unable to parse timestamp \"%s\"" % timestamp)
        return None
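A minimal sanity check of the sign handling above, assuming the function and its imports (re, time, datetime, calendar, logger, parse_iso8601) are in scope; the timestamps are sample values:

import calendar, time

# 10:15:30 at UTC-05:00 is 15:15:30 UTC, so adding the offset (tz starts
# with '-') must reproduce the plain UTC epoch value.
expected = calendar.timegm(time.strptime("2011-05-03 15:15:30", "%Y-%m-%d %H:%M:%S"))
print epochdate("2011-05-03T10:15:30-05:00") == expected   # True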
logger.warn("ERROR: Unable to parse timestamp \"%s\"" % timestamp)
logger.warn("unable to parse timestamp \"%s\"" % timestamp)
def epochdate(timestamp, short=False): """ This function converts an ISO timestamp into seconds since epoch Set short to False when the timestamp is in the YYYY-MM-DDTHH:MM:SSZZZ:ZZ format Set short to True when the timestamp is in the YYYYMMDDTHHMMSSZZZZZ format """ try: # Split date/time and timezone information m = parse_iso8601.search(timestamp) if m is None: logger.warn("ERROR: Unable to match \"%s\" to ISO 8601" % timestamp) return None else: dt = "%04d-%02d-%02d %02d:%02d:%02d" % (int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)), int(m.group(5)), int(m.group(6))) tz = m.group(8) # my_time = datetime.datetime.strptime(dt, "%Y-%m-%d %H:%M:%S") my_time = datetime.datetime(*(time.strptime(dt, "%Y-%m-%d %H:%M:%S")[0:6])) if tz.upper() != 'Z': # no zulu time, has zone offset my_offset = datetime.timedelta(hours=int(m.group(9)),minutes=int(m.group(10))) # adjust for time zone offset if tz[0] == '-': my_time = my_time + my_offset else: my_time = my_time - my_offset # Turn my_time into Epoch format return int(calendar.timegm(my_time.timetuple())) except: logger.warn("ERROR: Unable to parse timestamp \"%s\"" % timestamp) return None
def slurp_braindb(run):
def slurp_braindb(run, brain_alternate=None):
def slurp_braindb(run): """ Reads extra configuration from braindump database Param: run is the run directory Returns: Dictionary with the configuration, empty if error """ my_config = {} my_braindb = os.path.join(run, brainbase) try: my_file = open(my_braindb, 'r') except: # Error opening file return my_config for line in my_file: # Remove \r and/or \n from the end of the line line = line.rstrip("\r\n") # Split the line into a key and a value k, v = line.split(" ", 1) if k == "run" and v != run and run != '.': logger.warn("Warning: run directory mismatch, using %s" % (run)) my_config[k] = run else: # Remove leading and trailing whitespaces from value v = v.strip() my_config[k] = v # Close file my_file.close() # Done! logger.debug("# slurped %s" % (my_braindb)) return my_config
my_braindb = os.path.join(run, brainbase)
if brain_alternate is None:
    my_braindb = os.path.join(run, brainbase)
else:
    my_braindb = os.path.join(run, brain_alternate)
def slurp_braindb(run): """ Reads extra configuration from braindump database Param: run is the run directory Returns: Dictionary with the configuration, empty if error """ my_config = {} my_braindb = os.path.join(run, brainbase) try: my_file = open(my_braindb, 'r') except: # Error opening file return my_config for line in my_file: # Remove \r and/or \n from the end of the line line = line.rstrip("\r\n") # Split the line into a key and a value k, v = line.split(" ", 1) if k == "run" and v != run and run != '.': logger.warn("Warning: run directory mismatch, using %s" % (run)) my_config[k] = run else: # Remove leading and trailing whitespaces from value v = v.strip() my_config[k] = v # Close file my_file.close() # Done! logger.debug("# slurped %s" % (my_braindb)) return my_config
return time.strftime("%Y%m%dT%H%M%S%z", my_time)
if utc:
    return time.strftime("%Y%m%dT%H%M%SZ", my_time)
else:
    return time.strftime("%Y%m%dT%H%M%S%z", my_time)
def isodate(now=int(time.time()), utc=False, short=False):
    """
    This function converts seconds since epoch into ISO timestamp
    """
    if utc:
        my_time = time.gmtime(now)
    else:
        my_time = time.localtime(now)

    if short:
        return time.strftime("%Y%m%dT%H%M%S%z", my_time)
    else:
        return time.strftime("%Y-%m-%dT%H:%M:%S%z", my_time)
return time.strftime("%Y-%m-%dT%H:%M:%S%z", my_time)
if utc:
    return time.strftime("%Y-%m-%dT%H:%M:%SZ", my_time)
else:
    return time.strftime("%Y-%m-%dT%H:%M:%S%z", my_time)
def isodate(now=int(time.time()), utc=False, short=False):
    """
    This function converts seconds since epoch into ISO timestamp
    """
    if utc:
        my_time = time.gmtime(now)
    else:
        my_time = time.localtime(now)

    if short:
        return time.strftime("%Y%m%dT%H%M%S%z", my_time)
    else:
        return time.strftime("%Y-%m-%dT%H:%M:%S%z", my_time)
my_time = my_time - my_offset
if tz[0] == '-':
    my_time = my_time + my_offset
else:
    my_time = my_time - my_offset
def epochdate(timestamp, short=False): """ This function converts an ISO timestamp into seconds since epoch Set short to False when the timestamp is in the YYYY-MM-DDTHH:MM:SSZZZ:ZZ format Set short to True when the timestamp is in the YYYYMMDDTHHMMSSZZZZZ format """ try: # Split date/time and timezone information m = parse_iso8601.search(timestamp) if m is None: logger.warn("ERROR: Unable to match \"%s\" to ISO 8601" % timestamp) return None else: dt = "%04d-%02d-%02d %02d:%02d:%02d" % (int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)), int(m.group(5)), int(m.group(6))) tz = m.group(8) # my_time = datetime.datetime.strptime(dt, "%Y-%m-%d %H:%M:%S") my_time = datetime.datetime(*(time.strptime(dt, "%Y-%m-%d %H:%M:%S")[0:6])) if tz.upper() != 'Z': # no zulu time, has zone offset my_offset = datetime.timedelta(hours=int(m.group(9)),minutes=int(m.group(10))) # Subtract offset my_time = my_time - my_offset # Turn my_time into Epoch format return int(calendar.timegm(my_time.timetuple())) except: logger.warn("ERROR: Unable to parse timestamp \"%s\"" % timestamp) return None
print "Testing isodate() function" print " long local timestamp:", isodate() print " long utc timestamp:", isodate(utc=True) print "short local timestamp:", isodate(short=True) print " short utc timestamp:", isodate(utc=True,short=True)
now = int(time.time())
print "Testing isodate() function from now=%lu" % (now)
print " long local timestamp:", isodate(now=now)
print " long utc timestamp:", isodate(now=now, utc=True)
print "short local timestamp:", isodate(now=now, short=True)
print " short utc timestamp:", isodate(now=now, utc=True, short=True)
print
print "Testing epochdate() function from above ISO dates"
print " long local epochdate:", epochdate(isodate(now=now))
print " long utc epochdate:", epochdate(isodate(now=now, utc=True))
print "short local timestamp:", epochdate(isodate(now=now, short=True))
print " short utc timestamp:", epochdate(isodate(now=now, utc=True, short=True))
def keep_foreground():
    """
    This function turns the program into almost a daemon, but keep in
    foreground for Condor. It does not take any parameters and does not
    return anything.
    """
    # Go to a safe place that is not susceptible to sudden umounts
    # FIX THIS: It may break some things
    try:
        os.chdir('/')
    except:
        logger.critical("could not chdir!")
        sys.exit(1)

    # Although we cannot set sid, we can still become process group leader
    try:
        os.setpgid(0, 0)
    except:
        logger.critical("could not setpgid!")
        sys.exit(1)
logger.warn("Warning: run directory mismatch, using %s" % (run))
logger.warn("run directory mismatch, using %s" % (run))
def slurp_braindb(run, brain_alternate=None): """ Reads extra configuration from braindump database Param: run is the run directory Returns: Dictionary with the configuration, empty if error """ my_config = {} if brain_alternate is None: my_braindb = os.path.join(run, brainbase) else: my_braindb = os.path.join(run, brain_alternate) try: my_file = open(my_braindb, 'r') except: # Error opening file return my_config for line in my_file: # Remove \r and/or \n from the end of the line line = line.rstrip("\r\n") # Split the line into a key and a value k, v = line.split(" ", 1) if k == "run" and v != run and run != '.': logger.warn("Warning: run directory mismatch, using %s" % (run)) my_config[k] = run else: # Remove leading and trailing whitespaces from value v = v.strip() my_config[k] = v # Close file my_file.close() # Done! logger.debug("# slurped %s" % (my_braindb)) return my_config
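A simplified, standalone sketch of the parsing loop above; the keys, values, and the sample string are made up for illustration:

sample = "pegasus_version 3.0\nrun /tmp/run0001\n"
config = {}
for line in sample.splitlines():
    # same idea as slurp_braindb: split each line into key and value on the
    # first space, then strip surrounding whitespace from the value
    k, v = line.split(" ", 1)
    config[k] = v.strip()
print config   # {'pegasus_version': '3.0', 'run': '/tmp/run0001'} (order may vary)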
def checkMyProxy( self , proxy=None, Time=100, checkRetrieverRenewer=False):
def checkMyProxy( self , userKerb=None, Time=100, checkRetrieverRenewer=False):
def checkMyProxy( self , proxy=None, Time=100, checkRetrieverRenewer=False): """ Note The Name is Really CONFUSING... but functionality is the same as for myproxy """ expires = None if userKerb == None: userKerb = self.getUserKerberos()
if not timeLeftLocal :
if not timeLeft :
def checkMyProxy( self , proxy=None, Time=100, checkRetrieverRenewer=False): """ Note The Name is Really CONFUSING... but functionality is the same as for myproxy """ expires = None if userKerb == None: userKerb = self.getUserKerberos()
maxEventsWritten = maxEvents, firstEvent = self.firstEvent, firstRun = self.workflowSpec.workflowRunNumber(), firstLumi = self.count)
maxEventsWritten=maxEvents, firstEvent=self.firstEvent, firstRun=self.workflowSpec.workflowRunNumber(), firstLumi=self.count, skipEvents=skipEvents)
def generateJobConfig(self, jobSpecNode): """ _generateJobConfig_
maxEvents = maxEvents, firstEvent = self.firstEvent, firstRun = self.workflowSpec.workflowRunNumber(), firstLumi = self.count)
maxEvents=maxEvents, firstEvent=self.firstEvent, firstRun=self.workflowSpec.workflowRunNumber(), firstLumi=self.count, skipEvents=skipEvents)
def generateJobConfig(self, jobSpecNode): """ _generateJobConfig_
logging.info("Retrying ldapsearch ... (%i/%i)" % (i, retries))
logging.debug("Retrying ldapsearch ... (%i/%i)" % (i, retries))
def ldapsearch(host, dn, filter, attr, logging, scope=ldap.SCOPE_SUBTREE, retries=5): timeout = 45 # seconds for i in range(retries+1): try: if i > 0: logging.info("Retrying ldapsearch ... (%i/%i)" % (i, retries)) time.sleep(i*10) con = ldap.initialize(host) # host = ldap://hostname[:port] bind = TimeoutFunction(con.simple_bind_s, timeout) try: bound = False bind() bound = True except TimeoutFunctionException: raise ldap.LDAPError("Bind timeout") con.search(dn, scope, filter, attr) try: x = con.result(all=1, timeout=timeout)[1] except ldap.SIZELIMIT_EXCEEDED: # Apparently too much output. Let's try to get one # entry at a time instead; that way we'll hopefully get # at least a part of the total output. logging.info("ldap.SIZELIMIT_EXCEEDED ...") x = [] con.search(dn, ldap.SCOPE_SUBTREE, filter, attr) tmp = con.result(all=0, timeout=timeout) while tmp: x.append(tmp[1][0]) try: tmp = con.result(all=0, timeout=timeout) except ldap.SIZELIMIT_EXCEEDED, e: break; con.unbind() break; except ldap.LDAPError, e: logging.debug("ldapsearch: got error '%s' for host %s" % (str(e), host)) if bound: con.unbind() else: raise e return x
self.logging.info("Trying GIIS %s, %s" % (root['host'], root['base']))
self.logging.debug("Trying GIIS %s, %s" % (root['host'], root['base']))
def pick_CEs_from_giis_trees(self, root, tags, vos, seList, blacklist, whitelist, full): """ Recursively traverse the GIIS tree, starting from 'root', return CEs fullfilling requirements. """
self.logging.info("No active (and valid) jobs!")
self.logging.info("No (valid) jobs to query")
def query(self, obj, service='', objType='node'): """ Query status and eventually other scheduler related information, and store it in the job.runningJob data structure.
cmdList.append('myproxy-logon -d -n -s %s -o %s -l \'%s\' -k %s -t 168:00'%\
cmdList.append('myproxy-logon -d -n -s %s -o %s -l \"%s\" -k %s -t 168:00'%\
def logonMyProxy( self, proxyCache, userDN, vo='cms', group=None, role=None): """ """
selectedBlock = chooseBlock(matchedBlocks.keys())
selectedBlock = self.chooseBlock(matchedBlocks.keys())
def getPileupFiles(self, *sites): """ _getPileupFiles_
if ( name == f['PFN']):
if ( name == os.path.basename(f['PFN'])):
def updateLFN(f, lfn, newLFN): """ _updateLFN_ Update a LFN. """ if f['LFN'] != lfn: return f['LFN'] = newLFN return
tmp = string.split(file_name, ".")
only_name = os.path.basename(file_name)
tmp = string.split(only_name, ".")
def updateLFN(f, lfn, newLFN): """ _updateLFN_ Update a LFN. """ if f['LFN'] != lfn: return f['LFN'] = newLFN return
tmp = string.split(file_name, "_"+n_job)
tmp = string.split(only_name, "_"+n_job)
def updateLFN(f, lfn, newLFN): """ _updateLFN_ Update a LFN. """ if f['LFN'] != lfn: return f['LFN'] = newLFN return
modifyFile(aFile, file_name)
modifyFile(aFile, os.path.basename(file_name), for_file)
def updateLFN(f, lfn, newLFN): """ _updateLFN_ Update a LFN. """ if f['LFN'] != lfn: return f['LFN'] = newLFN return
exitcode, outputs = self.executeCommand(cmd, timeout = tout)
exitcode, outputs = self.executeCommand(cmd, timeout = self.timeout)
def __init__(self):
    super(ProtocolUberFtp, self).__init__()
def hackTheEnv(prependCommand = ''): """ HaCk ThE eNv *IMPORTANT NOTES* - this hack is necessary if the SYSTEM python under '/usr/bin') and the EXTERNAL python have the same version - the hack is necessary only for CLI which are python(2) script - the hack reverts PATH & LD_LYBRARY_PATH if an external PYTHON is present - during the hack, replicate entries will be dropped - the hack MUST be placed between the 'proxyString'and the CLI command """ newEnv = prependCommand + ' ' try : pythonCategory = os.environ['PYTHON_CATEGORY'] pyVersionToRemove = os.environ['PYTHON_VERSION'] originalPath = os.environ['PATH'] originalLdLibPath = os.environ['LD_LIBRARY_PATH'] newPath = '' newLdLibPath = '' for x in list(set(originalPath.split(':'))) : if x.find(pyVersionToRemove) == -1 : newPath += x + ':' newEnv += 'PATH=' + newPath[:-1] + ' ' for x in list(set(originalLdLibPath.split(':'))) : if x.find(pyVersionToRemove) == -1 : newLdLibPath += x + ':' newEnv += 'LD_LIBRARY_PATH=' + newLdLibPath[:-1] + ' ' """ originalPythonPath = os.environ['PYTHONPATH'] newPythonPath = '' for x in list(set(originalPythonPath.split(':'))) : if x.find(pyVersionToRemove) == -1 : newPythonPath += x + ':' newEnv += 'PYTHONPATH=' + newPythonPath[:-1] + ' ' """ except : pass return newEnv
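A standalone sketch of the PATH filtering idea used by hackTheEnv above; the version string and directories are invented examples:

py_version = "2.6.4"                       # stands in for $PYTHON_VERSION
path = "/usr/bin:/opt/external/python/2.6.4/bin:/bin"

# keep only the entries that do not mention the external python version
kept = [p for p in path.split(':') if p.find(py_version) == -1]
print "PATH=" + ':'.join(kept)             # PATH=/usr/bin:/bin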
def decodeSubmit(self, jsonString): """ specialized method to decode JSON output of glite-wms-job-submit """ # pre-processing the string before decoding toParse = jsonString.replace( '\n' , ',' ) toParse = self.pattern1.sub(r'{ "\1', toParse[:-1] ) toParse = self.pattern2.sub(r'":"\1', toParse ) toParse = self.pattern3.sub(r'\1","\2"', toParse ) toParse = self.pattern4.sub(r'\1","\2":', toParse ) toParse = self.pattern5.sub(r'}', toParse) toParse = self.pattern6.sub(r'\1"}', toParse) parsedJson = self.decode(toParse) return parsedJson
return self.ExecuteCommand( self.proxyString + command )[0]
out, ret = self.ExecuteCommand( self.proxyString + self.hackEnv + command )
return out
def postMortem( self, schedulerId, outfile, service): """ perform scheduler logging-info """ command = "glite-wms-job-logging-info -v 3 " + schedulerId + \ " > " + outfile return self.ExecuteCommand( self.proxyString + command )[0]
outJson, ret = self.ExecuteCommand(self.proxyString + command)
outJson, ret = self.ExecuteCommand(self.proxyString + self.hackEnv + command)
def query(self, obj, service='', objType='node') : """ query status and eventually other scheduler related information """ # jobId for remapping jobIds = {}
returncode = -666666
returncode = 0
def executeCommand(self, command, timeout=None , stderr=False): """ _executeCommand_
self.logging.info('Your server credential will expire in:\n\t%s hours %s minutes %s seconds\n'%(hours,minutes,seconds))
logMsg = 'Your credential for the required server will expire in:\n\t'
logMsg += '%s hours %s minutes %s seconds\n' % (hours, minutes, seconds)
self.logging.info(logMsg)
def checkMyProxy( self , proxy=None, Time=4, checkRetrieverRenewer=False): """ """ if proxy == None: proxy=self.getUserProxy() ## check the myproxy server valid = True
and subsequent lines are indented by at least 1 space.
and subsequent lines are indented by at least 1 space or start with "This job was only very recently submitted".
def splitNgstatOutput(output): """ Split a string of ngstat output into a list with one job per list item. The assumption is that the first line of a job has no indentation, and subsequent lines are indented by at least 1 space. """ jobs = [] s = "" for line in output.split('\n'): if len(line) == 0: continue if line[0].isspace(): s += line + '\n' else: if len(s) > 0: jobs.append(s) s = line + '\n' if len(s) > 0: jobs.append(s) return jobs
s += line + '\n'
    s += '\n' + line
elif re.match("This job was only very recently submitted", line):
    s += ' ' + line
def splitNgstatOutput(output): """ Split a string of ngstat output into a list with one job per list item. The assumption is that the first line of a job has no indentation, and subsequent lines are indented by at least 1 space. """ jobs = [] s = "" for line in output.split('\n'): if len(line) == 0: continue if line[0].isspace(): s += line + '\n' else: if len(s) > 0: jobs.append(s) s = line + '\n' if len(s) > 0: jobs.append(s) return jobs
jobs.append(s)
s = line + '\n'
jobs.append(s + '\n')
s = line
def splitNgstatOutput(output): """ Split a string of ngstat output into a list with one job per list item. The assumption is that the first line of a job has no indentation, and subsequent lines are indented by at least 1 space. """ jobs = [] s = "" for line in output.split('\n'): if len(line) == 0: continue if line[0].isspace(): s += line + '\n' else: if len(s) > 0: jobs.append(s) s = line + '\n' if len(s) > 0: jobs.append(s) return jobs
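A usage sketch for splitNgstatOutput as defined above; the ngstat-style lines are invented, only the indentation pattern matters:

sample = ("Job gsiftp://ce.example.org:2811/jobs/123\n"
          "  Status: FINISHED\n"
          "Job gsiftp://ce.example.org:2811/jobs/456\n"
          "  Status: INLRMS:R\n")

blocks = splitNgstatOutput(sample)
print len(blocks)      # 2, one block per job
print blocks[0]        # first job line plus its indented status line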
args = job['arguments'].replace('\\"', '').replace('\\', '')
xrsl += '(arguments=%s)' % args
if job['arguments']:
    args = job['arguments'].replace('\\"', '').replace('\\', '')
    xrsl += '(arguments=%s)' % args
def decode(self, job, task, requirements=''): """ prepare scheduler specific job description
for s in task['jobType'].split('&&'):
    if re.match('^ *\(.*=.*\) *$', s):
        xrsl += s
if task['jobType']:
    for s in task['jobType'].split('&&'):
        if re.match('^ *\(.*=.*\) *$', s):
            xrsl += s
def decode(self, job, task, requirements=''): """ prepare scheduler specific job description
xrsl_file = os.path.dirname(task['cfgName']) + '/job.xrsl'
xrsl_file = os.path.dirname(task['cfgName'] or './') + '/%s-jobs.xrsl' % task['name']
def submit(self, task, requirements='', config='', service = ''): """ set up submission parameters and submit uses self.decode()
arcIdMatch = re.search("(\w+://([a-zA-Z0-9.-]+)\S*/\d*)", output)
arcIdMatch = re.search("(\w+://([a-zA-Z0-9.-]+)\S*/\d*)", jobstring)
def query(self, obj, service='', objType='node'): """ Query status and eventually other scheduler related information, and store it in the job.runningJob data structure.
arcIdMatch = re.search("URL: (\w+://([a-zA-Z0-9.-]+)\S*/\d*)", output)
arcIdMatch = re.search("URL: (\w+://([a-zA-Z0-9.-]+)\S*/\d*)", jobstring)
def query(self, obj, service='', objType='node'): """ Query status and eventually other scheduler related information, and store it in the job.runningJob data structure.
exitcode = -1
def createDir(self, source, opt = "", tout = None): """ _createDir_ """ exitcode = -1 outputs = "" ll = source.getLynk() source_fullpath = ll.split("file://",1)[1] if self.checkExists(source, opt) is False: cmd = "/bin/mkdir -m 775 -p " + opt + " " + source_fullpath exitcode, outputs = self.executeCommand(cmd, timeout = tout) if exitcode != 0: raise OperationException("Error creating [" +source_fullpath \ + "]\n "+outputs)
int(float(self.eventsPerJob) / float(efficiency))
int(float(self.eventsPerMCDBJob) / float(efficiency))
def generateJobConfig(self, jobSpecNode): """ _generateJobConfig_
ifile = ifile[1:]
ifile = ifile
def collectionJdlFile ( self, task, requirements='' ): """ build a collection jdl easy to be handled by the wmproxy API interface and gives back the list of input files for a better handling """ # general part for task jdl = "[\n" jdl += 'Type = "collection";\n'
if job.runningJob['status'] == 'C' : continue
def query(self, obj, service='', objType='node'): """ query status of jobs """
logging.WARNING( 'Warning: an error occurred killing subprocess [%s]' \ % str(err) )
pass
def executeCommand(self, command, timeout=None , stderr=False): """ _executeCommand_
logging.WARNING( 'Warning: an error occurred closing subprocess [%s] %s %s' \ % (str(err), ''.join(outc)+''.join(errc), p.returncode ))
pass
def executeCommand(self, command, timeout=None , stderr=False): """ _executeCommand_
logging.DEBUG(command)
logging.DEBUG(returncode)
logging.DEBUG(''.join(outc))
logging.DEBUG(''.join(errc))
def executeCommand(self, command, timeout=None , stderr=False): """ _executeCommand_
self.pattern1 = re.compile('\{,[\s]*([a-zA-Z0-9_\-\+\=])')
self.pattern2 = re.compile(':[\s]([a-zA-Z_\-\+\=])')
self.pattern3 = re.compile(
    '[\s]*([a-zA-Z0-9_\-\+\=]*),[\s]*([a-zA-Z0-9_\-\+\=]*)"')
self.pattern4 = re.compile(
    '[\s]*([a-zA-Z0-9_\-\+\=]*),[\s]*([a-zA-Z0-9_\-\+\=]*):')
self.pattern5 = re.compile(',[\s]*}(?!"[\s]*[a-zA-Z0-9_\-\+\=]*)')
self.pattern6 = re.compile('([a-zA-Z0-9_\-\+\=])}')
self.pattern1 = re.compile('([^ \t\n\r\f\v\{\}]+)\s')
self.pattern2 = re.compile(':"(["|\{])')
self.pattern3 = re.compile('"[\s]*"')
def __init__(self): # call super super(BossliteJsonDecoder, self).__init__() # cache pattern to optimize reg-exp substitution self.pattern1 = re.compile('\{,[\s]*([a-zA-Z0-9_\-\+\=])') self.pattern2 = re.compile(':[\s]([a-zA-Z_\-\+\=])') self.pattern3 = re.compile( '[\s]*([a-zA-Z0-9_\-\+\=]*),[\s]*([a-zA-Z0-9_\-\+\=]*)"') self.pattern4 = re.compile( '[\s]*([a-zA-Z0-9_\-\+\=]*),[\s]*([a-zA-Z0-9_\-\+\=]*):') self.pattern5 = re.compile(',[\s]*}(?!"[\s]*[a-zA-Z0-9_\-\+\=]*)') self.pattern6 = re.compile('([a-zA-Z0-9_\-\+\=])}')
toParse = jsonString.replace( '\n' , ',' )
toParse = self.pattern1.sub(r'{ "\1', toParse[:-1] )
toParse = self.pattern2.sub(r'":"\1', toParse )
toParse = self.pattern3.sub(r'\1","\2"', toParse )
toParse = self.pattern4.sub(r'\1","\2":', toParse )
toParse = self.pattern5.sub(r'}', toParse)
toParse = self.pattern6.sub(r'\1"}', toParse)
toParse = jsonString.replace( '\n' , ' ' )
toParse = self.pattern1.sub(r'"\1"', toParse )
toParse = self.pattern2.sub(r'":\1', toParse )
toParse = self.pattern3.sub(r'","', toParse )
def decodeSubmit(self, jsonString): """ specialized method to decode JSON output of glite-wms-job-submit """ # pre-processing the string before decoding toParse = jsonString.replace( '\n' , ',' ) toParse = self.pattern1.sub(r'{ "\1', toParse[:-1] ) toParse = self.pattern2.sub(r'":"\1', toParse ) toParse = self.pattern3.sub(r'\1","\2"', toParse ) toParse = self.pattern4.sub(r'\1","\2":', toParse ) toParse = self.pattern5.sub(r'}', toParse) toParse = self.pattern6.sub(r'\1"}', toParse) parsedJson = self.decode(toParse)
'hr':'R',
def __init__( self, **args):
'DONE':'SD'
'Done':'SD'
def __init__( self, **args):
map[ 'statusScheduler' ] = st
if st=='r': map[ 'statusScheduler' ] = 'Running'
def queryLocal(self, schedIdList, objType='node' ) :
st = "DONE"
st = "Done"
def queryLocal(self, schedIdList, objType='node' ) :
if not mKilled: raise SchedulerError ( "Unable to kill job "+jobid+" . Reason: ", out )
mKilled2= r2.search(out) if not mKilled and not mKilled2: raise SchedulerError ( "Unable to kill job
def kill( self, obj ): """ kill the job instance
self.session.commit()
if self.database == "MySQL": self.session.commit()
def modify(self, query): """ execute a query which does not return such as insert/update/delete """
self.fresh_env = 'unset LD_LIBRARY_PATH; export PATH=/usr/bin:/bin; source /etc/profile; source %s ; '%env
self.fresh_env = 'unset LD_LIBRARY_PATH; unset GLITE_ENV_SET; export PATH=/usr/bin:/bin; source /etc/profile; source %s ; '%env
def __init__(self): super(ProtocolLcgUtils, self).__init__() self.options = " --verbose " self.options += " --vo=cms " env = '' source = self.expandEnv('RUNTIME_AREA', '/CacheEnv.sh') if os.path.isfile(str(source).strip()): env = str(source) vars = {\ 'OSG_GRID': '/setup.sh', \ 'GLITE_WMS_LOCATION': '/etc/profile.d/glite-wmsui.sh', \ 'GLITE_LOCATION': '/../etc/profile.d/grid-env.sh', \
self.killThreshold = 100
def __init__( self, **args): # call super class init method super(SchedulerGLite, self).__init__(**args) # some initializations self.warnings = [] # typical options self.vo = args.get( "vo", "cms" ) self.service = args.get( "service", "" ) self.config = args.get( "config", "" ) self.delegationId = args.get( "proxyname", "bossproxy" ) # rename output files with submission number self.renameOutputFiles = args.get( "renameOutputFiles", 0 ) self.renameOutputFiles = int( self.renameOutputFiles ) # x509 string & hackEnv for CLI commands if self.cert != '': self.proxyString = "env X509_USER_PROXY=" + self.cert + ' ' self.hackEnv = hackTheEnv() else : self.proxyString = '' self.hackEnv = hackTheEnv('env') # this section requires an improvement.... if os.environ.get('CRABDIR') : self.commandQueryPath = os.environ.get('CRABDIR') + \ '/external/ProdCommon/BossLite/Scheduler/' elif os.environ.get('PRODCOMMON_ROOT') : self.commandQueryPath = os.environ.get('PRODCOMMON_ROOT') + \ '/lib/ProdCommon/BossLite/Scheduler/' else : # Impossible to locate GLiteQueryStatus.py ... raise SchedulerError('Impossible to locate GLiteQueryStatus.py ') # cache pattern to optimize reg-exp substitution self.pathPattern = re.compile('location:([\S]+)$', re.M) self.patternCE = re.compile('(?<= - ).*(?=:)', re.M) # init BossliteJsonDecoder specialized class self.myJSONDecoder = BossliteJsonDecoder()
schedIdList = ""
def kill( self, obj ): """ kill job """ # the object passed is a job if type(obj) == Job and self.valid( obj.runningJob ): # check for the RunningJob integrity schedIdList = str( obj.runningJob['schedulerId'] ).strip() # the object passed is a Task elif type(obj) == Task : schedIdList = "" for job in obj.jobs: if not self.valid( job.runningJob ): continue schedIdList += " " + \ str( job.runningJob['schedulerId'] ).strip() command = "glite-wms-job-cancel --json --noint " + schedIdList out, ret = self.ExecuteCommand( self.proxyString + command ) if ret != 0 : raise SchedulerError('error executing glite-wms-job-cancel', out) elif ret == 0 and out.find("result: success") == -1 : raise SchedulerError('error', out)
schedIdList += " " + \
               str( job.runningJob['schedulerId'] ).strip()

command = "glite-wms-job-cancel --json --noint " + schedIdList
out, ret = self.ExecuteCommand( self.proxyString + command )

if ret != 0 :
    raise SchedulerError('error executing glite-wms-job-cancel', out)
elif ret == 0 and out.find("result: success") == -1 :
    raise SchedulerError('error', out)
jobsToKill.append(str( job.runningJob['schedulerId'] ).strip())

chunk = lambda ulist, step: map(lambda i: ulist[i:i+step],
                                xrange(0, len(ulist), step))
lljobs = chunk(jobsToKill, self.killThreshold)

for x in lljobs :
    schedIdList = ' '.join(x)
    command = "glite-wms-job-cancel --json --noint " + schedIdList
    out, ret = self.ExecuteCommand( self.proxyString + command )
    if ret != 0 :
        raise SchedulerError('error executing glite-wms-job-cancel', out)
    elif ret == 0 and out.find("result: success") == -1 :
        raise SchedulerError('error', out)

return 0
def kill( self, obj ): """ kill job """ # the object passed is a job if type(obj) == Job and self.valid( obj.runningJob ): # check for the RunningJob integrity schedIdList = str( obj.runningJob['schedulerId'] ).strip() # the object passed is a Task elif type(obj) == Task : schedIdList = "" for job in obj.jobs: if not self.valid( job.runningJob ): continue schedIdList += " " + \ str( job.runningJob['schedulerId'] ).strip() command = "glite-wms-job-cancel --json --noint " + schedIdList out, ret = self.ExecuteCommand( self.proxyString + command ) if ret != 0 : raise SchedulerError('error executing glite-wms-job-cancel', out) elif ret == 0 and out.find("result: success") == -1 : raise SchedulerError('error', out)
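A standalone illustration of the chunking introduced above (the threshold and ids are sample values): long id lists are split into fixed-size groups so that each glite-wms-job-cancel call stays below killThreshold:

chunk = lambda ulist, step: map(lambda i: ulist[i:i+step],
                                xrange(0, len(ulist), step))

ids = ['id%d' % i for i in range(7)]
print chunk(ids, 3)    # [['id0', 'id1', 'id2'], ['id3', 'id4', 'id5'], ['id6']]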
r = re.compile('^(.*:.*/jobmanager-.*?)-(.*)')
if DEBUG > 1:
    print 'removeQueues: input %s' % celist
r = re.compile('^(.*:.*/(jobmanager|nordugrid|cream)-.*?)-(.*)')
def removeQueues(celist): """ Given a list of CEUniqueIDs, return a list of jobmanager contact strings. """ r = re.compile('^(.*:.*/jobmanager-.*?)-(.*)') jmlist = [] for x in celist: m = r.match(x) if m: item = m.groups()[0] if (jmlist.count(item) == 0): jmlist.append(item) return jmlist
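A hedged example of the broadened pattern above; the CE contact strings are invented but follow the host:port/flavour-lrms-queue shape that removeQueues strips down to a contact string:

import re

r = re.compile('^(.*:.*/(jobmanager|nordugrid|cream)-.*?)-(.*)')
for ce in ['ce.example.org:2119/jobmanager-condor-cms',
           'ce.example.org:8443/cream-pbs-cmslong']:
    m = r.match(ce)
    if m:
        print m.groups()[0]
# ce.example.org:2119/jobmanager-condor
# ce.example.org:8443/cream-pbs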
self.pattern1 = re.compile('\{,[\s]*([a-zA-Z0-9_\-])')
self.pattern2 = re.compile(':[\s]([a-zA-Z_\-])')
self.pattern1 = re.compile('\{,[\s]*([a-zA-Z0-9_\-\+\=])')
self.pattern2 = re.compile(':[\s]([a-zA-Z_\-\+\=])')
def __init__(self): # call super super(BossliteJsonDecoder, self).__init__() # cache pattern to optimize reg-exp substitution self.pattern1 = re.compile('\{,[\s]*([a-zA-Z0-9_\-])') self.pattern2 = re.compile(':[\s]([a-zA-Z_\-])') self.pattern3 = re.compile( '[\s]*([a-zA-Z0-9_\-]*),[\s]*([a-zA-Z0-9_\-]*)"') self.pattern4 = re.compile( '[\s]*([a-zA-Z0-9_\-]*),[\s]*([a-zA-Z0-9_\-]*):') self.pattern5 = re.compile(',[\s]*}(?!"[\s]*[a-zA-Z0-9_\-]*)') self.pattern6 = re.compile('([a-zA-Z0-9_\-])}')
'[\s]*([a-zA-Z0-9_\-]*),[\s]*([a-zA-Z0-9_\-]*)"')
'[\s]*([a-zA-Z0-9_\-\+\=]*),[\s]*([a-zA-Z0-9_\-\+\=]*)"')
def __init__(self): # call super super(BossliteJsonDecoder, self).__init__() # cache pattern to optimize reg-exp substitution self.pattern1 = re.compile('\{,[\s]*([a-zA-Z0-9_\-])') self.pattern2 = re.compile(':[\s]([a-zA-Z_\-])') self.pattern3 = re.compile( '[\s]*([a-zA-Z0-9_\-]*),[\s]*([a-zA-Z0-9_\-]*)"') self.pattern4 = re.compile( '[\s]*([a-zA-Z0-9_\-]*),[\s]*([a-zA-Z0-9_\-]*):') self.pattern5 = re.compile(',[\s]*}(?!"[\s]*[a-zA-Z0-9_\-]*)') self.pattern6 = re.compile('([a-zA-Z0-9_\-])}')
'[\s]*([a-zA-Z0-9_\-]*),[\s]*([a-zA-Z0-9_\-]*):') self.pattern5 = re.compile(',[\s]*}(?!"[\s]*[a-zA-Z0-9_\-]*)') self.pattern6 = re.compile('([a-zA-Z0-9_\-])}')
'[\s]*([a-zA-Z0-9_\-\+\=]*),[\s]*([a-zA-Z0-9_\-\+\=]*):') self.pattern5 = re.compile(',[\s]*}(?!"[\s]*[a-zA-Z0-9_\-\+\=]*)') self.pattern6 = re.compile('([a-zA-Z0-9_\-\+\=])}')
def __init__(self): # call super super(BossliteJsonDecoder, self).__init__() # cache pattern to optimize reg-exp substitution self.pattern1 = re.compile('\{,[\s]*([a-zA-Z0-9_\-])') self.pattern2 = re.compile(':[\s]([a-zA-Z_\-])') self.pattern3 = re.compile( '[\s]*([a-zA-Z0-9_\-]*),[\s]*([a-zA-Z0-9_\-]*)"') self.pattern4 = re.compile( '[\s]*([a-zA-Z0-9_\-]*),[\s]*([a-zA-Z0-9_\-]*):') self.pattern5 = re.compile(',[\s]*}(?!"[\s]*[a-zA-Z0-9_\-]*)') self.pattern6 = re.compile('([a-zA-Z0-9_\-])}')
jdl += 'InputSandboxBaseURI = "%s";\n' % task['startDirectory']
def collectionJdlFile ( self, task, requirements='' ): """ build a collection jdl easy to be handled by the wmproxy API interface and gives back the list of input files for a better handling """ # general part for task jdl = "[\n" jdl += 'Type = "collection";\n'
if ifile.find( 'file:/' ) == 0:
    globalSandbox += '"' + ifile + '",'
if ifile.find( 'file:/' ) <= 0:
    globalSandbox += '"' + task['startDirectory'] + ifile + '",'
def collectionJdlFile ( self, task, requirements='' ): """ build a collection jdl easy to be handled by the wmproxy API interface and gives back the list of input files for a better handling """ # general part for task jdl = "[\n" jdl += 'Type = "collection";\n'
fileout = out.split()
fileout[3] = self.__convertPermission__(out[3])
outt.append( fileout )
if out:
    fileout = out.split()
    fileout[3] = self.__convertPermission__(out[3])
    outt.append( fileout )
def getFileInfo(self, source, token = None, opt = ""): """ rfdir
if self.valid( job.runningJob ) or objType == 'node':
if self.valid( job.runningJob ) :
def query(self, obj, service='', objType='node') : """ query status and eventually other scheduler related information """
formattedParentIds = ','.join(parentIds) formattedJobIds = ','.join(jobIds) command = 'python ' + self.commandQueryPath \ + 'GLiteStatusQuery.py --parentId=%s --jobId=%s' \ % (formattedParentIds, formattedJobIds) outJson, ret = self.ExecuteCommand( self.prefixCommandQuery + \ self.proxyString + command ) try: out = json.loads(outJson) except ValueError: raise SchedulerError('error parsing JSON', out ) if ret != 0 or out['errors']: obj.warnings.append( "Errors: " + str(out['errors']) ) raise SchedulerError('error executing GLiteStatusQuery', \ str(out['errors'])) count = 0 newStates = out['statusQuery'] for jobId in jobIds.values() : obj.jobs[jobId].runningJob['status'] = \ newStates[count]['status'] obj.jobs[jobId].runningJob['scheduledAtSite'] = \ newStates[count]['scheduledAtSite'] obj.jobs[jobId].runningJob['startTime'] = \ newStates[count]['startTime'] obj.jobs[jobId].runningJob['service'] = \ newStates[count]['service'] obj.jobs[jobId].runningJob['statusScheduler'] = \ newStates[count]['statusScheduler'] obj.jobs[jobId].runningJob['destination'] = \ newStates[count]['destination'] obj.jobs[jobId].runningJob['statusReason'] = \ newStates[count]['statusReason'] obj.jobs[jobId].runningJob['lbTimestamp'] = \ newStates[count]['lbTimestamp'] obj.jobs[jobId].runningJob['stopTime'] = \ newStates[count]['stopTime'] count += 1
if jobIds : formattedParentIds = ','.join(parentIds) formattedJobIds = ','.join(jobIds) command = 'python ' + self.commandQueryPath \ + 'GLiteStatusQuery.py --parentId=%s --jobId=%s' \ % (formattedParentIds, formattedJobIds) outJson, ret = self.ExecuteCommand( self.prefixCommandQuery + \ self.proxyString + command) try: out = json.loads(outJson) except ValueError: raise SchedulerError('error parsing JSON', out ) if ret != 0 or out['errors']: obj.warnings.append( "Errors: " + str(out['errors']) ) raise SchedulerError('error executing GLiteStatusQuery', \ str(out['errors'])) count = 0 newStates = out['statusQuery'] for jobId in jobIds.values() : obj.jobs[jobId].runningJob['status'] = \ newStates[count]['status'] obj.jobs[jobId].runningJob['scheduledAtSite'] = \ newStates[count]['scheduledAtSite'] obj.jobs[jobId].runningJob['startTime'] = \ newStates[count]['startTime'] obj.jobs[jobId].runningJob['service'] = \ newStates[count]['service'] obj.jobs[jobId].runningJob['statusScheduler'] = \ newStates[count]['statusScheduler'] obj.jobs[jobId].runningJob['destination'] = \ newStates[count]['destination'] obj.jobs[jobId].runningJob['statusReason'] = \ newStates[count]['statusReason'] obj.jobs[jobId].runningJob['lbTimestamp'] = \ newStates[count]['lbTimestamp'] obj.jobs[jobId].runningJob['stopTime'] = \ newStates[count]['stopTime'] count += 1
def query(self, obj, service='', objType='node') : """ query status and eventually other scheduler related information """
for h in host_list:
for h in deepcopy(host_list):
def getSoftwareAndArch(host_list, software, arch, bdii='exp-bdii.cern.ch'): """ Given a list of CEs, return only those that match a given software and architecture tag """ generateMaps(host_list, bdii) results_list = [] if (software): software = 'VO-cms-' + software else: software = '*' if (arch): arch = 'VO-cms-' + arch else: arch = '*' query = "'(&(GlueHostApplicationSoftwareRunTimeEnvironment="+software+ ")" query += "(GlueHostApplicationSoftwareRunTimeEnvironment="+arch+")" clusterlist = [] for h in host_list: try: clusterlist.append(ce_to_cluster_map[h]) except KeyError: # CE does not map to a Cluster; remove it! host_list.remove(h) query += buildOrQuery('GlueChunkKey=GlueClusterUniqueID', clusterlist) query += ")" pout = runldapquery(query, 'GlueHostApplicationSoftwareRunTimeEnvironment GlueChunkKey', bdii) clusterlist = [x[0][1]['GlueChunkKey'][0] for x in pout] results_list = [] for jm in host_list: cluster = "GlueClusterUniqueID=" + ce_to_cluster_map[jm] if (clusterlist.count(cluster) != 0): results_list.append(jm) return results_list
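A small standalone sketch of why the loop above now iterates over deepcopy(host_list): removing elements from the list being iterated skips entries, while iterating over a copy removes them all (the host names and the predicate are placeholders):

from copy import deepcopy

hosts = ['ce1', 'ce2', 'ce3']
for h in deepcopy(hosts):
    if h != 'ce3':          # pretend ce1 and ce2 have no cluster mapping
        hosts.remove(h)
print hosts                 # ['ce3']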
return os.path.join(dir, name)
return os.path.join(path, name)
def joinPath(self, path, name):
    """
    joining files with base directory
    """
    if path is None or path == '' :
        return name
if ret != 0 :
if ret != 0 and jobid != "None":
def query(self, obj, service='', objType='node') : """ query status and eventually other scheduler related information It may use single 'node' scheduler id or bulk id for association """ if type(obj) != Task : raise SchedulerError('wrong argument type', str( type(obj) ))
'Done':'SD'}
'Done':'SD', 'C':'SD'}
def __init__( self, **args):
    super(SchedulerPbs, self).__init__(**args)
    self.jobScriptDir=args['jobScriptDir']
    self.jobResDir=args['jobResDir']
    self.queue=args['queue']
s.append('cd $PBS_O_WORKDIR')
if self.workerNodeWorkDir:
    s.append('cd ' + self.workerNodeWorkDir)
def submitJob ( self, conn, job, task=None, requirements=''):
    """ Need to copy the inputsandbox to WN before submitting a job"""
command = "glite-wms-job-output --json --noint " \
command = "glite-wms-job-output --json --noint --dir " + outdir + " " \
def getOutput( self, obj, outdir='' ):
    """
    retrieve job output
    """

    if type(obj) == Job :

        # check for the RunningJob integrity
        if not self.valid( obj.runningJob ):
            raise SchedulerError('invalid object', str( obj.runningJob ))

        # the object passed is a valid Job, let's go on ...
        command = "glite-wms-job-output --json --noint " \
                  + obj.runningJob['schedulerId']
        out, ret = self.ExecuteCommand( self.proxyString + command )

        if ret != 0 :
            if out.find("Proxy File Not Found") != -1 :
                # Proxy missing
                #
                # adapting the error string for JobOutput requirements
                obj.runningJob.errors.append("Proxy Missing")
            elif out.find("Output files already retrieved") != -1 :
                # Output files already retrieved --> Archive!
                self.logging.warning( obj.runningJob['schedulerId'] + \
                                      ' output already retrieved.' )
                obj.runningJob.warnings.append("Job has been purged, " + \
                                               "recovering status")
            else :
                self.logging.error( out )
                obj.runningJob.errors.append( out )
        elif ret == 0 and out.find("result: success") == -1 :
            # Excluding all the previous cases however something went wrong
            self.logging.error( obj.runningJob['schedulerId'] + \
                                ' problems during getOutput operation.' )
            obj.runningJob.errors.append(out)
        else :
            # Output successfully retrieved without problems
            # let's copy in the right place...
            tmp = re.search(self.pathPattern, out)
            command = "cp -R " + tmp.group(1) + "/* " + outdir + "/"
            os.system( command )
            command = "rm -rf " + tmp.group(1)
            os.system( command )
            # self.logging.debug("Output of %s successfully retrieved" % str(obj.runningJob['schedulerId']))

        if obj.runningJob.isError() :
            raise SchedulerError( obj.runningJob.errors[0][0], \
                                  obj.runningJob.errors[0][1] )

    elif type(obj) == Task :

        # the object passed is a Task
        for selJob in obj.jobs:

            if not self.valid( selJob.runningJob ):
                continue

            command = "glite-wms-job-output --json --noint " + \
                      selJob.runningJob['schedulerId']
            out, ret = self.ExecuteCommand( self.proxyString + command )
command = "cp -R " + tmp.group(1) + "/* " + outdir + "/"
command = "mv " + tmp.group(1) + "/* " + outdir + "/"
command = "glite-wms-job-output --json --noint " + \ selJob.runningJob['schedulerId']
command = "glite-wms-job-output --json --noint --dir " + outdir + " " \ + selJob.runningJob['schedulerId']
tmp = tmp.replace('doesn"t',"doesn't")
def dumps(self, myString):
    """
    the same interface as simplejson ...
    """
    tmp = str(myString)
    tmp = tmp.replace('\'','"')
    tmp = tmp.replace('None','null')
    return tmp
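The added `doesn"t` fix-up exists because this fallback encoder turns a Python dict into JSON by blanket-replacing every single quote with a double quote, which also rewrites apostrophes inside string values. A small sketch of the failure the new line repairs; the dict contents are made up for illustration:

    # Naive str() -> JSON conversion, as in the fallback encoder above.
    status = {'statusReason': "proxy doesn't exist"}

    tmp = str(status)             # {'statusReason': "proxy doesn't exist"}
    tmp = tmp.replace('\'', '"')  # every ' becomes ", including the apostrophe
    print(tmp)                    # {"statusReason": "proxy doesn"t exist"}  <- invalid JSON

    # The added line puts the apostrophe back for this common contraction:
    tmp = tmp.replace('doesn"t', "doesn't")
    print(tmp)                    # {"statusReason": "proxy doesn't exist"}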
(r, w, e) = select.select([fd], [], [], None)
(r, w, e) = select.select([fd, fde], [], [], None)
def executeCommand(self, command, timeout=None , stderr=False):
    """
    _executeCommand_
if fd in r or fde in r:
if fd in r:
try:
    readerr = p.stderr.read()
except:
    pass
if fde in r:
    readerr = p.stderr.read()
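The three rows above change `executeCommand` to `select()` on both the stdout and stderr descriptors of the child process instead of stdout alone, so a child that fills its stderr pipe first cannot deadlock the parent. A self-contained sketch of that pattern under those assumptions; the `fd`/`fde` names follow the snippets above and the shell command is arbitrary:

    import os
    import select
    import subprocess

    # Spawn a child that writes to both pipes; reading only stdout could
    # deadlock if the stderr pipe buffer ever fills up.
    p = subprocess.Popen("echo out; echo err 1>&2", shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    fd = p.stdout.fileno()
    fde = p.stderr.fileno()
    output = b""
    readerr = b""
    open_fds = {fd, fde}

    while open_fds:
        # Block until at least one of the two pipes has data (or hits EOF).
        (r, w, e) = select.select(list(open_fds), [], [], None)
        if fd in r:
            chunk = os.read(fd, 4096)
            if chunk:
                output += chunk
            else:
                open_fds.discard(fd)    # EOF on stdout
        if fde in r:
            chunk = os.read(fde, 4096)
            if chunk:
                readerr += chunk
            else:
                open_fds.discard(fde)   # EOF on stderr

    p.wait()
    print(output.decode(), readerr.decode())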
template = { 'id' : None,
             'jobId' : None,
             'taskId' : None,
             'schedulerId' : None,
             'schedulerParentId' : None,
template = { 'schedulerId' : None,
             'schedulerParentId' : None,
def main():
    """
    __main__
    """

    # load ad-hoc JSON encoder if simplejson is not present
    try :
        import simplejson as json
    except:
        json = myJSONEncoder()

    # parse options
    try:
        opts, args = getopt.getopt(sys.argv[1:], "",
                                   ["help", "parentId=", "jobId="])
    except getopt.GetoptError, err:
        print usage()
        sys.exit(1)

    inputFile = None
    outputFile = None
    parent = []
    jobList = []

    for o, a in opts:
        if o in ("-h", "--help"):
            print usage()
            sys.exit(1)
        elif o in ("-p", "--parentId"):
            parent = a.split(',')
        elif o in ("-j", "--jobId"):
            jobList = a.split(',')
        else:
            print '\nUnknown parameter.\n'
            sys.exit(1)

    if len(jobList)==0 :
        print '\nAt least one jobId is needed.\n'
        sys.exit(1)

    # LB data structures
    template = { 'id' : None,
                 'jobId' : None,
                 'taskId' : None,
                 'schedulerId' : None,
                 'schedulerParentId' : None,
                 'statusScheduler' : None,
                 'status' : None,
                 'statusReason' : None,
                 'destination' : None,
                 'lbTimestamp' : None,
                 'scheduledAtSite' : None,
                 'startTime' : None,
                 'stopTime' : None
                 }

    # jobId for re-mapping
    jobIds = {}

    # errors list
    errors = []

    # loop!
    for job in jobList :

        rJob = deepcopy(template)
        rJob['schedulerId'] = job

        # append in job list
        jobIds[ job ] = rJob

    lbInstance = GLiteStatusQuery()

    if parent :
        lbInstance.checkJobsBulk( jobIds, parent, errors )
    else :
        lbInstance.checkJobs( jobIds, errors )

    if errors :
        print '\nError during API calls.\n'
        print str(errors)
        sys.exit(1)
    else :
        print json.dumps(jobIds)
'stopTime' : None
'stopTime' : None,
'service' : None
aFile['SEName']=for_file['for_lfn']+file_name
aFile['LFN']=for_file['for_lfn']+os.path.basename(file_name)
def updateLFN(f, lfn, newLFN):
    """
    _updateLFN_

    Update a LFN.
    """
    if f['LFN'] != lfn:
        return
    f['LFN'] = newLFN
    return
aFile['PFN']=for_file['endpoint']+file_name
aFile['PFN']=for_file['endpoint']+os.path.basename(file_name)
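Both fixes above replace the raw `file_name` with `os.path.basename(file_name)` when building the LFN and PFN, so the local directory portion of the worker-node path never leaks into the catalogue names. A quick illustration with made-up values; the `for_lfn`/`endpoint` strings and the file path are hypothetical:

    import os

    for_file = {'for_lfn': '/store/user/test/',
                'endpoint': 'srm://se.example.org/cms/store/user/test/'}
    file_name = '/home/wn/tmp/job_42/output.root'   # full path on the worker node

    # Before: the whole local path was appended to the LFN/PFN prefix.
    print(for_file['for_lfn'] + file_name)
    # /store/user/test//home/wn/tmp/job_42/output.root   (broken)

    # After: only the file name survives.
    print(for_file['for_lfn'] + os.path.basename(file_name))
    # /store/user/test/output.root
    print(for_file['endpoint'] + os.path.basename(file_name))
    # srm://se.example.org/cms/store/user/test/output.root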
for job in obj.jobs:
    if job['name'].count('.'):
        returned_name = job['name'].replace('.', '_')
        returnMap[job['name']] = returnMap.pop(returned_name)
def submit( self, obj, requirements='', config ='', service='' ):
    """
    submit a jdl to glite
    ends with a call to retrieve wms and job,gridid association
    """
if obj['name'].count('.'):
    returned_name = obj['name'].replace('.', '_')
    returnMap[obj['name']] = returnMap.pop(returned_name)
cmd = 'rm %s'%proxy
cmd = 'rm %s'%userKerb
def destroyCredential(self, userKerb):
    """
    """
    if userKerb == None:
        msg = "Error no valid user kerberos to remove "
        raise Exception(msg)
created = models.DateTime(auto_now_add=True)
modified = models.DateTime(auto_now=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
    return self.name
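The corrected row uses `models.DateTimeField`, the actual Django model field (there is no `models.DateTime` field class): `auto_now_add` stamps the row once at creation and `auto_now` refreshes it on every save. A minimal model sketch under those assumptions; the model name and the extra `name` field are illustrative, not taken from the original app:

    from django.db import models

    class Organization(models.Model):
        name = models.CharField(max_length=200)
        # Set once, when the row is first inserted.
        created = models.DateTimeField(auto_now_add=True)
        # Updated automatically every time save() is called.
        modified = models.DateTimeField(auto_now=True)

        def __unicode__(self):
            return self.name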
top_domain = netloc.split('.')[-2:]
top_domain = '.'.join(netloc.split('.')[-2:])
def find_organization(self, row):
    # see if there's a product with the normalized name
    org = Organization.objects.filter(name__iexact=normalize_org_name(row.get('Source Name')))
    if not org.count():
        # see if there's a product with the URL
        urlparts = urlsplit(row.get('Home Page URL', ''))
        if urlparts:
            netloc = urlparts.netloc
            top_domain = netloc.split('.')[-2:]
        else:
            return
org = Organization.objects.filter(homepage__icontains=top_domain)
org = Organization.objects.filter( Q( homepage__icontains='/' + top_domain ) | Q( homepage__icontains='.' + top_domain ) )
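Two things change in this lookup: `top_domain` becomes a real string by joining the last two host labels with a dot, and the homepage match is restricted with `Q` objects to `'/' + top_domain` or `'.' + top_domain`, so `example.com` no longer matches an unrelated `notexample.com`. A small sketch of the domain extraction plus the equivalent queryset filter; the URL and data are assumptions, and `Organization` stands in for the importer's model:

    from urlparse import urlsplit          # urllib.parse on Python 3
    from django.db.models import Q

    netloc = urlsplit('http://www.example.com/about').netloc   # 'www.example.com'
    top_domain = '.'.join(netloc.split('.')[-2:])               # 'example.com'

    # Organization is the importer's Django model, assumed importable here.
    # Matches 'http://example.com/...' and 'http://sub.example.com/...',
    # but not 'http://notexample.com/...'.
    orgs = Organization.objects.filter(
        Q(homepage__icontains='/' + top_domain) |
        Q(homepage__icontains='.' + top_domain)
    )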
<p class="pic"><a href="/level/1/film/179805/sr/1/"><img src="/images/sm_film/6505.jpg" alt="Title" title="Title" /></a></p> \ <div class="info"> \ <p class="name"><a href="/level/1/film/179805/sr/1/">Title</a>, <span class="year"><a href="/level/10/m_act[year]/1952/">1952</a></span></p> \
def parse(self, object, content):
    '''
    >>> m = Movie()
    >>> m.parse('link', u'<div class="element width_2"> \
        <p class="pic"><a href="/level/1/film/179805/sr/1/"><img src="/images/sm_film/6505.jpg" alt="Title" title="Title" /></a></p> \
        <div class="info"> \
        <p class="name"><a href="/level/1/film/179805/sr/1/">Title</a>, <span class="year"><a href="/level/10/m_act[year]/1952/">1952</a></span></p> \
        <span class="gray">Title original original, 90 мин</span> \
        <span class="gray">США, <i class="director">реж. <a class="lined" href="/level/4/people/28795/">Эрик Бросс</a></i> \
        <br />(триллер, комедия) \
        </span> \
        <span class="gray"><a class="lined" href="/level/4/people/28798/">МакКензи Эстин</a>, <a class="lined" href="/level/4/people/3497/">Тодд Филд</a></span> \
        </div>')
    >>> m.title
    u'Title'
    >>> m.id
    179805
    >>> m.runtime
    90
    >>> m.year
    1952
    >>> m.title_original
    u'Title original original'
    >>> m = Movie()
    >>> m.parse('link', u'<div class="element width_2"> \
        <p class="pic"><a href="/level/1/film/179805/sr/1/"><img src="/images/sm_film/6505.jpg" alt="Title" title="Title" /></a></p> \
        <div class="info"> \
        <p class="name"><a href="/level/1/film/179805/sr/1/">Title</a>, <span class="year"><a href="/level/10/m_act[year]/1952/">1952</a></span></p> \
        <span class="gray"></span> \
        <span class="gray">США, <i class="director">реж. <a class="lined" href="/level/4/people/28795/">Эрик Бросс</a></i> \
        <br />(триллер, комедия) \
        </span> \
        <span class="gray"><a class="lined" href="/level/4/people/28798/">МакКензи Эстин</a>, <a class="lined" href="/level/4/people/3497/">Тодд Филд</a></span> \
        </div>')
    >>> m.title
    u'Title'
    >>> m.id
    179805
    >>> m.runtime
    >>> m.title_original
    >>> m.year
    1952
    '''
    link = re.compile(r'<p class="name"><a href="/level/1/film/(\d+)/[^"]*">(.+?)</a>').findall(content)
    if link:
        object.id = self.prepare_int(link[0][0])
        object.title = self.prepare_str(link[0][1])
<span class="gray">США, <i class="director">реж. <a class="lined" href="/level/4/people/28795/">Эрик Бросс</a></i> \ <br />(триллер, комедия) \ </span> \ <span class="gray"><a class="lined" href="/level/4/people/28798/">МакКензи Эстин</a>, <a class="lined" href="/level/4/people/3497/">Тодд Филд</a></span> \
>>> m.title
u'Title'
>>> m.id
179805
>>> m.year
1952
>>> m = Movie()
>>> m.parse('link', u'<div class="element width_2"> \
    <span class="gray">Ultimate Taboo</span> \
    </div>')
>>> m.runtime
>>> m.title_original
u'Ultimate Taboo'