Dataset columns:
    language         - stringclasses (6 values)
    original_string  - stringlengths (25 to 887k)
    text             - stringlengths (25 to 887k)
Python
def GetSummary(self):
    """
    Gets the overall summary of the job.
    Fields extracted are:
        - Total Job Execution Time: job_time (in seconds)
        - Total time spent on I/O: total_io_time (in seconds)
        - Total Bytes written/read by the job: total_io_bytes (in bytes)
        - Types of interfaces used: io_interface_used (array of all interfaces used)
        - Types of I/O operations: io_operations_used (read or write)
        - # of files operated upon: files_used
        - Number of ranks in the job: num_ranks
        - Data Transfer Size per operation: descriptive stats (min, max, mean, and median) (in bytes)
        - Data Transfer Bandwidth per operation: descriptive stats (min, max, mean, and median) (in bytes/s)
        - Overall Access Pattern:
            sequential: An I/O op issued at an offset greater than where the previous I/O op ended. (%)
            consecutive: An I/O op issued at the offset immediately after the end of the previous I/O. (%)
            total_operations: total operations (count)
        - Summary of files used:
            types: extensions of the file
            size: descriptive stats (min, max, mean, and median) (in bytes)
    :return: map of job summary.
    """
    self._throw_if_not_loaded()
    pattern = self.GetAccessPattern()
    total_ops = pattern["read"]["total_ops"] + pattern["write"]["total_ops"]
    total_seq = pattern["read"]["sequential"] + pattern["write"]["sequential"]
    total_cons = pattern["read"]["consecutive"] + pattern["write"]["consecutive"]
    file_size_map = self.GetFileSizes()
    file_sizes = []
    for key in file_size_map:
        file_sizes.append(file_size_map[key])
    if len(file_sizes) == 0:
        file_sizes.append(0)
    file_sizes = numpy.array(file_sizes)
    if self._dxt_df.count()['Module'] != 0:
        return {
            "type": "full",
            "job_time": self.GetJobTime(),
            "total_io_time": self.GetIOTime(),
            "total_io_bytes": self.GetIOSize(),
            "io_interface_used": self._dxt_df['Module'].unique(),
            "io_operations_used": self._dxt_df['Operation'].unique(),
            "files_used": self._dxt_df["Filename"].unique().tolist(),
            "num_ranks": self._dxt_df["Rank"].nunique(),
            "data_transfer_size": {
                "min": self._dxt_df["Length"].min(),
                "max": self._dxt_df["Length"].max(),
                "mean": self._dxt_df["Length"].mean(),
                "median": self._dxt_df["Length"].median(),
            },
            "data_transfer_bandwidth": {
                "min": self._dxt_df["bandwidth"].min(),
                "max": self._dxt_df["bandwidth"].max(),
                "mean": self._dxt_df["bandwidth"].mean(),
                "median": self._dxt_df["bandwidth"].median(),
            },
            "access_pattern": {
                "total_operations": total_ops,
                "sequential": float(total_seq) * 100.0 / total_ops,
                "consecutive": float(total_cons) * 100.0 / total_ops
            },
            "file_used_summary": {
                "types": self._dxt_df['ext'].unique(),
                "size": {
                    "total": file_sizes.sum(),
                    "min": file_sizes.min(),
                    "max": file_sizes.max(),
                    "mean": file_sizes.mean(),
                    "median": numpy.median(file_sizes)
                }
            }
        }
    else:
        operations = []
        write = 0
        read = 0
        if "POSIX" in self._df['Module'].unique():
            write += self._df['POSIX_BYTES_WRITTEN'].sum()
            read += self._df['POSIX_BYTES_READ'].sum()
        if "MPIIO" in self._df['Module'].unique():
            write += self._df['MPIIO_BYTES_WRITTEN'].sum()
            read += self._df['MPIIO_BYTES_READ'].sum()
        if "STDIO" in self._df['Module'].unique():
            write += self._df['STDIO_BYTES_WRITTEN'].sum()
            read += self._df['STDIO_BYTES_READ'].sum()
        if "H5D" in self._df['Module'].unique():
            write += self._df['H5D_BYTES_WRITTEN'].sum()
            read += self._df['H5D_BYTES_READ'].sum()
        if write > 0:
            operations.append("write")
        if read > 0:
            operations.append("read")
        return {
            "type": "basic",
            "job_time": self.GetJobTime(),
            "total_io_time": self.GetIOTime(),
            "total_io_bytes": self.GetIOSize(),
            "io_interface_used": self._df['Module'].unique(),
            "io_operations_used": operations,
            "files_used": self._df["Filename"].unique().tolist(),
            "access_pattern": {
                "total_operations": total_ops,
                "sequential": float(total_seq) * 100.0 / total_ops,
                "consecutive": float(total_cons) * 100.0 / total_ops
            },
            "file_used_summary": {
                "types": self._dxt_df['ext'].unique(),
                "size": {
                    "total": file_sizes.sum(),
                    "min": file_sizes.min(),
                    "max": file_sizes.max(),
                    "mean": file_sizes.mean(),
                    "median": numpy.median(file_sizes)
                }
            }
        }
Python
def GetHDF5FileSummary(self, filepath, only_special_summary=False):
    """
    Create a summary of the HDF5 file.
    General summary includes:
        - path: full path of the file
        - filename: name of the file
        - size: size of file in bytes
        - ext: format of the file
        - io_time: time spent by job in performing I/O on this file (in seconds)
        - io_size: amount of I/O (in bytes) performed on this file
        - special: A special summary is generated for HDF5 and TFRecord datasets
            - for HDF5
                - a map of the hierarchical structure of the file with dataset information
                    - name: Name of the dataset
                    - size: Size of the dataset
                    - shape: shape of the dataset
                    - obj: hdf5 dataset object for future processing

    Parameters
    ----------
    filepath: full path of the file.
    only_special_summary: if set to true only the special summary is returned

    Returns
    -------
    map of summary of file
    """
    import h5py
    self._throw_if_not_loaded()
    if filepath is None:
        raise Exception(str(ErrorCodes.EC1005))
    if not os.path.exists(filepath):
        raise SystemExit(str(ErrorCodes.EC1009))
    file_ext_array = os.path.splitext(ntpath.basename(filepath))
    filename = file_ext_array[0]
    file_ext = filepath.split('.')[-1]
    if file_ext == filename or file_ext != 'h5':
        raise Exception(str(ErrorCodes.EC1006))
    file_obj = h5py.File(filepath, "r")
    special = self._explore_hdf5(file_obj, filename)
    if only_special_summary:
        return special
    else:
        file_size = pathlib.Path(filepath).stat().st_size
        return {
            "path": filepath,
            "filename": filename,
            "size": file_size,
            "ext": file_ext,
            "io_time": self.GetIOTime(filepath=filepath),
            "io_size": self.GetIOSize(filepath=filepath),
            "special": special
        }
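`_explore_hdf5` is not shown here; the sketch below is a hypothetical equivalent that walks an HDF5 file with `h5py` and records name, size, and shape per dataset. It writes a throwaway file first so it runs on its own (the file name is arbitrary):

import h5py
import numpy

# Build a small file so the walk has something to visit.
with h5py.File("demo.h5", "w") as f:
    f.create_dataset("group/data", data=numpy.zeros((4, 3)))

datasets = []

def collect(name, obj):
    # visititems passes every group and dataset; keep datasets only.
    if isinstance(obj, h5py.Dataset):
        datasets.append({"name": name, "size": obj.size, "shape": obj.shape})

with h5py.File("demo.h5", "r") as f:
    f.visititems(collect)

print(datasets)  # [{'name': 'group/data', 'size': 12, 'shape': (4, 3)}]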
Python
def GetTFRecordSummary(self, filepath, features, only_special_summary=False):
    """
    Create a summary of the TFRecord file.
    General summary includes:
        - path: full path of the file
        - filename: name of the file
        - size: size of file in bytes
        - ext: format of the file
        - io_time: time spent by job in performing I/O on this file (in seconds)
        - io_size: amount of I/O (in bytes) performed on this file
        - special: A special summary is generated for HDF5 and TFRecord datasets
            - for TFRecord:
                - Input: tf_record_features is required.
                - a list of processed records based on the features passed.

    Parameters
    ----------
    filepath: full path of the file.
    features: features to read the TFRecord file
    only_special_summary: if set to true only the special summary is returned

    Returns
    -------
    map of summary of TFRecord file
    """
    import tensorflow as tf
    self._throw_if_not_loaded()
    if filepath is None:
        raise Exception(str(ErrorCodes.EC1005))
    if not os.path.exists(filepath):
        raise SystemExit(str(ErrorCodes.EC1009))
    file_ext_array = os.path.splitext(ntpath.basename(filepath))
    filename = file_ext_array[0]
    file_ext = 'tfrecord'
    filenames = [filepath]
    raw_dataset = tf.data.TFRecordDataset(filenames)
    if len(features) == 0:
        raise Exception(str(ErrorCodes.EC1008))
    self._tf_features = features
    special = raw_dataset.map(self._parse_tf_record, num_parallel_calls=POOL_SIZE)
    if only_special_summary:
        return special
    else:
        file_size = pathlib.Path(filepath).stat().st_size
        return {
            "path": filepath,
            "filename": filename,
            "size": file_size,
            "ext": file_ext,
            "io_time": self.GetIOTime(filepath=filepath),
            "io_size": self.GetIOSize(filepath=filepath),
            "special": special
        }
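`self._parse_tf_record` is not shown; it presumably wraps `tf.io.parse_single_example` with the caller-supplied feature spec. A self-contained sketch of that pattern (the file name and feature names are invented):

import tensorflow as tf

# Write one record so the dataset below has something to parse.
example = tf.train.Example(features=tf.train.Features(feature={
    "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[1])),
}))
with tf.io.TFRecordWriter("demo.tfrecord") as writer:
    writer.write(example.SerializeToString())

features = {"label": tf.io.FixedLenFeature([], tf.int64)}

def parse(record):
    # Plays the same role as the _parse_tf_record helper used above.
    return tf.io.parse_single_example(record, features)

parsed = tf.data.TFRecordDataset(["demo.tfrecord"]).map(parse)
for item in parsed:
    print(item["label"].numpy())  # 1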
Python
def GetFileSummary(self, filepath, ext=UNKNOWN, tf_record_features=[]):
    """
    Create a summary of the file.
    General summary includes:
        - path: full path of the file
        - filename: name of the file
        - size: size of file in bytes
        - ext: format of the file
        - io_time: time spent by job in performing I/O on this file (in seconds)
        - io_size: amount of I/O (in bytes) performed on this file
        - special: A special summary is generated for HDF5 and TFRecord datasets
            - for HDF5
                - a map of the hierarchical structure of the file with dataset information
                    - name: Name of the dataset
                    - size: Size of the dataset
                    - shape: shape of the dataset
                    - obj: hdf5 dataset object for future processing
            - for TFRecord:
                - Input: tf_record_features is required.
                - a list of processed records based on the features passed.

    Parameters
    ----------
    filepath: full path of the file.
    ext: recommended format of the file (supported are h5 and tfrecord).
    tf_record_features: if ext is tfrecord then tf_record_features are required.

    Returns
    -------
    map of summary of file
    """
    self._throw_if_not_loaded()
    if filepath is None:
        raise Exception(str(ErrorCodes.EC1005))
    if not os.path.exists(filepath):
        raise SystemExit(str(ErrorCodes.EC1009))
    file_ext_array = os.path.splitext(ntpath.basename(filepath))
    filename = file_ext_array[0]
    if ext == UNKNOWN:
        file_ext = filepath.split('.')[-1]
    else:
        file_ext = ext
    special_summary = {}
    if file_ext == filename:
        file_ext = ""
    elif file_ext == 'h5':
        special_summary = self.GetHDF5FileSummary(filepath, only_special_summary=True)
    elif file_ext == 'tfrecord':
        if len(tf_record_features) == 0:
            raise Exception(str(ErrorCodes.EC1008))
        special_summary = self.GetTFRecordSummary(filepath, tf_record_features, only_special_summary=True)
    file_size = pathlib.Path(filepath).stat().st_size
    return {
        "path": filepath,
        "filename": filename,
        "size": file_size,
        "ext": file_ext,
        "io_time": self.GetIOTime(filepath=filepath),
        "io_size": self.GetIOSize(filepath=filepath),
        "special": special_summary
    }
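GetFileSummary dispatches on the extension and only calls a format-specific helper for `h5` and `tfrecord`. A minimal sketch of that dispatch shape, independent of the class (the handler bodies are placeholders, not the real helpers):

import os

def summarize(filepath, ext=None):
    # Derive the extension the same way as above when none is given.
    name, dot_ext = os.path.splitext(os.path.basename(filepath))
    ext = ext or dot_ext.lstrip('.')
    handlers = {
        "h5": lambda p: {"special": "hdf5 summary would go here"},
        "tfrecord": lambda p: {"special": "tfrecord summary would go here"},
    }
    special = handlers.get(ext, lambda p: {})(filepath)
    return {"filename": name, "ext": ext, **special}

print(summarize("/tmp/sample.h5"))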
Python
def CreateMergedTimeline(self, tensorboard_dir, merged_timeline_output_dir, merged_timeline_file_prefix,
                         save=True, split_by_ranks=False, split_by_time=False, time_slice=None, timeshift=0):
    """
    This method merges all tracing files from tensorboard_dir with the darshan traces.
    It first converts hostnames and process ids to ranks (assumption: hostnames and pids
    are ordered by MPI) and then merges the darshan trace with the tf logs.
    :param tensorboard_dir: The log directory where tensorboard logs are present.
    :param merged_timeline_output_dir: directory where the merged timeline should be output.
    :param merged_timeline_file_prefix: prefix for the output files to be written.
    :param save: if the timeline should be saved
    :param split_by_ranks: should the timeline be split by ranks.
    :param split_by_time: should the timeline be split by time.
    :param time_slice: if the timeline is split by time then what is the time slice.
    :param timeshift: shifts the darshan timeline.
    :return: the generated timeline which is merged between darshan and tf log files.
    """
    if tensorboard_dir is None or merged_timeline_file_prefix is None or merged_timeline_output_dir is None:
        raise Exception(str(ErrorCodes.EC1011))
    if not (os.path.exists(tensorboard_dir) and os.path.exists(merged_timeline_output_dir)):
        raise Exception(str(ErrorCodes.EC10112))
    fileExt = "*.trace.json.gz"
    posix_path_files = list(pathlib.Path(tensorboard_dir).rglob(fileExt))
    files = []
    for path in posix_path_files:
        files.append(str(path))
    hosts = {}
    pb_total = len(files)
    i = 1
    for file in files:
        progress(i, pb_total, status='Parsing TF logs for hostname and Rank')
        i += 1
        with gzip.open(file, 'rb') as json_file:
            data = json.load(json_file)
            trace_events = list(data["traceEvents"])
            filename = os.path.basename(file)
            hostname = filename.split(".")[0]
            if hostname not in hosts:
                hosts[hostname] = {}
            for trace_event in trace_events:
                if 'pid' in trace_event and trace_event['pid'] not in hosts[hostname]:
                    hosts[hostname][trace_event['pid']] = {'rank': 0, 'threads': set()}
                if 'pid' in trace_event and 'tid' in trace_event and trace_event['tid'] not in \
                        hosts[hostname][trace_event['pid']]['threads']:
                    hosts[hostname][trace_event['pid']]['threads'].add(trace_event['tid'])
    # Assumption: lexically sorted hostnames and pids match the MPI rank ordering.
    rank = 0
    for hostname in sorted(hosts.keys()):
        for pid in sorted(hosts[hostname].keys()):
            hosts[hostname][pid]['rank'] = rank
            rank += 1
    base_json = self.CreateChromeTimeline(save=False, timeshift=timeshift)
    b_base_json = base_json
    print("merging")
    merged_events = []
    max_ts = 0
    trace_events = list(b_base_json["traceEvents"])
    for trace_event in trace_events:
        if 'pid' in trace_event:
            pid = trace_event['pid']
            merged_events.append(trace_event)
            if max_ts < trace_event["ts"]:
                max_ts = trace_event["ts"]
    pb_total = len(files)
    i = 1
    for file in files:
        progress(i, pb_total, status='Merging darshan with tf timeline')
        i += 1
        with gzip.open(file, 'rb') as json_file:
            data = json.load(json_file)
            trace_events = list(data["traceEvents"])
            filename = os.path.basename(file)
            hostname = filename.split(".")[0]
            for trace_event in trace_events:
                if 'pid' in trace_event and "ts" in trace_event:
                    trace_event['pid'] = hosts[hostname][trace_event['pid']]['rank']
                    pid = trace_event['pid']
                    merged_events.append(trace_event)
                    if max_ts < trace_event["ts"]:
                        max_ts = trace_event["ts"]
    merged_timeline_json = base_json
    merged_timeline_json["traceEvents"] = merged_events
    json_file = "{}/{}_complete.json.gz".format(merged_timeline_output_dir, merged_timeline_file_prefix)
    json_str = json.dumps(merged_timeline_json) + "\n"
    json_bytes = json_str.encode('utf-8')
    if save:
        with gzip.GzipFile(json_file, 'w') as fout:
            fout.write(json_bytes)
        print("written {}".format(json_file))
    if split_by_ranks:
        # Note: assumes at most 8 ranks; an event with a higher pid would overflow this list.
        trace_data_proc = [None] * 8
        pb_total = len(merged_events)
        i = 1
        for merged_event in merged_events:
            if i % 100 == 0 or i == pb_total:
                progress(i, pb_total, status='Splitting timeline by rank')
            i += 1
            if 'pid' in merged_event:
                pid = merged_event['pid']
                if trace_data_proc[pid] is None:
                    trace_data_proc[pid] = []
                trace_data_proc[pid].append(merged_event)
        for i, trace_data in enumerate(trace_data_proc):
            b_base_json = base_json
            b_base_json["traceEvents"] = trace_data
            json_file = "{}/{}_r{}.json.gz".format(merged_timeline_output_dir, merged_timeline_file_prefix, i)
            json_str = json.dumps(b_base_json) + "\n"
            json_bytes = json_str.encode('utf-8')
            if save:
                with gzip.GzipFile(json_file, 'w') as fout:
                    fout.write(json_bytes)
                print("written {}".format(json_file))
    elif split_by_time:
        if time_slice is None:
            time_slice = 100 * 1e6
        num_pieces = math.ceil(max_ts / time_slice)
        trace_data_time = [None] * num_pieces
        pb_total = len(merged_events)
        i = 1
        for merged_event in merged_events:
            if i % 100 == 0 or i == pb_total:
                progress(i, pb_total, status='Splitting timeline by time')
            i += 1
            time_piece = int(merged_event["ts"] / time_slice)
            # Grow the list if an event lands past the precomputed number of slices
            # (the original used '>' here, which could leave the target index out of range).
            while time_piece >= len(trace_data_time):
                trace_data_time.append(None)
            if trace_data_time[time_piece] is None:
                trace_data_time[time_piece] = []
            trace_data_time[time_piece].append(merged_event)
        for i, trace_data in enumerate(trace_data_time):
            b_base_json = base_json
            b_base_json["traceEvents"] = trace_data
            json_file = "{}/{}_t{}.json.gz".format(merged_timeline_output_dir, merged_timeline_file_prefix, i)
            json_str = json.dumps(b_base_json) + "\n"
            json_bytes = json_str.encode('utf-8')
            if save:
                with gzip.GzipFile(json_file, 'w') as fout:
                    fout.write(json_bytes)
                print("written {}".format(json_file))
    return merged_timeline_json
Python
def MergeTimelines(self, timeline_file1, timeline_file2, merged_timeline_file):
    """
    This method merges two timeline files.
    :param timeline_file1: The first timeline, to which the second will be merged
    :param timeline_file2: The timeline file which will be merged into the first
    :param merged_timeline_file: The output file for the resultant merged timeline.
    :return: The merged timeline.
    """
    if timeline_file1 is None or timeline_file2 is None or merged_timeline_file is None:
        raise Exception(str(ErrorCodes.EC1011))
    if not os.path.exists(timeline_file1) or not os.path.exists(timeline_file2):
        raise Exception(str(ErrorCodes.EC10112))
    file_1_json = {}
    with open(timeline_file1) as f:
        file_1_json = json.load(f)
    file_2_json = {}
    with open(timeline_file2) as f:
        file_2_json = json.load(f)
    new_trace_values = file_1_json["traceEvents"]
    new_trace_values.extend(file_2_json["traceEvents"])
    file_1_json["traceEvents"] = new_trace_values
    with open(merged_timeline_file, 'w') as outfile:
        json.dump(file_1_json, outfile)
    return file_1_json
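MergeTimelines boils down to concatenating the two `traceEvents` arrays under the first file's metadata. The same operation on in-memory dicts, without the class or any files (event contents are made up):

import json

timeline_a = {"traceEvents": [{"name": "io", "ts": 1}]}
timeline_b = {"traceEvents": [{"name": "compute", "ts": 2}]}

# Keep timeline_a's top-level metadata, append timeline_b's events.
merged = dict(timeline_a)
merged["traceEvents"] = timeline_a["traceEvents"] + timeline_b["traceEvents"]

print(json.dumps(merged))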
Python
def filter(self, **kwargs) -> list:
    """
    Access any and all filters for the fields on the model_class
    Run query using the given parameters
    return a list of model_class objects
    """
    db_keys_set = self.filter_for_keys_set(**kwargs)
    if not len(db_keys_set):
        return []
    return Query.get_many_objects(self.model_class, db_keys_set)
Python
def format_value_pre_save(self, field_value):
    """
    format field_value before saving to db
    return corrected field_value
    assumes validation is already passed
    """
    return field_value
Python
def on_save(cls, model_instance: 'Model', field_name: str, field_value,
            pipeline: redis.client.Pipeline = None, **kwargs):
    """
    for parent classes to override.
    will run for every field of the model instance, including null attributes
    runs async with model instance save event, so order of processing is not guaranteed
    """
    # todo: create choice Sets of instance keys for fields using choices option
    # if model_instance._meta.fields[field_name].choices:
    #     # this will not work! how to edit, delete, prevent overwrite and duplicates?
    #     field_value_b = cls.encode(field_value)
    #     if pipeline:
    #         return pipeline.set(cls.get_special_use_field_db_key(model_instance, field_name), field_value_b)
    #     else:
    #         from ..redis_db import POPOTO_REDIS_DB
    #         return POPOTO_REDIS_DB.set(cls.get_special_use_field_db_key(model_instance, field_name), field_value_b)
    return pipeline if pipeline else None
Python
def on_delete(cls, model_instance: 'Model', field_name: str, field_value, pipeline=None, **kwargs):
    """
    for parent classes to override.
    will run for every field of the model instance, including null attributes
    runs async with model instance delete event, so order of processing is not guaranteed
    """
    return pipeline if pipeline else None
Python
def format_value_pre_save(self, field_value):
    """
    format field_value before saving to db
    return corrected field_value
    assumes validation is already passed
    """
    if isinstance(field_value, GeoField.Coordinates):
        return field_value
    if isinstance(field_value, tuple):
        return GeoField.Coordinates(field_value[0], field_value[1])
    if self.null:
        return GeoField.Coordinates(None, None)
    return field_value
Python
def lineno():
    '''
    Returns the current line number in our program.
    '''
    return currentframe().f_back.f_lineno
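A quick check of the helper above: `currentframe().f_back` is the caller's frame, so the value returned is the line the call appears on. A minimal self-contained run (the `inspect` import is assumed to live at module scope in the original):

from inspect import currentframe

def lineno():
    '''Returns the current line number in our program.'''
    return currentframe().f_back.f_lineno

print(lineno())  # prints the source line number of this print statement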
Python
def multiarch_import(name, sufix=None, using=False):
    '''Dynamic import for multiarch libraries to match the machine architecture'''
    # Initialize debugger
    d = Debugger()
    d.set_debug()
    d.set_name("Multiarch")

    # Detect if a sufix was given
    imported = None
    arch = None
    if sufix:
        # Use it as requested
        try:
            imported = __import__("{}{}".format(name, sufix))
            if using:
                d.debug("Using {}{}".format(name, sufix), color='cyan')
        except Exception as e:
            d.warning("I have tried to import the library '{}' as you requested using sufix '{}', but I have failed to import {}{}; maybe you have forgotten to install the python library. I will try to import the default library!".format(name, sufix, name, sufix))
    elif sufix != "":
        # No sufix was given, try to detect the architecture using 'whatismyarch()'
        try:
            arch = whatismyarch()
        except Exception as e:
            d.warning("I have tried to guess your machine architecture using 'whatismyarch()', but the command has failed. Do you have the gcc command installed? I will try to import the default library! (Output was: {})".format(e))
        # We got an architecture
        if arch:
            # Try to import the detected architecture
            try:
                imported = __import__("{}_{}".format(name, arch))
                if using:
                    d.debug("Using {}_{}".format(name, arch), color='cyan')
            except Exception as e:
                d.warning("I have guessed with 'whatismyarch()' that your architecture is '{}', but I have failed to import {}_{}; maybe you have forgotten to install the python library for your architecture. I will try to import the default library!".format(arch, name, arch))
        else:
            d.warning("I couldn't find your architecture with 'whatismyarch()', I will try to import the default library!")
    if not imported:
        # No architecture-specific library imported, try to import the base library!
        try:
            imported = __import__(name)
            if using:
                d.debug("Using {}".format(name), color='cyan')
        except Exception as e:
            d.debug("Error while importing {}; maybe you have forgotten to install the python base library or your environment doesn't have it installed. This script is not able to find it!".format(name), color="red")
            raise
    return imported
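The core of multiarch_import is `__import__` on a composed module name with a fallback to the plain name. A compact, self-contained sketch of that fallback chain (the `_x86_64` suffix is only an example; `json` stands in for the real library):

def import_with_fallback(name, arch=None):
    candidates = ["{}_{}".format(name, arch)] if arch else []
    candidates.append(name)  # the plain module name is the last resort
    for candidate in candidates:
        try:
            return __import__(candidate)
        except ImportError:
            continue
    raise ImportError("none of {} could be imported".format(candidates))

# "json_x86_64" does not exist, so this falls back to the stdlib json module.
print(import_with_fallback("json", arch="x86_64").__name__)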
Python
def module_list(artboard, lst):
    """ collate layer names into a list """
    for layer in reversed(list(artboard.descendants())):
        if layer.name == 'HEADER':
            print(f'Excluding layer {layer.name}')
        else:
            lst.append(layer.name)
    return lst
Python
def save_image(image, counter, name):
    """ Save image, zero-padding the counter when it is a single digit """
    # counter 0-9: pad with a leading zero
    if counter <= 9:
        image.convert('RGB').save(Path(user_directory).joinpath('images', f'{name}_0{str(counter)}.jpg'), quality=85)
        print(f'{name}_0{str(counter)}.jpg')
    # counter 10 and above: use the counter as-is
    if counter > 9:
        image.convert('RGB').save(Path(user_directory).joinpath('images', f'{name}_{str(counter)}.jpg'), quality=85)
        print(f'{name}_{str(counter)}.jpg')
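The two branches above only differ in how the counter is zero-padded; Python's format specifiers cover both cases in one expression. A sketch of the naming scheme on its own (no image I/O involved; the function name is invented):

def frame_name(name, counter):
    # :02d left-pads single digits with a zero and leaves larger numbers alone,
    # matching the _01 ... _09, _10, _11 ... sequence produced above.
    return "{}_{:02d}.jpg".format(name, counter)

print([frame_name("layer", c) for c in (1, 9, 10, 42)])
# ['layer_01.jpg', 'layer_09.jpg', 'layer_10.jpg', 'layer_42.jpg']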
Python
def show_text(self, title, message):
    """
    A method to display text message as system notification
    """
    try:
        os.system('notify-send "' + title + '" "' + message + '"')
    except Exception as e:
        os.system('notify-send "' + self.text_error_title + '" "' + self.text_error_message + '"')
Python
def play_audio(self, audio_path):
    """
    A method to play audio as system notification
    """
    try:
        os.system('paplay ' + audio_path)
    except FileNotFoundError:
        # show_text expects a title and a message
        self.show_text("Error!", "Audio file not found!")
Python
def select_beep_audio_text(self, data, voice):
    """
    A method to select a random beep, audio and the respective text transcript of the selected audio
    """
    try:
        logging.debug("data is: {}".format(data))
        messages = data['message']
        beep_data = data['beep']
        beep_index = random.randint(1, len(beep_data))
        beep_temp_path = 'beep_' + str(beep_index)
        # built-in open does not support pathlib-like paths, need to convert to
        # string to work in both python 3.5 and 3.6
        beep_path = os.path.join(str(self.path_home), beep_data[beep_temp_path])
        if voice.lower() != "male" and voice.lower() != "female":
            voice_id = random.randint(self.min_voice_id, self.max_voice_id)
            if voice_id == self.male_voice_id:
                voice = "male"
            elif voice_id == self.female_voice_id:
                voice = "female"
        message = messages[voice]
        message_index = random.randint(1, len(message))
        audio_temp_path = message[str(message_index)]['audio']
        audio_path = os.path.join(str(self.path_home), audio_temp_path)
        text_message = message[str(message_index)]['text']
        return audio_path, beep_path, text_message
    except Exception as e:
        print("Following exception occurred in sound file: {}".format(e))
        return None, None, None
Python
def play_beep(self, beep_path):
    """
    A method to play beep sound for catsleep
    """
    try:
        self.util.play_audio(beep_path)
    except FileNotFoundError:
        self.util.show_text("Error!", "Something went wrong with the beep notification!")
Python
def play_audio(self, audio_path):
    """
    A method to play audio for catsleep
    """
    try:
        self.util.play_audio(audio_path)
    except FileNotFoundError:
        self.util.show_text("Error!", "Something went wrong with the audio notification!")
Python
def display_notification(self, text_title, text_message):
    """
    A method to display the notification message
    """
    try:
        self.util.show_text(text_title, text_message)
    except Exception as e:
        self.util.show_text("Error!", "Something went wrong with the text notification!")
Python
def load_database(self):
    """
    A method to load database files for audio, beep, texts from data.json file
    """
    database_path = self.conf.db_path
    logging.debug("database path: {}".format(database_path))
    try:
        with open(str(database_path), 'r') as rf:
            data = json.load(rf)
            return data
    except Exception as e:
        logging.error("Database file not found.", exc_info=True)
        return {}
Python
def catsleep_control(self):
    """
    A method to control the interval and frequency of alarms
    """
    # Show a notification that catsleep is running and will send notifications for breaks
    time.sleep(10)
    self.display_notification("Cat Sleep", "Cat Sleep is running and will send notification to take break.")
    # load the database file for audio, text and beep sound
    data = self.load_database()
    logging.debug("Database files: {}".format(data))
    while True:
        try:
            # try to get configurations
            user_conf = self.conf.get_user_config()
            logging.debug('voice: {}'.format(user_conf["voice_mode"]))
            # get the selected audio, text and beep for this specific notification
            audio_path, beep_path, text_message = self.util.select_beep_audio_text(data, user_conf['voice_mode'])
            logging.info('audio path: {}'.format(audio_path))
            logging.info('beep path: {}'.format(beep_path))
            logging.info('text message: `{}`'.format(text_message))
            # wait for a certain period before the next alarm
            # user input is in minutes, need to convert to seconds
            sleep_time = user_conf['interval_minutes'] * 60
            logging.info('going sleep for {} sec ...'.format(sleep_time))
            time.sleep(sleep_time)
            for alarm in range(user_conf['frequency_number']):
                # check if beep playing is enabled
                # first play a beep sound, different beep at different time, need to make dynamic
                if user_conf['play_beep_on'].lower() == "yes":
                    if beep_path:
                        self.play_beep(beep_path)
                    else:
                        self.play_beep()
                # check if showing text is enabled
                # then display the text notification, different text at different time, need to make dynamic
                if user_conf['show_text_on'].lower() == "yes":
                    if text_message:
                        self.display_notification("Take Break!", text_message)
                    else:
                        self.display_notification("Take Break!", "Hey, you should take a break")
                # check if playing an audio message is enabled
                # then play a voice message, different message at different time, need to make dynamic
                if user_conf['play_audio_on'].lower() == "yes":
                    if audio_path:
                        self.play_audio(audio_path)
                    else:
                        self.play_audio(self.default_audio_path)
                # gap between two consecutive alarms in a slot
                # user input is in minutes, need to convert to seconds
                print('-' * 45)
                time.sleep(user_conf['frequency_interval_minutes'] * 60)
        except Exception as e:
            print(e)
            self.util.show_text("Error!", "Something went wrong!")
Python
def update(self) -> bool:
    """
    Updates the data in cache. Update of the data will be forced even if data is already cached.
    `update()` will forcibly request the data source for the data ignoring the current cache.

    Returns:
        bool: True if the data source returned data to be cached. False otherwise.
              The latter means that cache isn't updated.
    """
    pass
Python
def clear(self) -> bool:
    """
    Clears all the cache. It will delete all the cache, so use carefully.

    Returns:
        bool: True if the cache existed before the clear. False otherwise.
    """
    pass
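update() and clear() above are interface stubs; the concrete behaviour lives elsewhere. A minimal in-memory implementation that honours the documented return values, assuming nothing about the real backing store (the class name and the data_source callable are invented):

class MemoryCache:
    def __init__(self, data_source):
        self._data_source = data_source  # callable returning fresh data, or None on failure
        self._data = None

    def update(self) -> bool:
        # Always re-query the source, ignoring whatever is cached.
        fresh = self._data_source()
        if fresh is None:
            return False  # the source gave nothing back; cache left untouched
        self._data = fresh
        return True

    def clear(self) -> bool:
        existed = self._data is not None
        self._data = None
        return existed


cache = MemoryCache(lambda: {"answer": 42})
print(cache.update())  # True
print(cache.clear())   # True
print(cache.clear())   # False (already empty)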
Python
def append(to_add: Candidate):
    """
    append a candidate on the tail of the CandidateList
    this function is automatically called when a candidate is created
    :param to_add: a Candidate object to store
    """
    CandidateList._inner.append(to_add)
Python
def insert(to_add: Candidate, pos: int):
    """
    Insert a candidate at the position specified in the pos argument
    :param to_add: Candidate to insert in the list
    :param pos: the index where you want to store the candidate
    """
    CandidateList._inner.insert(pos, to_add)
Python
def empty_list():
    """
    Method to empty the CandidateList and reset the Candidate id
    """
    CandidateList._inner = []
    Candidate.reset_id()
Python
def add_demand(self, to_add: Candidate, accept: bool = True):
    """
    Append a demand to the demands list
    :param to_add: Candidate to add
    :param accept: True if the member wants to accept them, False otherwise
    :return: None
    """
    self._demands.append({'candidate': to_add, 'accept': accept})
Python
def remove_demand(self, to_remove: Candidate):
    """
    Remove the dictionary which contains the Candidate passed as argument
    from the demands list
    :param to_remove: Candidate to remove
    :return: None
    """
    # Match on the candidate only: the original lookup used
    # {'candidate': to_remove, 'accept': (True or False)}, but `True or False`
    # evaluates to True, so demands stored with accept=False were never removed.
    for demand in self._demands:
        if demand['candidate'] is to_remove:
            self._demands.remove(demand)
            break
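Why the original lookup key was wrong: `or` returns its first truthy operand, so `(True or False)` is simply `True` and the dict only ever matched demands recorded with `accept=True`. A two-line demonstration:

# `or` short-circuits to its first truthy operand.
print(True or False)                                     # True
print({'accept': (True or False)} == {'accept': True})   # True
print({'accept': (True or False)} == {'accept': False})  # False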
Python
def append(to_add: Member):
    """
    append a member on the tail of the MemberList
    this function is automatically called when a member is created
    :param to_add: a Member object to store
    """
    MemberList._inner.append(to_add)
Python
def insert(to_add: Member, pos: int):
    """
    Insert a member at the position specified in the pos argument
    :param to_add: Member to insert in the list
    :param pos: the index where you want to store the member
    """
    MemberList._inner.insert(pos, to_add)
Python
def empty_list():
    """
    Method to empty the MemberList and reset the Member id
    """
    MemberList._inner = []
    Member.reset_id()
Python
def snoopHTML(self, fpath):
    """
    Generates a data structure for the given file, describing its HTML elements
    that have an associated style.
    NOTE: Line numbers are sometimes inaccurate.
    :param fpath: str
    :return:
    """
    self._HTML_file_styles[fpath] = []
    file = open(fpath).read()
    file_lines = file.split('\n')
    soup = Bs(file, 'html.parser')
    tags = soup.find_all()
    for tag in tags:
        styles = {'element': '', 'class': [], 'id': [], 'line_no': 0, 'tag': ''}
        if tag.has_attr('class'):
            _class = tag['class']
            styles['class'].append(_class)
        elif tag.has_attr('id'):
            _id = tag['id']
            styles['id'].append(_id)
        # get open tag of element
        styles['element'] = str(tag).strip().split('\n')[0]
        # get tag
        styles['tag'] = tag.name
        # if has style
        if len(styles['class']) != 0 or len(styles['id']) != 0:
            self._HTML_file_styles[fpath].append(styles)
            # clean up classes
            clean_classes = []
            for cgroup in styles['class']:
                for cname in cgroup:
                    clean_classes.append('.' + cname)
            # clean up ids
            clean_ids = []
            for iname in styles['id']:
                clean_ids.append('#' + iname)
            styles['class'] = clean_classes
            styles['id'] = clean_ids
            # get line number in file
            for line in enumerate(file_lines):
                line_no = line[0] + 1
                rline = str(line[1].strip())
                opTag = '<' + styles['tag']
                # check if matched tag on class
                if len(styles['class']) != 0:
                    if opTag in rline and styles['class'][0][1:] in rline:
                        styles['line_no'] = line_no
                # check if matched tag on id
                elif len(styles['id']) != 0:
                    if opTag in rline and styles['id'][0][1:] in rline:
                        styles['line_no'] = line_no
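A small self-contained sketch of the BeautifulSoup behaviour this snippet relies on: 'class' comes back as a list of names (hence the nested cgroup loop later), while 'id' is a plain string. The HTML below is made up for illustration.

from bs4 import BeautifulSoup

html = '<div class="card wide" id="main"><p>hello</p></div>'
soup = BeautifulSoup(html, 'html.parser')
div = soup.find('div')
print(div.has_attr('class'))   # True
print(div['class'])            # ['card', 'wide'] -- a list, one entry per class name
print(div['id'])               # 'main' -- a single string
print(div.name)                # 'div'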
Python
def snoopHTML_styles(self, fpath):
    """
    Generates a data structure organised by class name; each entry contains
    filename, line number, element and tag.
    :return:
    """
    for tag in self._HTML_file_styles[fpath]:
        for _class in tag['class']:
            struct = {'file': fpath, 'line_no': tag['line_no'],
                      'tag': tag['tag'], 'element': tag['element']}
            # create new style entry
            if _class not in self._all_styles:
                self._all_styles[_class] = [struct]
            # add to existing style entry
            else:
                self._all_styles[_class].append(struct)
        for _id in tag['id']:
            struct = {'file': fpath, 'line_no': tag['line_no'],
                      'tag': tag['tag'], 'element': tag['element']}
            # create new style entry
            if _id not in self._all_styles:
                self._all_styles[_id] = [struct]
            # add to existing style entry
            else:
                self._all_styles[_id].append(struct)
Python
def snoopCSS(self, fpath):
    """
    Generates a data structure containing the style file name, along with its
    associated styles.
    :param fpath: str
    :return:
    """
    file = open(fpath).read()
    file_lines = file.split('\n')
    class_id = ['.', '&']  # class identifiers for stylus
    id_id = ['#']  # id identifiers for stylus
    struct = {'class': [], 'id': []}
    for line in file_lines:
        line = line.strip()
        if len(line) > 0 and line[0] in class_id \
                and line not in struct['class']:
            struct['class'].append(line)
        elif len(line) > 0 and line[0] in id_id \
                and line not in struct['id']:
            struct['id'].append(line)
    self._css_file_styles[fpath] = struct
Python
def snoopCSS_styles(self, fname):
    """
    Generates a key-value pair structure; key = style, value = fname.
    :param fname:
    :return:
    """
    for _class in self._css_file_styles[fname]['class']:
        if _class not in self._css_all_styles:
            self._css_all_styles[_class] = fname
    for _id in self._css_file_styles[fname]['id']:
        if _id not in self._css_all_styles:
            self._css_all_styles[_id] = fname
Python
def diffHTML(self):
    """
    Returns a list of dictionaries containing the file name, line number and
    element of elements that use undefined style definitions.
    :return:
    """
    diff = []
    for style in self._all_styles:
        if style not in self._css_all_styles \
                and '&' + style not in self._css_all_styles \
                and '>' + style not in self._css_all_styles:
            obj = {'style': style, 'location': self._all_styles[style]}
            diff.append(obj)
    return diff
Python
def diffCSS(self):
    """
    Returns style definitions, with file paths, that are not used in HTML.
    :return:
    """
    diff = []
    for style in self._css_all_styles:
        style_and = (style.replace('&.', '.')).replace('>.', '.')
        if style not in self._all_styles and style_and not in self._all_styles:
            obj = {'style': style, 'location': self._css_all_styles[style]}
            diff.append(obj)
    return diff
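An illustrative sketch of the two-way diff idea, run on plain dicts instead of the class attributes (_all_styles / _css_all_styles) used above; the style names and files are made up.

# Styles used in HTML vs styles defined in the stylesheet, diffed both ways.
used_in_html = {'.card': [{'file': 'index.html', 'line_no': 3}], '.ghost': []}
defined_in_css = {'.card': 'main.styl', '.unused': 'main.styl'}

undefined = [s for s in used_in_html if s not in defined_in_css]   # used but never defined
dead = [s for s in defined_in_css if s not in used_in_html]        # defined but never used

print(undefined)  # ['.ghost']
print(dead)       # ['.unused']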
Python
def crawl(self, cwd):
    """
    Crawls through the base directory to generate structs for styles and HTML
    files. Style file extension defaults to '.styl'.
    :return:
    """
    cwd += '/'
    os.chdir(cwd)  # change current working dir to 'cwd' arg
    src_files = os.listdir(cwd)
    src_folders = []
    # ignore hidden items (and 'env', used in dev mode); filtering into a new
    # list avoids skipping entries, which removing while iterating would do
    src_files = [item for item in src_files if item[0] != '.' and item != 'env']
    for item in src_files:
        item_path = cwd + item
        if os.path.isfile(item_path):
            if item_path.endswith('.html'):
                self.snoopHTML(item_path)
                self.snoopHTML_styles(item_path)
            elif item_path.endswith('.styl'):
                self.snoopCSS(item_path)
                self.snoopCSS_styles(item_path)
        else:
            src_folders.append(cwd + item)
    # recurse into subdirectories
    for folder in src_folders:
        self.crawl(folder)
Python
def make_submit(image_name, preds):
    '''
    Convert the prediction of each image to the required submit format
    :param image_name: image file name
    :param preds: 5-class prediction mask in a numpy array
    :return:
    '''
    submit = dict()
    submit['image_name'] = image_name
    submit['size'] = (preds.shape[1], preds.shape[2])  # (height, width)
    submit['mask'] = dict()
    for cls_id in range(0, 5):  # 5 classes in this competition
        mask = preds[cls_id, :, :]
        cls_id_str = str(cls_id + 1)  # class index runs from 1 to 5; convert to str
        fortran_mask = np.asfortranarray(mask)
        # encode the mask into RLE; for details see:
        # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/mask.py
        rle = maskUtils.encode(fortran_mask)
        submit['mask'][cls_id_str] = rle
    return submit
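A minimal sketch of the RLE encoding step on a tiny dummy mask, assuming numpy and pycocotools are installed; maskUtils.encode expects a Fortran-contiguous uint8 array, which is why make_submit calls np.asfortranarray first.

import numpy as np
from pycocotools import mask as maskUtils

dummy = np.zeros((4, 6), dtype=np.uint8)
dummy[1:3, 2:5] = 1                          # a small rectangle of foreground
rle = maskUtils.encode(np.asfortranarray(dummy))
print(rle['size'])                           # [4, 6] -- (height, width)
print(maskUtils.area(rle))                   # 6 -- number of foreground pixels
restored = maskUtils.decode(rle)
assert (restored == dummy).all()             # round-trips back to the same mask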
Python
def modules(self):
    '''
    Get the loaded modules, loading them first if that hasn't been done yet.
    '''
    if not hasattr(self, '_modules'):
        self.load()
    return self._modules
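This reads like a lazily-evaluated accessor: load() runs on first access and caches the result on the instance. A self-contained illustration of that pattern follows; the Stats class and its module names are made up for the example.

# "Load on first access, then cache on the instance" pattern.
class Stats:
    @property
    def modules(self):
        if not hasattr(self, '_modules'):
            self.load()
        return self._modules

    def load(self):
        print('loading...')
        self._modules = ['auth', 'pages']

s = Stats()
print(s.modules)   # prints 'loading...' then ['auth', 'pages']
print(s.modules)   # cached: no second 'loading...'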
Python
def table(self, queries):
    '''
    Get a list of modules' counters.
    '''
    return tuple([(
        module.verbose_name_plural,
        [module.count(qs) for qs in queries]
    ) for module in self.modules])
Python
def graph(self, days):
    '''
    Get a list of modules' counters for all the given days.
    '''
    return tuple([{
        'data': [(mktime(day.timetuple()) * 1000, module.count(qs))
                 for day, qs in days],
        'label': str(gettext(module.verbose_name_plural)),
    } for module in self.modules])
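The `mktime(day.timetuple()) * 1000` expression converts a date into JavaScript-style epoch milliseconds (in local time), which is what browser charting libraries generally expect. A standalone illustration, with a made-up date:

from datetime import date
from time import mktime

day = date(2024, 1, 15)
millis = mktime(day.timetuple()) * 1000
print(int(millis))  # e.g. 1705276800000 -- the exact value depends on the local timezone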
Python
def active_users(parser, token):
    '''
    This template tag will get a list of active users based on time. If you do
    not supply a time to the tag, the default of 15 minutes is used.

    With the 'as' clause you can supply the context variable the user list
    should be stored in. There is also an 'in' clause; after 'in' you specify
    an amount and a duration, such as 2 hours or 10 minutes.

    Syntax::

        {% active_users in [amount] [duration] as [varname] %}
        {% active_users as [varname] %}
        {% active_users %}

    Example usage::

        {% load request_tag %}
        {% active_users in 10 minutes as user_list %}
        {% for user in user_list %}
            {{ user.get_username }}
        {% endfor %}
    '''
    return ActiveUserNode(parser, token)
Python
def patch_ENCODE(obj_id, patch_input):
    '''PATCH an existing ENCODE object and return the response JSON'''
    if isinstance(patch_input, dict):
        json_payload = json.dumps(patch_input)
    elif isinstance(patch_input, str):
        json_payload = patch_input
    else:
        print('Datatype to patch is not string or dict.', file=sys.stderr)
        return  # bail out: json_payload was never set
    url = SERVER + obj_id
    if DEBUG_ON:
        print("DEBUG: PATCH URL : %s" % (url))
        print("DEBUG: PATCH data: %s" % (json_payload))
    response = requests.patch(url, auth=(AUTHID, AUTHPW), data=json_payload,
                              headers=POST_HEADERS)
    if DEBUG_ON:
        print("DEBUG: PATCH RESPONSE")
        print(json.dumps(response.json(), indent=4, separators=(',', ': ')))
    if not response.status_code == 200:
        print(response.text, file=sys.stderr)
    return response.json()
Python
def write_to_json(object_type, objects):
    """
    Write experiment, biosample, and donor objects to json in different
    subdirectories of a folder. Log accessions to submission_script.log.
    """
    object_type = object_type + 's'
    print('STARTING {}'.format(object_type).upper())
    accessions = []
    for obj in objects:
        accession = obj['accession']
        print(accession)
        accessions.append(accession)
        file_path = "../{}/{}_modified.json".format(object_type, accession)
        with open(file_path, "w") as file_out:
            file_out.write(json.dumps(obj, indent=4, sort_keys=True))
    # Log accessions
    with open('submission_script.log', 'a') as f:
        f.write(strftime('%Y-%m-%d %H:%M'))
        f.write(' Outputted {}\n'.format(object_type))
        for accession in accessions:
            f.write(accession + '\n')
    print('FINISHED {}'.format(object_type).upper())
Python
def make_list_from_str(list_as_str):
    """
    Handles report fields coming from json arrays, including NaNs and comma-
    separated values.
    """
    if not list_as_str or pd.isna(list_as_str):
        return []
    else:
        return list_as_str.strip('[]').split(',')
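A few concrete inputs and outputs for this helper, using a local copy so the snippet runs on its own; note that strip('[]')/split(',') does not trim or unquote items, so callers receive the raw comma-separated tokens. The sample strings are made up.

import pandas as pd

def make_list_from_str(list_as_str):
    if not list_as_str or pd.isna(list_as_str):
        return []
    return list_as_str.strip('[]').split(',')

print(make_list_from_str(float('nan')))   # []
print(make_list_from_str(''))             # []
print(make_list_from_str('[/documents/a/,/documents/b/]'))
# ['/documents/a/', '/documents/b/'] -- items are not trimmed or unquoted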
Python
def minimize_experiment(report_df, report_df_index, fields_conversion):
    """
    Takes an experiment dataframe and minimizes the row given by
    report_df_index. The main feature of this function compared to other
    minimizers is the unique handling of the targets.
    """
    obj = {}
    fields_direct_conversion = fields_conversion['direct']
    fields_indirect_conversion = fields_conversion['indirect']
    for column in report_df.columns:
        # columns whose values can be directly submitted
        if column in fields_direct_conversion:
            # don't submit fields with blank (NaN) values except for description
            if pd.isna(report_df.loc[report_df_index, column]):
                if column == 'Description':
                    obj[fields_direct_conversion[column]] = ''
                continue
            obj[fields_direct_conversion[column]] = report_df.loc[report_df_index, column]
        # columns that need preprocessing
        elif column in fields_indirect_conversion:
            if column == 'Target label':
                if pd.isna(report_df.loc[report_df_index, column]):
                    pass
                else:
                    # Process target depending on whether it is investigated as control
                    investigated_as = make_list_from_str(report_df.loc[report_df_index, 'target.investigated_as'])
                    if 'control' in investigated_as:
                        obj[fields_indirect_conversion[column]] = 'Control'
                    else:
                        obj[fields_indirect_conversion[column]] = report_df.loc[report_df_index, column]
            else:
                l = make_list_from_str(report_df.loc[report_df_index, column])
                obj[fields_indirect_conversion[column]] = l
    return obj
Python
def build_biosamples(output_lists, fields_dict, fields_conversion, objs_by_type, objs_by_id):
    """
    Takes a list of biosample accessions and gives a list of biosample objects
    (dicts) for submission. Unlike build_donors, there are no subtypes of
    biosample, so this function is a little simpler than build_donors.
    """
    print('Building biosample objects to submit')
    biosample_fields_list = fields_dict['biosample']
    # holds biosample objects to be submitted
    objs_to_output = []
    # first pass: collect fields whose ordered information we get from the report
    biosamples = output_lists['biosamples']
    if not biosamples:
        return objs_to_output
    biosample_df = get_report_tsv_from_fields('Biosample', biosamples, biosample_fields_list)
    # collect ids of properties we need to get separately
    for i in biosample_df.index:
        biosample_obj = minimize_donors_and_biosamples(biosample_df, i, fields_conversion)
        objs_to_output.append(biosample_obj)
        objs_by_type['document'][biosample_df.loc[i, 'Accession']] = make_list_from_str(biosample_df.loc[i, 'Documents'])
        objs_by_type['genetic_modification'][biosample_df.loc[i, 'Accession']] = make_list_from_str(biosample_df.loc[i, 'Applied modifications'])
        objs_by_type['biosample_characterization'][biosample_df.loc[i, 'Accession']] = make_list_from_str(biosample_df.loc[i, 'Characterizations'])
        objs_by_type['treatment'][biosample_df.loc[i, 'Accession']] = make_list_from_str(biosample_df.loc[i, 'Treatments'])
        objs_by_type['publication'][biosample_df.loc[i, 'Accession']] = make_list_from_str(biosample_df.loc[i, 'References'])
    # get info that the report has trouble handling
    get_objects('document', objs_by_type, objs_by_id)
    get_objects('publication', objs_by_type, objs_by_id)
    # need references.identifiers from the characterization report
    get_objects_w_report('biosample_characterization', objs_by_type, fields_dict, fields_conversion, objs_by_id)
    # need things like modifications_by_site_id.investigated_as
    get_objects_w_report('genetic_modification', objs_by_type, fields_dict, fields_conversion, objs_by_id)
    # get treatment objects corresponding to gms
    # for some reason these are unicode strings?
    get_objects('treatment', objs_by_type, objs_by_id)
    # backfill gm objects with treatments
    backfill_fields(objs_by_type['genetic_modification'].values(), objs_by_id)
    # add backfilled gms to objs_by_id
    for k, v in objs_by_type['genetic_modification'].items():
        objs_by_id[k] = v
    # backfill all biosample objects with gms, documents, and characterizations
    backfill_fields(objs_to_output, objs_by_id)
    return objs_to_output
Python
def build_donors(output_lists, report_fields, fields_conversion, objs_by_type, objs_by_id):
    """
    Takes a list of donors and gives a list of donor objects (dicts) for
    submission.
    """
    print('Building donor objects to submit')
    donor_fields = report_fields['donor']
    # holds donor objects to be submitted
    objs_to_output = []
    desired_species = ('Homo sapiens', 'Mus musculus', 'Drosophila melanogaster', 'Caenorhabditis elegans')
    # first pass: collect fields whose ordered information we get from the report
    for donor_type, donors in output_lists['donors'].items():
        if not donors:
            continue
        donor_fields_list = donor_fields[donor_type]
        donor_df = get_report_tsv_from_fields(donor_type, donors, donor_fields_list)
        # collect ids of properties we need to get separately
        for i in donor_df.index:
            donor_accession = donor_df.loc[i, 'Accession']
            if donor_df.loc[i, 'Organism'] not in desired_species:
                continue
            else:
                donor_obj = minimize_donors_and_biosamples(donor_df, i, fields_conversion)
                objs_to_output.append(donor_obj)
                objs_by_type['document'][donor_accession] = make_list_from_str(donor_df.loc[i, 'Documents'])
                if donor_type != 'HumanDonor':
                    objs_by_type['genetic_modification'][donor_accession] = make_list_from_str(donor_df.loc[i, 'Genetic modifications'])
                objs_by_type['donor_characterization'][donor_accession] = make_list_from_str(donor_df.loc[i, 'Characterizations'])
                objs_by_type['publication'][donor_accession] = make_list_from_str(donor_df.loc[i, 'References'])
    # get info that the report has trouble handling
    get_objects('document', objs_by_type, objs_by_id)
    get_objects('publication', objs_by_type, objs_by_id)
    # need references.identifiers from the donor_characterization report
    get_objects_w_report('donor_characterization', objs_by_type, report_fields, fields_conversion, objs_by_id)
    # need things like modifications_by_site_id.investigated_as
    get_objects_w_report('genetic_modification', objs_by_type, report_fields, fields_conversion, objs_by_id)
    # get treatment objects corresponding to gms
    get_objects('treatment', objs_by_type, objs_by_id)
    # backfill gm objects with treatments
    backfill_fields(objs_by_type['genetic_modification'].values(), objs_by_id)
    # add backfilled gms to objs_by_id
    for k, v in objs_by_type['genetic_modification'].items():
        objs_by_id[k] = v
    # backfill all donor objects with gms, documents, and characterizations
    backfill_fields(objs_to_output, objs_by_id)
    return objs_to_output
Python
def minimize_donors_and_biosamples(report_df, report_df_index, fields_conversion):
    """
    This function generically converts the report tsvs into submittable
    objects, at least for biosamples and donors. Empty (NaN) fields are
    generally not included in the output objects. However, documents,
    references, characterizations, and genetic_modifications (for nonhuman
    donors) always appear in the output object as a list even if they are
    empty.
    """
    # initialize output object
    obj_to_output = {}
    fields_direct_conversion = fields_conversion['direct']
    fields_indirect_conversion = fields_conversion['indirect']
    for column in report_df.columns:
        value = report_df.loc[report_df_index, column]
        # columns whose values can be directly submitted
        if column in fields_direct_conversion:
            # don't submit fields with blank (NaN) values
            if pd.isna(value):
                if column == 'Description':
                    obj_to_output[fields_direct_conversion[column]] = ''
                continue
            elif column == 'Age':
                obj_to_output[fields_direct_conversion[column]] = str(value)
            else:
                obj_to_output[fields_direct_conversion[column]] = value
        # columns that need preprocessing
        elif column in fields_indirect_conversion:
            if column in ('Source', 'Donor'):
                # check if source is NaN
                if pd.isna(value):
                    continue
                # convert from '/sources/source-name/' to 'source-name'
                parsed = value.split('/')[2]
                obj_to_output[fields_indirect_conversion[column]] = parsed
            elif column == 'Age':
                obj_to_output[fields_indirect_conversion[column]] = str(value)
            elif column == 'Passage number':
                if not pd.isna(value):
                    obj_to_output[fields_indirect_conversion[column]] = int(value)
            else:
                l = make_list_from_str(value)
                obj_to_output[fields_indirect_conversion[column]] = l
    return obj_to_output
Python
def backfill_fields(objs_to_output, objs_by_id, is_experiment=False, is_document=False):
    """
    Fields of objects in objs_to_output containing lists of @ids are populated
    with the desired subobjects corresponding to the @ids, with the
    appropriate minimizations applied where needed.
    """
    for obj in objs_to_output:
        for field in obj:
            if field == 'documents':
                if is_experiment:
                    obj[field] = [minimize_experiment_document(objs_by_id[obj_id]) for obj_id in obj[field]]
                if not is_experiment:
                    obj[field] = [minimize_document(objs_by_id[obj_id]) for obj_id in obj[field]]
            if field == 'treatments':
                obj[field] = [minimize_treatment(objs_by_id[obj_id]) for obj_id in obj[field]]
            # Need to get to the library in the replicate
            if field == 'library':
                if 'spikeins_used' in obj[field]:
                    obj[field]['spikeins_used'] = [minimize_reference(objs_by_id[obj_id]) for obj_id in obj[field]['spikeins_used']]
                if 'documents' in obj[field]:
                    obj[field]['documents'] = [minimize_experiment_document(objs_by_id[obj_id]) for obj_id in obj[field]['documents']]
            if field == 'references' and is_document:
                # We need a dictionary of references in the format @id: [identifier(s)]
                obj[field] = {obj_id: minimize_publication(objs_by_id[obj_id]) for obj_id in obj[field]}
            if field == 'references' and not is_document:
                obj[field] = [minimize_publication(objs_by_id[obj_id]) for obj_id in obj[field]]
            if field == 'replicates':
                # handle the situation where a replicate is not released and not in objs_by_id
                output = []
                for obj_id in obj[field]:
                    if obj_id not in objs_by_id:
                        continue
                    else:
                        output.append(objs_by_id[obj_id])
                obj[field] = output
            if field in ('characterizations', 'genetic_modifications', 'applied_modifications', 'files'):
                # these objects have already been minimized
                obj[field] = [objs_by_id[obj_id] for obj_id in obj[field]]
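The core idea here is replacing lists of @ids with the already-fetched objects held in an id-keyed cache. A toy illustration with plain dicts (all names and values are made up):

# Replace lists of @ids with the cached objects they point to.
objs_by_id = {
    '/documents/doc1/': {'document_type': 'general protocol'},
    '/treatments/t1/': {'treatment_term_name': 'ethanol'},
}
experiment = {
    'accession': 'ENCSR000XXX',
    'documents': ['/documents/doc1/'],
    'treatments': ['/treatments/t1/'],
}
for field in ('documents', 'treatments'):
    experiment[field] = [objs_by_id[obj_id] for obj_id in experiment[field]]

print(experiment['documents'][0]['document_type'])  # 'general protocol'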
Python
def minimize_reference(reference):
    """
    Takes a reference spike-in dataset object and strips it down to a subset
    of its fields.
    """
    reference_fields = ['accession', 'dbxrefs', 'description']
    minimized_reference = {field: reference[field] for field in reference_fields if field in reference}
    return minimized_reference
Python
def minimize_experiment_document(document):
    """
    Takes a document belonging to an experiment, or to a library in an
    experiment, and strips it down to a subset of desired fields. This differs
    from other non-experiment documents in that the attachment is a dictionary
    rather than a simple string concatenation of the document @id and href.
    """
    minimized_document = {}
    for key in ('document_type', 'urls', 'references', 'attachment'):
        if key in document:
            if key == 'attachment':
                minimized_document[key] = minimize_attachment(document[key], document['@id'])
            else:
                minimized_document[key] = document[key]
    return minimized_document
Python
def minimize_attachment(attachment, doc_id):
    """
    Takes an attachment obtained from a document in an experiment object and
    strips it down to a subset of desired fields. The document @id, given by
    doc_id, is prepended to the attachment href.
    """
    minimized_attachment = {}
    for key in ('md5sum', 'href'):
        if key in attachment:
            if key == 'href':
                minimized_attachment[key] = doc_id + attachment[key]
            elif key == 'md5sum':
                minimized_attachment[key] = attachment[key]
    return minimized_attachment
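A quick input/output example with made-up values, assuming the minimize_attachment definition above is in scope, to show the href concatenation:

attachment = {'href': '@@download/attachment/protocol.pdf', 'md5sum': 'abc123'}
doc_id = '/documents/doc1/'
print(minimize_attachment(attachment, doc_id))
# {'md5sum': 'abc123', 'href': '/documents/doc1/@@download/attachment/protocol.pdf'}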
Python
def minimize_document(document):
    """
    Takes a document obtained directly from its json on the portal and strips
    it down to a subset of desired fields. The document @id is prepended to
    the attachment href.
    """
    minimized_document = {}
    for field in ('document_type', 'urls', 'references'):
        if field in document:
            minimized_document[field] = document[field]
    if 'attachment' in document:
        minimized_document['attachment'] = document['@id'] + document['attachment']['href']
    return minimized_document
Python
def minimize_treatment(treatment):
    """
    Takes a treatment obtained directly from its json on the portal and strips
    it down to a subset of desired fields.
    """
    minimized_treatment = {}
    for key in ['treatment_type', 'dbxrefs', 'treatment_term_name', 'treatment_term_id',
                'concentration', 'concentration_units', 'duration', 'duration_units',
                'temperature', 'temperature_units']:
        if key in treatment.keys():
            minimized_treatment[key] = treatment[key]
    return minimized_treatment
Python
def minimize_gms(gm_df, fields_conversion, objs_by_type, objs_by_id):
    """
    Takes a dataframe containing gms and prepares objects containing most gm
    properties (except treatments, which are backfilled later). Both gms and
    treatments are added to objs_by_id, since they are both backfilled into
    other objects later.
    """
    gm_fields_conversion = fields_conversion['genetic_modification']
    for i in gm_df.index:
        minimized_gm = {}
        for column in gm_df.columns:
            value = gm_df.loc[i, column]
            if pd.isna(value):
                # Filter out fields with no value
                continue
            if column in ['Purpose', 'Category', 'Method', 'Modification zygosity']:
                # Insert field/value pairs that don't need conversion into the output object
                minimized_gm[gm_fields_conversion[column]] = value
            if column == 'Modification site':
                # The raw value is given by the report as a string corresponding to a json object
                processed = ast.literal_eval(value)
                minimized_gm[gm_fields_conversion[column]] = processed
            if column in ('Introduced protein tags', 'Reagents'):
                # The raw data is a string corresponding to a list of json objects,
                # although without enclosing brackets
                raw_data = '[' + value + ']'
                processed = ast.literal_eval(raw_data)
                minimized_gm[gm_fields_conversion[column]] = processed
            if column == 'Treatments':
                # Needs conversion to a list
                values_list = make_list_from_str(value)
                gm_accession = gm_df.loc[i, 'ID'].split('/')[2]
                # add treatments to the objects to fetch
                objs_by_type['treatment'][gm_accession] = values_list
                minimized_gm[gm_fields_conversion[column]] = values_list
            if column == 'modified_site_by_target_id.label':
                # Check whether this is a control target; if it is not, add the target label to the gm object
                if gm_df.loc[i, 'modified_site_by_target_id.investigated_as'] != 'control':
                    minimized_gm[gm_fields_conversion[column]] = value
        objs_by_type['genetic_modification'][gm_df.loc[i, 'ID']] = minimized_gm
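A standalone illustration of the ast.literal_eval trick used for the 'Reagents' and 'Introduced protein tags' columns; the input string below is made up but mimics the report format (comma-separated objects with no enclosing brackets).

import ast

raw = "{'source': '/sources/addgene/', 'identifier': 'addgene:12345'}, {'source': '/sources/sigma/', 'identifier': 'sigma:67890'}"
reagents = ast.literal_eval('[' + raw + ']')
print(len(reagents))              # 2
print(reagents[0]['identifier'])  # 'addgene:12345'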
Python
def minimize_files(file_df, fields_conversion, objs_by_type, objs_by_id):
    """
    This is for the purpose of minimizing files to incorporate into experiment
    objects, not to submit to GEO.
    """
    file_fields_conversion = fields_conversion['file']
    for i in file_df.index:
        file_dict = {}
        platform = {}
        replicate = {}
        for column in file_df.columns:
            value = file_df.loc[i, column]
            if column == 'Alternate accessions':
                # This always appears in the output file object, so check it before filtering out NaNs
                values_list = make_list_from_str(value)
                file_dict[file_fields_conversion[column]] = values_list
            elif column == 'platform.dbxrefs':
                # Want to add this to platform to avoid it being filtered out if NaN
                platform[file_fields_conversion[column]] = make_list_from_str(value)
            elif pd.isna(value) or column in ('ID', 'Platform', 'Replicate'):
                continue
            elif column in ('File size', 'Read length'):
                file_dict[file_fields_conversion[column]] = int(value)
            elif column == 'Paired end identifier':
                # In the original script, the value is output as a string
                file_dict[file_fields_conversion[column]] = str(value).rstrip('.0')
            elif column == 'Derived from':
                values_list = make_list_from_str(value)
                file_dict[file_fields_conversion[column]] = values_list
            elif column == 'platform.term_name':
                platform[file_fields_conversion[column]] = value
            elif column in ('replicate.biological_replicate_number', 'replicate.technical_replicate_number'):
                replicate[file_fields_conversion[column]] = int(value)
            else:
                file_dict[file_fields_conversion[column]] = value
        if not pd.isna(file_df.loc[i, 'Platform']):
            file_dict['platform'] = platform
        if not pd.isna(file_df.loc[i, 'Replicate']):
            file_dict['replicate'] = replicate
        objs_by_id[file_df.loc[i, 'ID']] = file_dict
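One behaviour worth flagging: str.rstrip('.0') strips a set of trailing characters, not the literal suffix ".0". That is harmless here because paired-end identifiers are only 1 or 2, but the idiom would mangle other numbers, as this quick demonstration shows.

print(str(2.0).rstrip('.0'))   # '2'  -- intended result
print(str(1.0).rstrip('.0'))   # '1'  -- intended result
print(str(10.0).rstrip('.0'))  # '1'  -- not '10': trailing '0' characters are stripped too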
Python
def minimize_replicates(rep_df, fields_conversion, objs_by_type, objs_by_id):
    """
    Convert a dataframe containing rows with replicate and associated library
    metadata into a replicate object containing a library subobject.
    """
    rep_fields_conversion = fields_conversion['replicate']
    for i in rep_df.index:
        if rep_df.loc[i, 'Status'] not in ['released', 'archived']:
            continue
        replicate = {}
        library = {}
        library_accession = rep_df.loc[i, 'library.@id'].split('/')[2]
        for column in rep_df.columns:
            value = rep_df.loc[i, column]
            if column == 'library.spikeins_used':
                # Always want the spikeins list in the library, even if empty (NaN in the report)
                values = make_list_from_str(value)
                library[rep_fields_conversion[column]] = values
                objs_by_type['reference'][library_accession] = values
            elif column == 'library.documents':
                values = make_list_from_str(value)
                library[rep_fields_conversion[column]] = values
                objs_by_type['document'][library_accession] = values
            elif pd.isna(value):
                continue
            elif column in ('Biological replicate', 'Technical replicate'):
                replicate[rep_fields_conversion[column]] = int(value)
            elif column.startswith('library'):
                if column in ('library.biosample.status', 'library.@id', 'library.status'):
                    continue
                elif column == 'library.biosample.accession':
                    if rep_df.loc[i, 'library.biosample.status'] in ['released', 'archived']:
                        library['biosample'] = value
                elif column == 'library.nucleic_acid_starting_quantity':
                    library[rep_fields_conversion[column]] = str(int(value))
                elif column == 'library.fragmentation_methods':
                    fragmentation_methods = make_list_from_str(value)
                    library[rep_fields_conversion[column]] = ', '.join(fragmentation_methods)
                else:
                    library[rep_fields_conversion[column]] = value
            else:
                # Other columns don't make it into the output experiment json
                pass
        if rep_df.loc[i, 'library.status'] in ['released', 'archived']:
            replicate['library'] = library
        objs_by_type['replicate'][rep_df.loc[i, 'ID']] = replicate
Python
def build(self):
    """
    Builds a ContactCard (.vcf) file and returns it, given the instance's
    parameters.

    :return: Card (file - .vcf).
    """
    # Clean all fields.
    self._clean_fields()
    # Build
Python
def convert_to_e164(raw_phone):
    """
    Converts raw input to a phone number, validated against the E.164 standard.

    For E.164 see:
    <https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-E.164-201011-I!!PDF-E&type=items>
    From: <http://twilio-python.readthedocs.io/en/latest/faq.html>

    :param raw_phone: (str) Phone Number.
    :return: Verified, formatted E.164 number.
    """
    try:
        if not raw_phone:
            return
        # TODO: Handle international phone numbers.
        if raw_phone[0] == '+':
            # Phone number may already be in E.164 format.
            parse_type = None
        else:
            # If no country code information is present, assume it's a US number.
            parse_type = "US"
        phone_representation = phonenumbers.parse(raw_phone, parse_type)
        return phonenumbers.format_number(phone_representation,
                                          phonenumbers.PhoneNumberFormat.E164)
    except phonenumbers.NumberParseException:
        raise ContactCardException(
            "The phone number supplied doesn't look like a valid phone number."
        )
    except Exception:
        raise ContactCardException(
            "Exception when converting phone number to e164."
        )
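A quick look at the phonenumbers calls this helper wraps, assuming the `phonenumbers` package is installed; the US number uses the 555-01xx range reserved for examples, and the UK number is a fictional one.

import phonenumbers

parsed = phonenumbers.parse("202-555-0142", "US")        # region hint used when there is no '+' prefix
print(phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.E164))
# +12025550142

parsed = phonenumbers.parse("+44 20 7946 0958", None)    # country code already present, no hint needed
print(phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.E164))
# +442079460958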
Python
def extended_api(self, model: M) -> M:
    """
    Hook any API model to the HTTP provider, to use custom APIs.

    :param model: the model; any APIEndpointFn, VariablePathSegmentFn, or just
        a class with methods annotated or decorated as such. Basically anything
        that can be understood as an API model.
    :return: An Eth2EndpointImpl which shadows the model, implementing it by
        calling HTTP functions.
    """
    root_endpoint = Eth2EndpointImpl(self, APIPath(''), model)
    return cast(model, root_endpoint)
Python
def monthLogs(self):
    """
    The logs for each month, over the appropriate timespan.
    """
    retval = []
    numMembersMonthEnd = len(FullMembers(self.groupInfo.groupObj))
    for year in self.years:
        latestMonth = 12
        earliestMonth = 1
        if year == self.years[0]:
            latestMonth = date.today().month
        if year == self.years[-1]:
            earliestMonth = min(self.events[year].keys())
        for month in range(latestMonth, (earliestMonth - 1), -1):
            events = {}
            if (year in self.events) and (month in self.events[year]):
                events = self.events[year][month]
            monthLog = MonthLog(self.groupInfo, year, month,
                                numMembersMonthEnd, events)
            retval.append(monthLog)
            numMembersMonthEnd = monthLog.numMembersMonthStart
    return retval
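The method walks months backwards in time (newest first), with the newest year capped at the current month. A standalone illustration of the range arithmetic, using an arbitrary example month:

latestMonth = 9      # e.g. it is currently September in the newest year
earliestMonth = 1
print(list(range(latestMonth, earliestMonth - 1, -1)))
# [9, 8, 7, 6, 5, 4, 3, 2, 1]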
Python
def kubernetes_versions(region):
    """Return the list of available Kubernetes versions supported by the cloud provider, sorted from oldest to latest."""
    output = subprocess.check_output(
        [
            "gcloud",
            "container",
            "get-server-config",
            "--region",
            region,
            "--format=json",
        ]
    )
    data = json.loads(output.decode("utf-8"))
    supported_kubernetes_versions = sorted(data["validMasterVersions"])
    return supported_kubernetes_versions
Python
def kubernetes_versions(region=None):
    """Return the list of available Kubernetes versions supported by the cloud provider, sorted from oldest to latest."""
    supported_kubernetes_versions = sorted(
        [version["slug"] for version in _kubernetes_options()["options"]["versions"]]
    )
    return supported_kubernetes_versions
Python
def kubernetes_versions(region="Central US"):
    """Return the list of available Kubernetes versions supported by the cloud provider, sorted from oldest to latest."""
    client = initiate_container_service_client()
    azure_location = region.replace(" ", "").lower()
    k8s_versions_list = client.container_services.list_orchestrators(
        azure_location, resource_type="managedClusters"
    ).as_dict()
    supported_kubernetes_versions = []
    for key in k8s_versions_list["orchestrators"]:
        if key["orchestrator_type"] == "Kubernetes":
            supported_kubernetes_versions.append(key["orchestrator_version"])
    supported_kubernetes_versions = sorted(supported_kubernetes_versions)
    return supported_kubernetes_versions
Python
def kubernetes_versions(region="us-west-2"):
    """Return the list of available Kubernetes versions supported by the cloud provider, sorted from oldest to latest."""
    # The AWS SDK (boto3) currently doesn't offer an intuitive way to list available
    # Kubernetes versions. This implementation grabs the Kubernetes versions supported
    # by specific EKS addons, so it is always (at the very least) a subset of all
    # Kubernetes versions still supported by AWS.
    if not os.getenv("AWS_DEFAULT_REGION"):
        os.environ["AWS_DEFAULT_REGION"] = region

    client = boto3.client("eks")

    supported_kubernetes_versions = []
    available_addons = client.describe_addon_versions()
    for addon in available_addons.get("addons", []):
        for eksbuild in addon.get("addonVersions", []):
            for k8sversion in eksbuild.get("compatibilities", []):
                supported_kubernetes_versions.append(
                    k8sversion.get("clusterVersion")
                )

    supported_kubernetes_versions = sorted(set(supported_kubernetes_versions))
    return supported_kubernetes_versions
Python
def clipseq_metrics_df(
        analysis_dir, percent_usable, number_usable, iclip=False,
        num_seps=None, sep=".",
        cutadapt_round2_suffix="*fqTrTr.metrics",
        rm_dup_suffix="*.outSo.rmDup.metrics",
        peak_suffix="*.peakClusters.bed"
):
    """
    Reports all CLIP-seq metrics in a given analysis directory.

    Outputs must follow Gabe's CLIP-seq pipeline naming conventions.

    Args:
        analysis_dir: directory containing the pipeline outputs
        percent_usable: minimum usable/mapped fraction required to pass basic QC
        number_usable: minimum number of usable reads required to pass basic QC
        iclip: whether the data are iCLIP
        num_seps: number of separators making up the sample prefix
        sep: separator character used in file names

    Returns:
        combined_df: pandas.DataFrame of metrics, one row per sample
    """
    # TODO: fix prefix name separator
    if num_seps is None:
        num_seps = 3 if iclip else 3

    cutadapt_round2_names, rm_duped_names, peaks_names = get_all_names(
        analysis_dir,
        cutadapt_round2_suffix,
        rm_dup_suffix,
        peak_suffix,
        sep,
        num_seps
    )

    # make dataframes
    cutadapt_round2_df = pd.DataFrame(
        {
            name: parse_cutadapt_file(cutadapt_file)
            for name, cutadapt_file in cutadapt_round2_names.items()
        }
    ).transpose()
    cutadapt_round2_df.columns = [
        "{} Round 2".format(col) for col in cutadapt_round2_df.columns
    ]
    rm_duped_df = pd.DataFrame(
        {name: parse_rm_duped_metrics_file(rm_duped_file)
         for name, rm_duped_file in rm_duped_names.items()}
    ).transpose()
    peaks_df = pd.DataFrame(
        {name: {"Clipper peaks num": len(pybedtools.BedTool(peaks_file))}
         for name, peaks_file in peaks_names.items()}
    ).transpose()

    # get rnaseq metrics dataframe
    combined_df = rnaseq_metrics_df(analysis_dir, num_seps, sep)

    # merge dataframes
    combined_df = pd.merge(combined_df, cutadapt_round2_df,
                           left_index=True, right_index=True, how="outer")
    combined_df = pd.merge(combined_df, rm_duped_df,
                           left_index=True, right_index=True, how="outer")
    combined_df = pd.merge(combined_df, peaks_df,
                           left_index=True, right_index=True, how="outer")

    # Rename columns to be useful
    combined_df = combined_df.rename(
        columns={"Reads Written Round 2": "Reads after cutadapt 2"})

    # compute useful stats
    combined_df['STAR genome uniquely mapped'] = combined_df[
        'STAR genome uniquely mapped'].astype(float)
    combined_df['Initial reads num'] = combined_df[
        'Initial reads num'].astype(float)
    try:
        combined_df["Percent usable / mapped"] = (
            combined_df['Usable reads'] / combined_df['STAR genome uniquely mapped'])
        combined_df["Percent Usable / Input"] = (
            combined_df['Usable reads'] / combined_df['Initial reads num'])
        combined_df["Percent Repetitive"] = 1 - (
            combined_df['STAR genome input reads'] /
            combined_df['Reads after cutadapt 2'].astype(float))
        combined_df["Repetitive Reads"] = (
            combined_df['Reads after cutadapt 2'] - combined_df['STAR genome input reads'])
        combined_df['Passed basic QC'] = (
            (combined_df['Usable reads'] > number_usable) &
            (combined_df['Percent usable / mapped'] > percent_usable))
    except ZeroDivisionError:
        print("passing on ZeroDivisionError")

    return combined_df
Python
def parse_peak_metrics(fn):
    """
    Unused function that has parsed/will parse CLIPPER metrics.

    :param fn: basestring
    :return spot_dict: dict
    """
    with open(fn) as file_handle:
        next(file_handle)  # skip the header line
        return {'spot': float(next(file_handle))}
Python
def read_parsed(fn):
    """
    Reads Eric's parsed file from the repetitive element pipeline.

    Parameters
    ----------
    fn : basestring
        the *.parsed file

    Returns
    -------
    total_df : pandas.DataFrame
        dataframe of total reads per unique/repetitive element family.
    element_df : pandas.DataFrame
        dataframe of unique repetitive/unique elements that each unique read
        mapped to.
    total_reads : int
    total_genomic_reads : int
    total_usable_reads : int
    total_repfamily_reads : int
    """
    df = pd.read_table(fn, names=[
        'total_or_element', 'element', 'read_num',
        'clip_rpr', 'annotation', 'gene'
    ])
    try:
        total_reads = df[
            (df['total_or_element'] == '#READINFO') &
            (df['element'] == 'AllReads')
        ]['read_num'].values[0]
    except IndexError:
        # the re-parsed files don't have this row...
        total_reads = 0
    total_genomic_reads = df[
        (df['total_or_element'] == '#READINFO') &
        (df['element'] == 'GenomicReads')
    ]['read_num'].values[0]
    total_usable_reads = df[
        (df['total_or_element'] == '#READINFO') &
        (df['element'] == 'UsableReads')
    ]['read_num'].values[0]
    total_repfamily_reads = df[
        (df['total_or_element'] == '#READINFO') &
        (df['element'] == 'RepFamilyReads')
    ]['read_num'].values[0]
    total_df = df[df['total_or_element'] == 'TOTAL'][
        ['element', 'read_num', 'clip_rpr']
    ]
    element_df = df[df['total_or_element'] == 'ELEMENT'][
        ['element', 'read_num', 'clip_rpr']
    ]
    return total_df, element_df, \
        total_reads, total_genomic_reads, \
        total_usable_reads, total_repfamily_reads
Python
def return_l2fc_entropy_from_parsed(ip_parsed, input_parsed, nopipes=True):
    """
    From 2 parsed rep element pipeline outputs (ip and input),
    compute fold change and information content.
    Usually fold changes of > 3 and information content of ~0.1
    can be considered enriched.

    Parameters
    ----------
    ip_parsed : str
        filename of the ip parsed string
    input_parsed : str
        filename of the input parsed string
    nopipes : bool
        if True, return just the uniquely mapped rep family mappings
        if False, return all unique and nonunique

    Returns
    -------
    merged : pandas.DataFrame
        table consisting of fold enrichment and information content params
    """
    total_ip, _, _, _, _, _ = read_parsed(ip_parsed)
    total_input, _, _, _, total_input_usable_reads, _ = read_parsed(
        input_parsed)

    # a pipe indicates read totals mapping to more than one element/rep family.
    if nopipes:
        total_ip = total_ip[
            total_ip['element'].str.contains(r'\|') == False
        ]
        total_input = total_input[
            total_input['element'].str.contains(r'\|') == False
        ]

    # index columns by their element
    total_ip.set_index('element', inplace=True)
    total_input.set_index('element', inplace=True)

    # rename the IP and input columns separately
    total_ip.columns = ["IP_{}".format(c) for c in total_ip.columns]
    total_input.columns = ["Input_{}".format(c) for c in total_input.columns]

    # merge the two on element id
    merged = pd.merge(total_ip, total_input, how='left',
                      left_index=True, right_index=True)

    # deal with missing values
    merged['Input_read_num'].fillna(
        1, inplace=True
    )  # Pseudocount all missing values
    merged['Input_clip_rpr'].fillna(
        merged['Input_read_num'] / total_input_usable_reads, inplace=True)

    # calculate fold enrichment and information content
    merged['Fold_enrichment'] = merged['IP_clip_rpr'].div(
        merged['Input_clip_rpr'])
    merged['Information_content'] = merged['IP_clip_rpr'] * np.log2(
        merged['IP_clip_rpr'].div(merged['Input_clip_rpr']))
    return merged
Python
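A hedged usage sketch; the file names below are placeholders for real *.parsed outputs of the repetitive element pipeline:

# Hypothetical file names; replace with real pipeline outputs.
enrichment = return_l2fc_entropy_from_parsed("CLIP_ip.parsed", "CLIP_input.parsed")
# Rank repeat families by information content to spot enriched elements.
print(enrichment.sort_values("Information_content", ascending=False).head(10))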
def maxBPM(self):
    """
    Computes and prints the maximum number of applicants that can be assigned jobs.

    matchR keeps track of the applicants assigned to jobs: matchR[i] is the
    applicant number assigned to job i, and -1 indicates nobody is assigned.
    """
    matchR = [-1] * self.jobs
    unassigned_applicants = []

    # Count of jobs assigned to applicants
    result = 0
    for i in range(self.ppl):
        # Mark all jobs as not seen for the next applicant.
        seen = [False] * self.jobs

        # Find if applicant 'i' can get a job
        if self.bpm(i, matchR, seen):
            result += 1
        else:
            unassigned_applicants.append(
                'Applicant number: {} not assigned any job'.format(i + 1))

    print('\n------')
    print('Result')
    print('------')
    print('\n=>Maximum number of applicants that can get a job is: {}'.format(result))
    print()
    print('-----------')
    print('Final State')
    print('-----------')
    print('\n=>Jobs Assigned\n')

    # print assigned jobs data
    jobs_assigned = []
    counter = 1
    for i in matchR:
        if i != -1:
            jobs_assigned.append([counter, i + 1])
        else:
            jobs_assigned.append([counter, 'not assigned!'])
        counter += 1
    df = pd.DataFrame(jobs_assigned, columns=['Job', 'Assigned To Applicant'])
    print(df.to_string(index=False))

    # print unassigned applicants data
    print('\n=>Unassigned Applicants\n')
    for i in unassigned_applicants:
        print(i)
Python
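maxBPM relies on a self.bpm helper that is not shown above. A minimal sketch of the standard Kuhn augmenting-path routine it most likely corresponds to, assuming the class stores the applicant-to-job adjacency matrix as self.graph (that attribute name is an assumption):

def bpm(self, u, matchR, seen):
    """Try to find an augmenting path for applicant u; returns True if u gets a job."""
    for j in range(self.jobs):
        # Applicant u is interested in job j and j hasn't been tried for u yet.
        if self.graph[u][j] and not seen[j]:
            seen[j] = True
            # Assign j to u if j is free, or if j's current holder can be re-matched.
            if matchR[j] == -1 or self.bpm(matchR[j], matchR, seen):
                matchR[j] = u
                return True
    return False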
def validate_rouge(args, device_id, pt, step, num_batches=500):
    """ Computes rouge scores on the first `num_batches` batches of the validation set. """
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    if pt != '':
        test_from = pt
    else:
        test_from = args.test_from
    logger.info('Loading checkpoint from %s' % test_from)

    checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
    opt = vars(checkpoint['opt'])
    for k in opt.keys():
        if k in model_flags:
            setattr(args, k, opt[k])
    print(args)

    tokenizer = BertTokenizer.from_pretrained(args.pretrained_dir, do_lower_case=True)
    symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
               'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}

    if args.model == 'presumm':
        model = AbsSummarizer(args, device, checkpoint)
    elif args.model == 'sentsumm':
        model = SentSumm(args, device, symbols, checkpoint)
    else:
        raise ValueError('Unknown model: %s' % args.model)
    model.eval()

    valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
                                        args.test_batch_size, device,
                                        shuffle=False, is_test=True)

    predictor = build_predictor(args, tokenizer, symbols, model, logger)
    return predictor.translate(valid_iter, step, max_batches=num_batches)
Python
def is_from_pretrained(args, name):
    """ Returns True if a parameter is loaded from a pretrained checkpoint. """
    if name.startswith('bert.model.'):
        return True
    elif args.pretrain_dec and (name.startswith('decoder.') or name.startswith('generator.')):
        return True
    else:
        return False
Python
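A hedged usage sketch showing the typical reason for such a predicate: building separate optimizer parameter groups, for instance with a smaller learning rate for pretrained weights. `model` and `args` are assumed to exist, and the learning-rate values are illustrative only:

import torch

pretrained_params, scratch_params = [], []
for name, param in model.named_parameters():
    if is_from_pretrained(args, name):
        pretrained_params.append(param)
    else:
        scratch_params.append(param)

optimizer = torch.optim.Adam([
    {"params": pretrained_params, "lr": 2e-5},  # illustrative values
    {"params": scratch_params, "lr": 1e-3},
])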
def find_num_sentences(mask_tensor):
    """ Determines the number of sentences based on the first entry of the encoder attention mask. """
    # the mask tensor is a concatenation of the mask for sentence representations
    # and article BPE token embeddings
    # it should look like: [0, -1e9, -1e9, -1e9, 0, 0, ...] for an example with 4 sentences
    assert mask_tensor[0] == 0
    i = 1
    while mask_tensor[i] != 0:
        i += 1
    assert i < 50  # something went wrong, we never have that many sentences
    return i
Python
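A small sanity check of the counting logic, using a hand-built mask like the one described in the comment:

import torch

# 0 for the leading position, -1e9 for three more sentence slots, then article tokens.
mask = torch.tensor([0.0, -1e9, -1e9, -1e9, 0.0, 0.0])
assert find_num_sentences(mask) == 4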
def split_into_sents(text):
    """ Splits the input `text` into sentences. """
    text = text.replace('\n', ' ')
    text = ' '.join(text.split())  # replace multiple consecutive whitespace
    sents = nltk.sent_tokenize(text)
    return sents
Python
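Usage sketch; nltk's sentence tokenizer needs the punkt model downloaded once:

import nltk
nltk.download('punkt')  # one-time download of the sentence tokenizer model

text = "First sentence.\nSecond one spans\nmultiple lines. Third!"
print(split_into_sents(text))
# ['First sentence.', 'Second one spans multiple lines.', 'Third!']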
def create_dataset(output_dir, formatter, articles, summaries, name, chunk_size, do_filter):
    """ Create a dataset with the given name. """
    dataset = []
    chunk_counter = 0
    total_num_examples = 0
    for article, summary in zip(articles, summaries):
        if pd.isna(article) or pd.isna(summary):
            continue

        sents_article = split_into_sents(article)
        summary = unicodedata.normalize('NFKC', summary)  # remove non-breaking spaces and possibly others
        sents_summary = split_into_sents(summary)

        data = formatter.preprocess_curation_corpus(sents_article, sents_summary, do_filter=do_filter)
        if data is None:
            continue

        data_dict = {
            'src': data[0],
            'segs': data[1],
            'tgt': data[2],
            'src_txt': data[3],
            'tgt_txt': data[4],
            'src_sent_labels': [],
            'clss': [],
        }
        dataset.append(data_dict)

        if len(dataset) >= chunk_size:
            dataset_path = os.path.join(output_dir, 'curation.%s.%d.bert.pt' % (name, chunk_counter))
            print('Saving chunk with %d examples to %s' % (len(dataset), dataset_path))
            torch.save(dataset, dataset_path)
            total_num_examples += len(dataset)
            dataset = []
            chunk_counter += 1

    if len(dataset) > 0:
        dataset_path = os.path.join(output_dir, 'curation.%s.%d.bert.pt' % (name, chunk_counter))
        print('Saving chunk with %d examples to %s' % (len(dataset), dataset_path))
        torch.save(dataset, dataset_path)
        total_num_examples += len(dataset)

    print('Saved %d examples for dataset %s' % (total_num_examples, name))
Python
def _generate_next_sentence(self, tgt, mask_tgt, encoded_src, mask_encoded_src):
    """ Generates the next sentence representation based on the previous sentences. """
    assert encoded_src.dim() == 3
    batch_size = encoded_src.size(0)

    first_sents = self.first_sent_emb.repeat(batch_size, 1, 1)
    if tgt is not None:
        assert tgt.dim() == 2
        encoded_sents, _, _, _ = self._encode_sentence(tgt, mask_tgt)
        if self.enc2sentgen is not None:
            encoded_sents = self.enc2sentgen(encoded_sents)
        input_sents = torch.cat([first_sents, encoded_sents], dim=1)
    else:
        # for the first sentence representation, input_sents is just the first sentence embedding
        input_sents = first_sents

    mask_input_sents = torch.ones(input_sents.size()[:2], dtype=torch.uint8, device=self.device)
    output_sents = self.gen_summary_sents(
        hidden_states=input_sents,
        mask_hidden_states=mask_input_sents,
        encoder_hidden_states=encoded_src,
        mask_encoder_states=mask_encoded_src,
    )

    if self.sentgen2wordgen is not None:
        output_sents = self.sentgen2wordgen(output_sents)

    return output_sents[:, -1]
Python
def _get_next_sentence(self, tgt, encoded_src, mask_src):
    """ Get the next sentence representation either from the cache or by generating it. """
    if self.symbols['EOS'] not in tgt:
        # no sentence has finished yet; use the first sentence representation
        return self.generated_sents.get(torch.tensor([]))

    # there are finished previous sentences; try to retrieve their generated representation
    last_sep = self._find_indices(tgt, self.symbols['EOS']).tolist()[-1]
    completed_sents = tgt[:last_sep + 1]
    cached_sent_repr = self.generated_sents.get(completed_sents)
    if cached_sent_repr is not None:
        return cached_sent_repr

    # we have no cached sentence representation for these previous sentences
    tgt_sents = completed_sents.unsqueeze(0)
    mask_tgt_sents = torch.ones_like(tgt_sents, dtype=torch.uint8)
    sent_repr = self._generate_next_sentence(tgt_sents, mask_tgt_sents, encoded_src, mask_src)
    self.generated_sents.put(completed_sents, sent_repr)
    return sent_repr
Python
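The generated_sents object above acts as a cache keyed by a tensor of completed target tokens (with the empty tensor standing for "no finished sentence yet"). Its implementation isn't shown here; below is a minimal sketch of an interface that would satisfy those get/put calls, keying on a tuple of token ids since tensors aren't hashable. This is an assumed stand-in, not the repository's actual class:

class GeneratedSentenceCache:
    """Sketch of a cache keyed by a 1-D tensor of token ids (assumption)."""

    def __init__(self, first_sent_repr):
        # The empty key maps to the "first sentence" representation.
        self._store = {(): first_sent_repr}

    def _key(self, tgt):
        return tuple(tgt.tolist())

    def get(self, tgt):
        return self._store.get(self._key(tgt))

    def put(self, tgt, sent_repr):
        self._store[self._key(tgt)] = sent_repr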
def _generate_next_word(self, tgt, mask_tgt, sentence_repr, encoded_src, mask_src):
    """ Generates the next word based on the current sentence representation and the previous words. """
    assert tgt.dim() == 2
    batch_size, tgt_len = tgt.size()

    if self.no_sent_repr:
        encoder_hidden_states = encoded_src
        mask_encoder_states = mask_src
    elif self.use_sent_cond:
        encoder_hidden_states = None
        mask_encoder_states = None
        if self.use_dec_src_attn:
            encoder_hidden_states = encoded_src.expand(batch_size, -1, -1)
            mask_encoder_states = mask_src[:, None, :] * mask_tgt[:, :, None]
    else:
        encoder_hidden_states = sentence_repr
        mask_encoder_states = torch.ones([batch_size, tgt_len, 1], dtype=torch.uint8,
                                         device=self.device) * mask_tgt[:, :, None]
        if self.use_dec_src_attn:
            encoder_hidden_states = torch.cat(
                [encoder_hidden_states, encoded_src.expand(batch_size, -1, -1)], dim=1)
            mask_src_tgt = mask_src[:, None, :] * mask_tgt[:, :, None]
            mask_encoder_states = torch.cat([mask_encoder_states, mask_src_tgt], dim=2)

    params = {
        'hidden_states': tgt,
        'mask_hidden_states': mask_tgt,
        'encoder_hidden_states': encoder_hidden_states,
        'mask_encoder_states': mask_encoder_states,
    }
    if self.use_sent_cond:
        params['sentence_repr'] = sentence_repr

    return self.gen_summary_words.forward(**params)
Python
def inference_step(self, encoded_src, mask_src, tgt, mask_tgt):
    """
    Run one step of inference. Currently only works for a batch size of 1.

    encoded_src: [batch_size x seq_len x hidden_size]
    tgt: [(batch_size * beam_size) x seq_len_so_far]
    """
    assert encoded_src.dim() == 3 and encoded_src.size(0) == 1

    # potentially project encoder output dimensions
    encoded_src_sentgen = encoded_src_wordgen = encoded_src
    if self.enc2sentgen is not None:
        encoded_src_sentgen = self.enc2sentgen(encoded_src)
    if self.enc2wordgen is not None:
        encoded_src_wordgen = self.enc2wordgen(encoded_src)

    # get sentence representations for previous sentences (from cache or generate)
    generated_sents = None
    if not self.no_sent_repr:
        generated_sents = [self._get_next_sentence(t, encoded_src_sentgen, mask_src) for t in tgt]
        generated_sents = torch.stack(generated_sents, dim=0)

    # generate the next word
    generated_words = self._generate_next_word(tgt, mask_tgt, generated_sents,
                                               encoded_src_wordgen, mask_src)
    return generated_words
Python
def preprocess_curation_corpus(self, src_sents, tgt_sents, do_filter=False):
    """
    Applies Presumm formatting to articles and summaries.
    Expects a list of sentences for each of src/tgt (not yet tokenized into words,
    as opposed to the `preprocess` method).
    """
    # remove article if too short (number of sentences)
    if do_filter and len(src_sents) < self.args.min_src_nsents:
        return None
    # clip article if too long (number of sentences)
    if do_filter and len(src_sents) > self.args.max_src_nsents:
        src_sents = src_sents[:self.args.max_src_nsents]

    # tokenize article, filter short sentences, clip long sentences
    src_subtokens = []
    for src_sent in src_sents:
        subtokens = self.tokenizer.tokenize(src_sent)
        if do_filter and len(subtokens) < self.args.min_src_ntokens_per_sent:
            continue
        if do_filter and len(subtokens) > self.args.max_src_ntokens_per_sent:
            subtokens = subtokens[:self.args.max_src_ntokens_per_sent]
        src_subtokens.extend([self.cls_token] + subtokens + [self.sep_token])

    # convert tokens to ids
    src_subtoken_idxs = self.tokenizer.convert_tokens_to_ids(src_subtokens)

    # compute segment ids
    _segs = [-1] + [i for i, t in enumerate(src_subtoken_idxs) if t == self.sep_vid]
    segs = [_segs[i] - _segs[i - 1] for i in range(1, len(_segs))]
    segments_ids = []
    for i, s in enumerate(segs):
        if i % 2 == 0:
            segments_ids += s * [0]
        else:
            segments_ids += s * [1]

    # tokenize summary
    sentence_join = ' ' + self.tgt_sent_split + ' '
    tgt_sents_subtokens = [self.tokenizer.tokenize(tgt_sent) for tgt_sent in tgt_sents]
    tgt_subtokens_str = sentence_join.join(
        [' '.join(sent_subtokens) for sent_subtokens in tgt_sents_subtokens])
    tgt_subtokens_str = self.tgt_bos + ' ' + tgt_subtokens_str + ' ' + self.tgt_eos
    tgt_subtokens = tgt_subtokens_str.split()

    # possibly filter short summaries and clip long ones
    if do_filter and len(tgt_subtokens) < self.args.min_tgt_ntokens:
        return None
    if do_filter and len(tgt_subtokens) > self.args.max_tgt_ntokens:
        tgt_subtokens = tgt_subtokens[:self.args.max_tgt_ntokens - 1] + [self.tgt_eos]

    # convert tokens to ids
    tgt_subtoken_idxs = self.tokenizer.convert_tokens_to_ids(tgt_subtokens)

    # create original text
    src_txt = ' '.join(src_sents)
    tgt_txt = '<q>'.join(tgt_sents)

    return src_subtoken_idxs, segments_ids, tgt_subtoken_idxs, src_txt, tgt_txt
Python
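The segment-id computation above is easiest to see on a toy example. The sketch below uses made-up token ids (102 standing in for self.sep_vid) and mirrors the same list arithmetic:

sep_vid = 102  # assumed [SEP] id, for illustration only
src_subtoken_idxs = [101, 7, 8, 9, 102, 101, 4, 5, 102]  # two "sentences"

_segs = [-1] + [i for i, t in enumerate(src_subtoken_idxs) if t == sep_vid]
segs = [_segs[i] - _segs[i - 1] for i in range(1, len(_segs))]
segments_ids = []
for i, s in enumerate(segs):
    if i % 2 == 0:
        segments_ids += s * [0]
    else:
        segments_ids += s * [1]

print(segments_ids)  # [0, 0, 0, 0, 0, 1, 1, 1, 1] -> alternating blocks, one per sentence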
def build_langid_filter(lang_code, lang_detector, threshold):
    """Create a langid filter function of the lang_code at given threshold.

    This filtering matches the mC4 LangID, and makes the threshold configurable.
    mC4 uses 0.7 as the threshold.

    Args:
      lang_code: The language code we are considering
      lang_detector: The langID detector
      threshold: a float, the threshold to filter langid probability

    Returns:
      a langid filter function.
    """
    def filter_fn(text):
        """Python function to filter texts based on lang id score."""
        result = lang_detector.find_language(text=text)
        return (result.is_reliable and result.probability >= threshold and
                result.language == lang_code)

    return filter_fn
Python
def filter_fn(text):
    """Python function to filter texts based on lang id score."""
    result = lang_detector.find_language(text=text)
    return (result.is_reliable and result.probability >= threshold and
            result.language == lang_code)
Python
def filter_langid(dataset,
                  lang_code,
                  lang_detector,
                  text_key='text',
                  threshold=0.95):
    """Create a dataset with langid confidence more than the given threshold.

    The input examples should have a key text_key associated with a tf.string value.

    Args:
      dataset: a tf.data.Dataset
      lang_code: The language code we are considering
      lang_detector: The langID detector
      text_key: a string, the key for the text feature to preprocess in the
        dataset examples.
      threshold: a float, the threshold to filter langid probability.

    Returns:
      a tf.data.Dataset
    """
    filter_fn = build_langid_filter(lang_code, lang_detector, threshold)
    dataset = dataset.filter(
        lambda x: tf.numpy_function(filter_fn, [x[text_key]], tf.bool))
    return dataset
Python
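A self-contained sketch of wiring these pieces into a tf.data pipeline. The detector below is a dummy stand-in that mimics the find_language interface used above; the real code presumably passes a proper langID model (e.g. a CLD3-style detector):

import collections
import tensorflow as tf

# Dummy detector with the interface assumed by build_langid_filter.
Result = collections.namedtuple("Result", ["is_reliable", "probability", "language"])

class DummyDetector:
    def find_language(self, text):
        text = text.decode("utf-8") if isinstance(text, bytes) else str(text)
        lang = "en" if "the" in text.lower() else "und"
        return Result(is_reliable=True, probability=0.99, language=lang)

dataset = tf.data.Dataset.from_tensor_slices(
    {"text": [b"the quick brown fox", b"zzz qqq"]})
filtered = filter_langid(dataset, "en", DummyDetector(), threshold=0.95)
for example in filtered:
    print(example["text"].numpy())  # only the English-looking text survives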
def main(argv: Sequence[str]) -> None:
    """Add numpy checkpoint values at some step to a prompt tuning checkpoint."""
    if len(argv) > 1:
        raise app.UsageError("Too many command-line arguments.")

    output_dir = FLAGS.output_dir
    if output_dir is None:
        output_dir = FLAGS.model_dir

    # Find the latest checkpoint
    latest_step = checkpoints.latest_step(FLAGS.model_dir)
    if latest_step is None:
        raise ValueError(f"Cannot find checkpoint directory in {FLAGS.model_dir}")
    logging.info("Loading checkpoint at step %d", latest_step)
    checkpoint_directory = checkpoints.get_checkpoint_dir(
        FLAGS.model_dir, latest_step)
    logging.info("Loading checkpoint from %s", checkpoint_directory)

    # Load the latest checkpoint
    checkpoint = checkpoints.load_t5x_checkpoint(
        checkpoint_directory,
        restore_dtype=FLAGS.restore_dtype,
        lazy_parameters=True)
    flat_checkpoint = state_utils.flatten_state_dict(
        checkpoint, keep_empty_nodes=True)

    # Find the numpy files for the given step
    # TODO: Abstract this step to allow for more control of the
    # over-writing, i.e. given associated lists of variable overwrite paths and
    # paths to numpy files, overwrite several variables without being tied to
    # prompts produced by this training run.
    numpy_files = find_numpy_checkpoints(FLAGS.model_dir, FLAGS.step)
    if not numpy_files:
        raise ValueError(f"Cannot find any numpy checkpoints in {FLAGS.model_dir} "
                         f"with step {FLAGS.step}")

    for numpy_file in numpy_files:
        logging.info("Loading numpy variable from %s", numpy_file)
        # Load the numpy variable
        with gfile.GFile(numpy_file, "rb") as f:
            numpy_value = np.load(f).astype(FLAGS.restore_dtype)

        # Figure out where the variable goes in the pytree
        variable_path = file_name_to_variable_path(numpy_file)
        logging.info("Overwriting the variable at %s with the value loaded from %s",
                     variable_path, numpy_file)
        # Update the variable in the pytree
        flat_checkpoint[f"target/{variable_path}"] = numpy_value

    # Set the step to the given step.
    logging.info("Setting step to %d", FLAGS.step)
    flat_checkpoint["state/step"] = np.asarray(
        FLAGS.step, dtype=flat_checkpoint["state/step"].dtype)

    # Save the checkpoint with the given step prompt included.
    checkpoint = traverse_util.unflatten_dict(
        {tuple(k.split("/")): v for k, v in flat_checkpoint.items()})
    partitioner = partitioning.PjitPartitioner(
        num_partitions=1, params_on_devices=False)
    # TODO: Add option to configure what optimizer to use.
    optimizer = build_optimizer(checkpoint)
    # Turn the optimizer into the train_state object
    train_state = train_state_lib.TrainState.from_flax_optimizer(optimizer)
    train_state = train_state.restore_state(checkpoint)
    checkpointer = checkpoints.Checkpointer(
        train_state,
        partitioner,
        output_dir,
        save_dtype=FLAGS.save_dtype,
        restore_dtype=FLAGS.restore_dtype)
    logging.info("Saving result to %s", output_dir)
    # Actually save the new checkpoint.
    checkpointer.save(train_state, concurrent_gb=FLAGS.concurrent_gb)
Python
async def mailbox(Authorize: AuthJWT = Depends(), Token=Depends(auth_schema)):
    """Get all emails from the database."""
    Authorize.jwt_required()
    try:
        return JSONResponse(dumps({"success": True, "email": database.view()}))
    except Exception as err:
        return JSONResponse({"success": False, "error": str(err)})
Python
async def mailbox_id(id: int, Authorize: AuthJWT = Depends(), Token=Depends(auth_schema)):
    """Get a single email with the given id."""
    Authorize.jwt_required()
    try:
        return JSONResponse(dumps({"success": True, "email": database.view_single(id)}))
    except Exception as err:
        return JSONResponse({"success": False, "error": str(err)})
Python
async def mailbox_search(search_term: str, Authorize: AuthJWT = Depends(), Token=Depends(auth_schema)):
    """Search emails with a search term."""
    Authorize.jwt_required()
    try:
        return JSONResponse(dumps({"success": True, "email": database.search(search_term)}))
    except Exception as err:
        return JSONResponse({"success": False, "error": str(err)})
Python
async def mailbox_del(id: int, Authorize: AuthJWT = Depends(), Token=Depends(auth_schema)):
    """Delete an email with the given id."""
    Authorize.jwt_required()
    try:
        return JSONResponse(dumps({"success": True, "email": database.delete(id)}))
    except Exception as err:
        return JSONResponse({"success": False, "error": str(err)})
Python
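The handlers above are shown without their route decorators. A hedged sketch of how they are typically registered in a FastAPI app; the paths and router name are assumptions, not the project's actual routes:

from fastapi import APIRouter

router = APIRouter(tags=["mailbox"])

# Hypothetical registrations mirroring the handlers above.
router.get("/mailbox")(mailbox)
router.get("/mailbox/{id}")(mailbox_id)
router.get("/mailbox/search/{search_term}")(mailbox_search)
router.delete("/mailbox/{id}")(mailbox_del)
# The app would also need the usual fastapi_jwt_auth configuration
# (an @AuthJWT.load_config settings callback) for jwt_required() to work.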
def check_instance_is_nonactive(self, instance_id, marker=None):
    """Checks for existence of instance_id inside the global instance index."""
    result, instance_response = self._indexapi.list(
        self.config.get_global_instance_index_id(),
        self.config.get_max_keys(), marker)

    if result:
        # Listing the global instance index succeeded.
        self._logger.info("Index listing result :" +
                          str(instance_response.get_index_content()))
        global_instance_json = instance_response.get_index_content()
        global_instance_list = global_instance_json["Keys"]
        is_truncated = global_instance_json["IsTruncated"]

        if global_instance_list is not None:
            for record in global_instance_list:
                if record["Value"] == instance_id:
                    # instance_id found. Skip entry and retry delete of the oid again.
                    self._logger.info("S3 Instance is still active. Skipping delete operation")
                    return False
            if is_truncated == "true":
                # Continue searching the remaining (truncated) listing.
                return self.check_instance_is_nonactive(
                    instance_id, global_instance_json["NextMarker"])

        # Listing is empty or exhausted, hence instance_id was not found.
        return True
    else:
        # Listing the global instance index failed.
        self._logger.error("Failed to list global instance index")
        return False
Python
def version_entry_cb(self, versionEntry, current_oid, timeVersionEntry):
    """Processes each version entry. Returns False to skip the entry, True to have the caller process it."""
    if versionEntry is None or current_oid is None:
        return False

    # Check if the version entry is the same as the current_oid
    if versionEntry["motr_oid"] == current_oid:
        self.current_obj_in_VersionList = versionEntry
        return False

    # Check if the version entry is recent (i.e. not older than timeVersionEntry)
    if not self.isVersionEntryOlderThan(versionEntry, timeVersionEntry):
        return False

    # The entry belongs to a different object and is old enough to be processed.
    return True
Python
def process_objects_in_versionlist(self, object_version_list_index, current_oid,
                                   callback, timeVersionEntry=15, marker=None):
    """
    Identify object leak due to parallel PUT using the version list.
    The initial marker should be: object key name + "/"
    """
    bStatus = False
    if (object_version_list_index is None or callback is None or
            current_oid is None):
        return bStatus

    self._logger.info("Processing version list for object leak oid " +
                      self.object_leak_id)
    object_key = self.object_leak_info["object_key_in_index"]

    if object_version_list_index is not None:
        extra_qparam = {'Prefix': object_key}
        ret, response_data = self._indexapi.list(object_version_list_index,
                                                 self.config.get_max_keys(),
                                                 marker, extra_qparam)
        if ret:
            self._logger.info("Version listing result for object " + object_key +
                              ": " + str(response_data.get_index_content()))
            object_versions = response_data.get_index_content()
            object_version_list = object_versions["Keys"]
            is_truncated = object_versions["IsTruncated"]
            bStatus = ret
            if object_version_list is not None:
                self._logger.info("Processing " + str(len(object_version_list)) +
                                  " objects in version list = " +
                                  str(object_version_list))
                for object_version in object_version_list:
                    self._logger.info("Fetched object version: " + str(object_version))
                    obj_ver_key = object_version["Key"]
                    obj_ver_md = json.loads(object_version["Value"])
                    # Call the callback to process the version entry
                    cb_status = callback(obj_ver_md, current_oid, timeVersionEntry)
                    if cb_status is True:
                        self._logger.info("Leak detected: Delete version object and "
                                          "version entry for key: " + obj_ver_key)
                        # Delete the object from the store and delete the version
                        # entry from the version list
                        status = self.del_obj_from_version_list(
                            object_version_list_index, obj_ver_key)
                        if status:
                            self._logger.info("Deleted leaked object at key: " + obj_ver_key)
                            # Delete the entry from the probable delete list as well, if any
                            indx = self.config.get_probable_delete_index_id()
                            indx_key = obj_ver_md["motr_oid"]
                            self._logger.info("Deleting entry: " + indx_key +
                                              " from probable list")
                            status = self.delete_key_from_index(
                                indx, indx_key, "PROBABLE INDEX DEL")
                            if status:
                                self._logger.info("Deleted entry: " + indx_key +
                                                  " from probable list")
                        else:
                            self._logger.info("Error! Failed to delete leaked object at key: " +
                                              obj_ver_key)
                            return False
            else:
                self._logger.info("Error: Failed to list object versions")
                return False

            last_key = object_versions["NextMarker"]
            if is_truncated and last_key.startswith(object_key):
                bStatus = self.process_objects_in_versionlist(
                    object_version_list_index, obj_ver_key, callback,
                    timeVersionEntry, last_key)
            return bStatus

        if ret is False:
            self._logger.info("Failed to get Object version listing for object: " +
                              self.object_key + " Error: " + str(response_data))
            if response_data.get_error_status() == 404:
                self._logger.info("Object " + object_key +
                                  " is Not found(404) in the version list")
    return bStatus
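For context, a minimal usage sketch of how the two methods above fit together. This is hypothetical driver code, not from the source: `checker` stands in for the class instance, and the field name used to look up the version-list index id is an assumption.

# Hypothetical driver (illustrative only): walk the version list for the leaked object,
# using version_entry_cb to decide which stale entries to clean up.
version_list_index = checker.object_leak_info["version_list_index_id"]   # assumed field name
initial_marker = checker.object_leak_info["object_key_in_index"] + "/"   # per the docstring
processed = checker.process_objects_in_versionlist(
    version_list_index,
    checker.object_leak_id,        # current oid, compared against each entry's "motr_oid"
    checker.version_entry_cb,
    timeVersionEntry=15,           # staleness threshold as interpreted by isVersionEntryOlderThan
    marker=initial_marker)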
Python
def purge(self):
    """Purge/Delete all the messages."""
    if not self._message_bus:
        raise Exception("Non Existent Message Bus, Cannot Purge")
    self._producer.delete()
Python
def milestone_pricing(chain, start_time, end_time):
    """Milestone pricing, do not set presale collection contract."""
    week = 24 * 3600 * 7

    args = [
        [
            start_time + 0, to_wei("0.0009", "ether"),
            start_time + week * 1, to_wei("0.0009", "ether"),
            end_time, 0,
        ],
    ]

    tx = {
        "gas": 3141592
    }

    contract, hash = chain.provider.deploy_contract('MilestonePricing',
                                                    deploy_args=args,
                                                    deploy_transaction=tx)
    return contract
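The flat deploy_args list above encodes (timestamp, price-per-token-in-wei) pairs. A small sketch of a hypothetical helper (not part of the fixtures) that makes the pairing explicit:

def unpack_milestones(flat_args):
    """Group a flat [time, price, time, price, ...] list into (time, price) tuples."""
    return list(zip(flat_args[0::2], flat_args[1::2]))

# e.g. unpack_milestones(args[0]) ->
# [(start_time, 900000000000000), (start_time + week, 900000000000000), (end_time, 0)]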
Python
def original_crowdsale(chain, team_multisig, start_time, end_time, milestone_pricing,
                       preico_cap, minimum_funding_goal, cap, token,
                       founder_allocation) -> Contract:
    """Crowdsale that we are going to relaunch.

    This will reserve the release agent for the original crowdsale.
    """
    args = [
        token.address,
        milestone_pricing.address,
        team_multisig,
        start_time,
        end_time,
        minimum_funding_goal,
        cap,
    ]

    tx = {
        "from": team_multisig,
    }

    contract, hash = chain.provider.deploy_contract('MintedTokenCappedCrowdsale',
                                                    deploy_args=args,
                                                    deploy_transaction=tx)

    assert contract.functions.owner().call() == team_multisig
    assert not token.functions.released().call()
    assert contract.functions.maximumSellableTokens().call() == cap

    # Allow the crowdsale contract to call mint()
    token.functions.setMintAgent(contract.address, True).transact({"from": team_multisig})
    assert token.functions.mintAgents(contract.address).call() is True

    set_finalizer(chain, token, contract, team_multisig, founder_allocation)

    return contract
Python
def relaunched_crowdsale(chain, team_multisig, start_time, end_time, milestone_pricing,
                         preico_cap, minimum_funding_goal, cap, original_crowdsale, token,
                         founder_allocation) -> Contract:
    """Create a crowdsale with fixed contracts."""
    args = [
        token.address,
        milestone_pricing.address,
        team_multisig,
        start_time,
        end_time,
        minimum_funding_goal,
        cap,
    ]

    tx = {
        "from": team_multisig,
    }

    contract, hash = chain.provider.deploy_contract('RelaunchedCrowdsale',
                                                    deploy_args=args,
                                                    deploy_transaction=tx)

    assert contract.functions.owner().call() == team_multisig
    assert not token.functions.released().call()
    assert contract.functions.maximumSellableTokens().call() == cap

    # Allow the crowdsale contract to call mint()
    token.functions.setMintAgent(contract.address, True).transact({"from": team_multisig})
    assert token.functions.mintAgents(contract.address).call() is True

    # TODO: Use a dummy finalizer here
    founder_allocation = 0
    set_extra_finalizer(chain, token, contract, team_multisig, founder_allocation)

    return contract
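A hypothetical test sketch (not from the source suite) of how the original and relaunched fixtures could be combined; it assumes the deployed crowdsale contracts expose the standard public `token()` getter.

def test_relaunched_crowdsale_reuses_token(original_crowdsale, relaunched_crowdsale, token):
    # Both crowdsales are wired to the same token contract, so a relaunch
    # does not require re-deploying or re-distributing the token.
    assert original_crowdsale.functions.token().call() == token.address
    assert relaunched_crowdsale.functions.token().call() == token.address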
Python
def release_agent(chain, team_multisig, token) -> Contract:
    """Create a simple release agent (useful for testing)."""
    args = [token.address]

    tx = {
        "from": team_multisig
    }

    contract, hash = chain.provider.deploy_contract('SimpleReleaseAgent',
                                                    deploy_args=args,
                                                    deploy_transaction=tx)
    return contract
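A short usage sketch of wiring the release agent to the token in a test. This is an assumption-laden illustration, not from the source: it presumes the token follows a ReleasableToken-style interface with setReleaseAgent(), and that SimpleReleaseAgent exposes a release() method; both should be verified against the actual contracts.

def test_release_agent_unlocks_transfers(release_agent, token, team_multisig):
    # Hypothetical flow: register the agent on the token, then trigger the release.
    token.functions.setReleaseAgent(release_agent.address).transact({"from": team_multisig})  # assumed token ABI
    release_agent.functions.release().transact({"from": team_multisig})  # assumed SimpleReleaseAgent ABI
    assert token.functions.released().call()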