language: stringclasses (6 values)
original_string: stringlengths (25 to 887k)
text: stringlengths (25 to 887k)
Python
def add_features(df):
    """ Create normalized values for even display """
    assert set(["query", "value", "keyword", "ranking", "query_timestamp",
                "geo"]).issubset(df.columns), \
        "Add features failed. Missing one of [query, value, keyword, ranking, query_timestamp, geo]"
    # feature engineering: totals and normalize
    grouped = df.groupby(['ranking']).value  # group values by ranking
    df['value_total'] = grouped.transform('sum')  # total sum
    df['value_normalized'] = ((df.value - grouped.transform('min')) / (
        grouped.transform('max') - grouped.transform('min'))).astype(float)
    df['value_normalized_total'] = df.groupby(
        ['ranking']).value_normalized.transform('sum')  # total sum of normalized values
    df['date'] = pd.to_datetime(df.query_timestamp).dt.strftime("%d. %B %Y")
    return df
Python
def select_topn(df, top_n):
    """ Select top-n keywords for each ranking ordered by value """
    assert df.columns.str.contains("ranking").any(), \
        "select_topn failed. Missing 'ranking' column."
    df = df.reset_index(drop=True)
    df.value = pd.to_numeric(df.value, errors='coerce')  # avoid object dtype
    topn_idx = df.groupby("ranking")['value'].nlargest(
        top_n).droplevel(0).index
    return df.loc[topn_idx, :]
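The two pandas helpers above are easiest to see end to end on a toy frame; the following is a minimal sketch, assuming pandas is available and both functions are in scope (the column values are invented for illustration):

import pandas as pd

# invented example data with the columns add_features() asserts on
df = pd.DataFrame({
    "query": ["a", "b", "c", "d"],
    "keyword": ["python"] * 4,
    "ranking": ["top", "top", "rising", "rising"],
    "value": [100, 50, 80, 20],
    "query_timestamp": ["2021-01-01"] * 4,
    "geo": ["DE"] * 4,
})

df = add_features(df)           # adds value_total, value_normalized, date, ...
top = select_topn(df, top_n=1)  # keeps the highest-value row per ranking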
Python
def add_features(df):
    """ Create normalized values for even display """
    assert df.columns.str.contains(
        "query|value|keyword|ranking|timestamp|geo").all(), \
        "Add features failed. Missing one of [query, value, keyword, ranking, timestamp, geo]"
    # feature engineering: totals and normalize
    grouped = df.groupby(['ranking']).value  # group values by ranking
    df['value_total'] = grouped.transform('sum')  # total sum
    df['value_normalized'] = (df.value - grouped.transform('min')) / \
        (grouped.transform('max') - grouped.transform('min'))  # normalize
    df['value_normalized_total'] = df.groupby(
        ['ranking']).value_normalized.transform('sum')  # total sum of normalized values
    df['date'] = pd.to_datetime(df.query_timestamp).dt.date  # calendar date of the query
    return df
Python
def select_topn(df, top_n=25):
    """ Select top-n keywords for each ranking and value_normalized """
    assert df.columns.str.contains("ranking").any(), \
        "select_topn failed. Missing 'ranking' column."
    # top-n by ranking
    topn_idx = df.groupby(
        "ranking").value_normalized.nlargest(top_n).droplevel(0).index
    return df.loc[topn_idx, :]
Python
def load_config(filepath):
    """Return dictionary with settings and final CSV path"""
    with open(filepath) as file:
        config = yaml.full_load(file)
    root_dir = config['dir']['root']  # project root
    final_dir = config['dir']['final_data']  # final data dir from root
    filename = config['project']['analysis_file']  # filename to store
    path_analysis_file = os.path.join(root_dir, final_dir, filename + '.csv')
    return config, path_analysis_file
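load_config() only reads a handful of keys; a hypothetical minimal config that would satisfy it could be written and loaded like this (the paths and file name are made up):

import yaml

cfg = {
    "dir": {"root": "/tmp/project", "final_data": "data/final"},
    "project": {"analysis_file": "trends_analysis"},
}
with open("/tmp/config.yaml", "w") as fh:
    yaml.dump(cfg, fh)

config, path_analysis_file = load_config("/tmp/config.yaml")
# path_analysis_file -> "/tmp/project/data/final/trends_analysis.csv"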
Python
def load_data(filepath):
    """Read csv-only file from data_dir/filename"""
    logging.info(f"Load data from {filepath}")
    df = pd.read_csv(filepath)
    df = set_dtypes(df)
    df = df.sort_values(by='query_date')
    return df
Python
def trends_statistics(df):
    """Generate relevant statistics for a ranking category of Google trends (rising or top)"""
    # time series indicator: t
    df['t'] = df.groupby('query_date').ngroup()
    # most recent period
    t_max = df.t.max()
    logging.debug(f"df dtypes after adding time series: {df.dtypes}")

    # ranking
    # absolute
    df['rank_t'] = df.groupby('t').value.rank(method='first', ascending=False)
    # rank in previous period (t-1)
    df['rank_t-1'] = df.groupby('query').rank_t.shift()
    # rank change from previous, t-1, to current period, t
    df['rank_absolute_change'] = df.rank_t - df['rank_t-1']
    # winners and losers (ranking of absolute changes)
    df['rank_absoulte_change_ranking'] = df.groupby(
        't').rank_absolute_change.rank(method='first', ascending=False)

    # percentile
    df['rank_pct_t'] = df.groupby('t').value.rank(
        method='first', ascending=False, pct=True)
    df['rank_pct_t-1'] = df.groupby('query').rank_pct_t.shift()
    df['rank_pct_change'] = df.rank_pct_t - df['rank_pct_t-1']

    # new entries at time t
    df['new_entry_t'] = (pd.isna(df['rank_t-1']) & pd.notnull(df.rank_t)) * 1

    # dropouts at time t+1
    # keywords for each period to compare sets
    queries_dict = df.groupby('t')['query'].apply(list).to_dict()
    # compare query response sets across the last two periods
    dropouts = list(
        set(queries_dict[(t_max - 1)]).difference(set(queries_dict[t_max])))
    df['dropout_t+1'] = ((df.t == t_max - 1) & df['query'].isin(dropouts)) * 1

    # fill missing values
    df = df.fillna(0)
    return df
Python
def dashboard_data(df):
    """ Create statistics for each rank category (rising or top) """
    df_list = []
    for rank_category in df.ranking.unique().tolist():
        logging.info(f"dashboard_data(): rank category: {rank_category}")
        # subset per category without overwriting the full frame,
        # so later iterations still see the other categories
        df_category = df.loc[df.ranking == rank_category].reset_index(drop=True)
        df_trends = (df_category.pipe(create_query_date)
                                .pipe(drop_duplicates)
                                .pipe(trends_statistics))
        df_list.append(df_trends)
    return pd.concat(df_list)
Python
def gallery_conf(tmpdir):
    """Sets up a test sphinx-gallery configuration"""
    gallery_conf = _complete_gallery_conf({}, str(tmpdir), True, False)
    gallery_conf.update(examples_dir=_TempDir(), gallery_dir=str(tmpdir))
    return gallery_conf
Python
def scale_image(in_fname, out_fname, max_width, max_height):
    """Scales an image with the same aspect ratio centered in an
       image box with the given max_width and max_height
       if in_fname == out_fname the image can only be scaled down
    """
    # local import to avoid testing dependency on PIL:
    try:
        from PIL import Image
    except ImportError:
        import Image
    img = Image.open(in_fname)
    width_in, height_in = img.size
    scale_w = max_width / float(width_in)
    scale_h = max_height / float(height_in)

    if height_in * scale_w <= max_height:
        scale = scale_w
    else:
        scale = scale_h

    if scale >= 1.0 and in_fname == out_fname:
        return

    width_sc = int(round(scale * width_in))
    height_sc = int(round(scale * height_in))

    # resize the image using resize; if using .thumbnail and the image is
    # already smaller than max_width, max_height, then this won't scale up
    # at all (maybe could be an option someday...)
    img = img.resize((width_sc, height_sc), Image.BICUBIC)
    # img.thumbnail((width_sc, height_sc), Image.BICUBIC)
    # width_sc, height_sc = img.size  # necessary if using thumbnail

    # insert centered
    thumb = Image.new('RGBA', (max_width, max_height), (255, 255, 255, 255))
    pos_insert = ((max_width - width_sc) // 2, (max_height - height_sc) // 2)
    thumb.paste(img, pos_insert)
    thumb.save(out_fname)
Python
def replace_py_ipynb(fname):
    """Replace .py extension in filename by .ipynb"""
    fname_prefix, extension = os.path.splitext(fname)
    allowed_extension = '.py'
    if extension != allowed_extension:
        raise ValueError(
            "Unrecognized file extension, expected %s, got %s"
            % (allowed_extension, extension))
    new_extension = '.ipynb'
    return '{}{}'.format(fname_prefix, new_extension)
Python
def extract_intro_and_title(filename, docstring):
    """ Extract the first paragraph of module-level docstring. max:95 char"""
    # lstrip is just in case docstring has a '\n\n' at the beginning
    paragraphs = docstring.lstrip().split('\n\n')
    # remove comments and other syntax like `.. _link:`
    paragraphs = [p for p in paragraphs
                  if not p.startswith('.. ') and len(p) > 0]
    if len(paragraphs) == 0:
        raise ValueError(
            "Example docstring should have a header for the example title. "
            "Please check the example file:\n {}\n".format(filename))
    # Title is the first paragraph with any ReSTructuredText title chars
    # removed, i.e. lines that consist of (all the same) 7-bit non-ASCII chars.
    # This conditional is not perfect but should hopefully be good enough.
    title_paragraph = paragraphs[0]
    match = re.search(r'([\w ]+)', title_paragraph)

    if match is None:
        raise ValueError(
            'Could not find a title in first paragraph:\n{}'.format(
                title_paragraph))
    title = match.group(1).strip()

    # Use the title if no other paragraphs are provided
    intro_paragraph = title if len(paragraphs) < 2 else paragraphs[1]
    # Concatenate all lines of the first paragraph and truncate at 95 chars
    intro = re.sub('\n', ' ', intro_paragraph)
    if len(intro) > 95:
        intro = intro[:95] + '...'
    return intro, title
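A quick, hypothetical call shows what extract_intro_and_title() returns for a typical example docstring (the filename argument is only used in error messages):

import re  # required by extract_intro_and_title()

docstring = '''
My example title
================

This example shows how the first body paragraph becomes the gallery intro.
'''

intro, title = extract_intro_and_title('plot_example.py', docstring)
# title -> 'My example title'
# intro -> the first body paragraph, flattened to one line and truncated at 95 chars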
Python
def save_thumbnail(image_path_template, src_file, file_conf, gallery_conf):
    """Generate and Save the thumbnail image

    Parameters
    ----------
    image_path_template : str
        holds the template where to save and how to name the image
    src_file : str
        path to source python file
    file_conf : dict
        File-specific settings given in source file comments
    gallery_conf : dict
        Sphinx-Gallery configuration dictionary
    """
    # read specification of the figure to display as thumbnail from main text
    thumbnail_number = file_conf.get('thumbnail_number', 1)
    if not isinstance(thumbnail_number, int):
        raise TypeError(
            'sphinx_gallery_thumbnail_number setting is not a number, '
            'got %r' % (thumbnail_number,))
    thumbnail_image_path = image_path_template.format(thumbnail_number)

    thumb_dir = os.path.join(os.path.dirname(thumbnail_image_path), 'thumb')
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)

    base_image_name = os.path.splitext(os.path.basename(src_file))[0]
    thumb_file = os.path.join(thumb_dir,
                              'sphx_glr_%s_thumb.png' % base_image_name)

    if src_file in gallery_conf['failing_examples']:
        img = os.path.join(glr_path_static(), 'broken_example.png')
    elif os.path.exists(thumbnail_image_path):
        img = thumbnail_image_path
    elif not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        img = os.path.join(glr_path_static(), 'no_image.png')
        img = gallery_conf.get("default_thumb_file", img)
    else:
        return
    scale_image(img, thumb_file, *gallery_conf["thumbnail_size"])
Python
def generate_dir_rst(src_dir, target_dir, gallery_conf, seen_backrefs):
    """Generate the gallery reStructuredText for an example directory"""
    head_ref = os.path.relpath(target_dir, gallery_conf['src_dir'])
    fhindex = """\n\n.. _sphx_glr_{0}:\n\n""".format(
        head_ref.replace(os.path.sep, '_'))

    with codecs.open(os.path.join(src_dir, 'README.txt'), 'r',
                     encoding='utf-8') as fid:
        fhindex += fid.read()
    # Add empty lines to avoid bug in issue #165
    fhindex += "\n\n"

    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # get filenames
    listdir = [fname for fname in os.listdir(src_dir)
               if fname.endswith('.py')]
    # limit which to look at based on regex (similar to filename_pattern)
    listdir = [fname for fname in listdir
               if re.search(gallery_conf['ignore_pattern'],
                            os.path.normpath(os.path.join(src_dir, fname)))
               is None]
    # sort them
    sorted_listdir = sorted(
        listdir, key=gallery_conf['within_subsection_order'](src_dir))
    entries_text = []
    computation_times = []
    build_target_dir = os.path.relpath(target_dir, gallery_conf['src_dir'])
    iterator = sphinx_compatibility.status_iterator(
        sorted_listdir,
        'generating gallery for %s... ' % build_target_dir,
        length=len(sorted_listdir))
    clean_modules(gallery_conf, src_dir)  # fix gh-316
    for fname in iterator:
        intro, time_elapsed = generate_file_rst(
            fname, target_dir, src_dir, gallery_conf)
        clean_modules(gallery_conf, fname)
        computation_times.append((time_elapsed, fname))
        this_entry = _thumbnail_div(build_target_dir, fname, intro) + """

.. toctree::
   :hidden:

   /%s\n""" % os.path.join(build_target_dir, fname[:-3]).replace(os.sep, '/')
        entries_text.append(this_entry)

        if gallery_conf['backreferences_dir']:
            write_backreferences(seen_backrefs, gallery_conf,
                                 target_dir, fname, intro)

    for entry_text in entries_text:
        fhindex += entry_text

    # clear at the end of the section
    fhindex += """.. raw:: html\n
    <div style='clear:both'></div>\n\n"""
    return fhindex, computation_times
Python
def _memory_usage(func, gallery_conf):
    """Get memory usage of a function call."""
    if gallery_conf['show_memory']:
        from memory_profiler import memory_usage
        assert callable(func)
        mem, out = memory_usage(func, max_usage=True, retval=True,
                                multiprocess=True)
        mem = mem[0]
    else:
        out = func()
        mem = 0
    return out, mem
Python
def _get_memory_base(gallery_conf):
    """Get the base amount of memory used by running a Python process."""
    if not gallery_conf['show_memory']:
        memory_base = 0
    else:
        # There might be a cleaner way to do this at some point
        from memory_profiler import memory_usage
        proc = subprocess.Popen(
            [sys.executable, '-c', 'import time; time.sleep(1.0)'])
        memory_base = max(memory_usage(proc, interval=1e-3, timeout=0.1))
    return memory_base
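A small, hypothetical illustration of calling _memory_usage(); the gallery_conf argument here is just a stand-in dict with the single key the helper reads:

def _work():
    return sum(range(10 ** 6))

result, mem = _memory_usage(_work, {'show_memory': False})
# result -> 499999500000, mem -> 0 because memory profiling is disabled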
Python
def execute_code_block(compiler, block, example_globals, script_vars,
                       gallery_conf):
    """Executes the code block of the example file"""
    blabel, bcontent, lineno = block
    # If example is not suitable to run, skip executing its blocks
    if not script_vars['execute_script'] or blabel == 'text':
        script_vars['memory_delta'].append(0)
        return ''

    cwd = os.getcwd()
    # Redirect output to stdout and
    orig_stdout = sys.stdout
    src_file = script_vars['src_file']

    # First cd in the original example dir, so that any file
    # created by the example get created in this directory
    my_stdout = MixedEncodingStringIO()
    os.chdir(os.path.dirname(src_file))
    sys.stdout = LoggingTee(my_stdout, logger, src_file)

    try:
        dont_inherit = 1
        code_ast = compile(bcontent, src_file, 'exec',
                           ast.PyCF_ONLY_AST | compiler.flags, dont_inherit)
        ast.increment_lineno(code_ast, lineno - 1)
        # don't use unicode_literals at the top of this file or you get
        # nasty errors here on Py2.7
        _, mem = _memory_usage(_exec_once(
            compiler(code_ast, src_file, 'exec'), example_globals),
            gallery_conf)
    except Exception:
        sys.stdout.flush()
        sys.stdout = orig_stdout
        except_rst = handle_exception(sys.exc_info(), src_file, script_vars,
                                      gallery_conf)
        # python2.7: Code was read in bytes needs decoding to utf-8
        # unless future unicode_literals is imported in source which
        # make ast output unicode strings
        if hasattr(except_rst, 'decode') and not \
                isinstance(except_rst, unicode):
            except_rst = except_rst.decode('utf-8')
        code_output = u"\n{0}\n\n\n\n".format(except_rst)
        # still call this even though we won't use the images so that
        # figures are closed
        save_figures(block, script_vars, gallery_conf)
        mem = 0
    else:
        sys.stdout.flush()
        sys.stdout = orig_stdout
        os.chdir(cwd)

        my_stdout = my_stdout.getvalue().strip().expandtabs()
        if my_stdout:
            stdout = CODE_OUTPUT.format(indent(my_stdout, u' ' * 4))
        else:
            stdout = ''
        images_rst = save_figures(block, script_vars, gallery_conf)

        code_output = u"\n{0}\n\n{1}\n\n".format(images_rst, stdout)
    finally:
        os.chdir(cwd)
        sys.stdout = orig_stdout

    script_vars['memory_delta'].append(mem)

    return code_output
Python
def execute_script(script_blocks, script_vars, gallery_conf):
    """Execute and capture output from python script already in block structure

    Parameters
    ----------
    script_blocks : list
        (label, content, line_number)
        List where each element is a tuple with the label ('text' or 'code'),
        the corresponding content string of block and the leading line number
    script_vars : dict
        Configuration and run time variables
    gallery_conf : dict
        Contains the configuration of Sphinx-Gallery

    Returns
    -------
    output_blocks : list
        List of strings where each element is the restructured text
        representation of the output of each block
    time_elapsed : float
        Time elapsed during execution
    """
    example_globals = {
        # A lot of examples contains 'print(__doc__)' for example in
        # scikit-learn so that running the example prints some useful
        # information. Because the docstring has been separated from
        # the code blocks in sphinx-gallery, __doc__ is actually
        # __builtin__.__doc__ in the execution context and we do not
        # want to print it
        '__doc__': '',
        # Examples may contain if __name__ == '__main__' guards
        # for in example scikit-learn if the example uses multiprocessing
        '__name__': '__main__',
        # Don't ever support __file__: Issues #166 #212
    }

    argv_orig = sys.argv[:]
    if script_vars['execute_script']:
        # We want to run the example without arguments. See
        # https://github.com/sphinx-gallery/sphinx-gallery/pull/252
        # for more details.
        sys.argv[0] = script_vars['src_file']
        sys.argv[1:] = []

    t_start = time()
    gc.collect()
    _, memory_start = _memory_usage(lambda: None, gallery_conf)
    compiler = codeop.Compile()
    # include at least one entry to avoid max() ever failing
    script_vars['memory_delta'] = [memory_start]
    output_blocks = [execute_code_block(compiler, block, example_globals,
                                        script_vars, gallery_conf)
                     for block in script_blocks]
    time_elapsed = time() - t_start
    script_vars['memory_delta'] = (  # actually turn it into a delta now
        max(script_vars['memory_delta']) - memory_start)

    sys.argv = argv_orig

    # Write md5 checksum if the example was meant to run (no-plot
    # shall not cache md5sum) and has build correctly
    if script_vars['execute_script']:
        with open(script_vars['target_file'] + '.md5', 'w') as file_checksum:
            file_checksum.write(get_md5sum(script_vars['target_file']))

    return output_blocks, time_elapsed
Python
def generate_file_rst(fname, target_dir, src_dir, gallery_conf):
    """Generate the rst file for a given example.

    Parameters
    ----------
    fname : str
        Filename of python script
    target_dir : str
        Absolute path to directory in documentation where examples are saved
    src_dir : str
        Absolute path to directory where source examples are stored
    gallery_conf : dict
        Contains the configuration of Sphinx-Gallery

    Returns
    -------
    intro: str
        The introduction of the example
    time_elapsed : float
        seconds required to run the script
    """
    src_file = os.path.normpath(os.path.join(src_dir, fname))
    target_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, target_file)

    intro, _ = extract_intro_and_title(fname,
                                       get_docstring_and_rest(src_file)[0])

    if md5sum_is_current(target_file):
        return intro, 0

    image_dir = os.path.join(target_dir, 'images')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)

    base_image_name = os.path.splitext(fname)[0]
    image_fname = 'sphx_glr_' + base_image_name + '_{0:03}.png'
    image_path_template = os.path.join(image_dir, image_fname)

    script_vars = {
        'execute_script': executable_script(src_file, gallery_conf),
        'image_path_iterator': ImagePathIterator(image_path_template),
        'src_file': src_file,
        'target_file': target_file}

    file_conf, script_blocks = split_code_and_text_blocks(src_file)

    output_blocks, time_elapsed = execute_script(script_blocks, script_vars,
                                                 gallery_conf)

    logger.debug("%s ran in : %.2g seconds\n", src_file, time_elapsed)

    example_rst = rst_blocks(script_blocks, output_blocks,
                             file_conf, gallery_conf)
    memory_used = gallery_conf['memory_base'] + script_vars['memory_delta']
    save_rst_example(example_rst, target_file, time_elapsed, memory_used,
                     gallery_conf)

    save_thumbnail(image_path_template, src_file, file_conf, gallery_conf)

    example_nb = jupyter_notebook(script_blocks, gallery_conf)
    save_notebook(example_nb, replace_py_ipynb(target_file))

    return intro, time_elapsed
Python
def rst_blocks(script_blocks, output_blocks, file_conf, gallery_conf):
    """Generates the rst string containing the script prose, code and output

    Parameters
    ----------
    script_blocks : list
        (label, content, line_number)
        List where each element is a tuple with the label ('text' or 'code'),
        the corresponding content string of block and the leading line number
    output_blocks : list
        List of strings where each element is the restructured text
        representation of the output of each block
    file_conf : dict
        File-specific settings given in source file comments as:
        ``# sphinx_gallery_<name> = <value>``
    gallery_conf : dict
        Contains the configuration of Sphinx-Gallery

    Returns
    -------
    out : str
        rst notebook
    """
    # A simple example has two blocks: one for the
    # example introduction/explanation and one for the code
    is_example_notebook_like = len(script_blocks) > 2
    example_rst = u""  # there can be unicode content
    for (blabel, bcontent, lineno), code_output in \
            zip(script_blocks, output_blocks):
        if blabel == 'code':

            if not file_conf.get('line_numbers',
                                 gallery_conf.get('line_numbers', False)):
                lineno = None

            code_rst = codestr2rst(bcontent, lineno=lineno) + '\n'
            if is_example_notebook_like:
                example_rst += code_rst
                example_rst += code_output
            else:
                example_rst += code_output
                if 'sphx-glr-script-out' in code_output:
                    # Add some vertical space after output
                    example_rst += "\n\n|\n\n"
                example_rst += code_rst
        else:
            example_rst += bcontent + '\n'
    return example_rst
Python
def save_rst_example(example_rst, example_file, time_elapsed,
                     memory_used, gallery_conf):
    """Saves the rst notebook to example_file including necessary header & footer

    Parameters
    ----------
    example_rst : str
        rst containing the executed file content
    example_file : str
        Filename with full path of python example file in documentation folder
    time_elapsed : float
        Time elapsed in seconds while executing file
    memory_used : float
        Additional memory used during the run.
    gallery_conf : dict
        Sphinx-Gallery configuration dictionary
    """

    ref_fname = os.path.relpath(example_file, gallery_conf['src_dir'])
    ref_fname = ref_fname.replace(os.path.sep, "_")

    binder_conf = check_binder_conf(gallery_conf.get('binder'))

    binder_text = (" or run this example in your browser via Binder"
                   if len(binder_conf) else "")
    example_rst = (".. note::\n"
                   " :class: sphx-glr-download-link-note\n\n"
                   " Click :ref:`here <sphx_glr_download_{0}>` "
                   "to download the full example code{1}\n"
                   ".. rst-class:: sphx-glr-example-title\n\n"
                   ".. _sphx_glr_{0}:\n\n").format(ref_fname, binder_text) \
        + example_rst

    if time_elapsed >= gallery_conf["min_reported_time"]:
        time_m, time_s = divmod(time_elapsed, 60)
        example_rst += ("**Total running time of the script:**"
                        " ({0: .0f} minutes {1: .3f} seconds)\n\n"
                        .format(time_m, time_s))
    if gallery_conf['show_memory']:
        example_rst += ("**Estimated memory usage:** {0: .0f} MB\n\n"
                        .format(memory_used))

    # Generate a binder URL if specified
    binder_badge_rst = ''
    if len(binder_conf) > 0:
        binder_badge_rst += gen_binder_rst(example_file, binder_conf,
                                           gallery_conf)

    fname = os.path.basename(example_file)
    example_rst += CODE_DOWNLOAD.format(fname,
                                        replace_py_ipynb(fname),
                                        binder_badge_rst,
                                        ref_fname)
    example_rst += SPHX_GLR_SIG

    write_file = re.sub(r'\.py$', '.rst', example_file)
    with codecs.open(write_file, 'w', encoding="utf-8") as f:
        f.write(example_rst)
Python
def _import_from(mod, path, mod_dir=None):
    """
    Imports a module from a specific path

    :param mod:
        A unicode string of the module name

    :param path:
        A unicode string to the directory containing the module

    :param mod_dir:
        If the sub directory of "path" is different than the "mod" name,
        pass the sub directory as a unicode string

    :return:
        None if not loaded, otherwise the module
    """

    if mod_dir is None:
        mod_dir = mod

    if not os.path.exists(path):
        return None

    if not os.path.exists(os.path.join(path, mod_dir)):
        return None

    try:
        mod_info = imp.find_module(mod_dir, [path])
        return imp.load_module(mod, *mod_info)
    except ImportError:
        return None
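Since the imp module is deprecated in Python 3, a rough importlib-based sketch of the same idea might look like the following (the helper name is hypothetical and only the package-directory case is covered):

import importlib.util
import os
import sys


def _import_from_importlib(mod, path, mod_dir=None):
    """Sketch of an importlib equivalent of _import_from() for a package
    directory that contains an __init__.py; returns None if not loadable."""
    if mod_dir is None:
        mod_dir = mod
    init_py = os.path.join(path, mod_dir, '__init__.py')
    if not os.path.exists(init_py):
        return None
    spec = importlib.util.spec_from_file_location(mod, init_py)
    if spec is None:
        return None
    module = importlib.util.module_from_spec(spec)
    sys.modules[mod] = module  # register before exec so package imports resolve
    try:
        spec.loader.exec_module(module)
    except ImportError:
        del sys.modules[mod]
        return None
    return module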
Python
def make_suite():
    """
    Constructs a unittest.TestSuite() of all tests for the package. For use
    with setuptools.

    :return:
        A unittest.TestSuite() object
    """

    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for test_class in test_classes():
        tests = loader.loadTestsFromTestCase(test_class)
        suite.addTests(tests)
    return suite
Python
def certificate(self, value):
    """
    An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate object
    of the certificate to create the request for.
    """

    is_oscrypto = isinstance(value, asymmetric.Certificate)
    if not is_oscrypto and not isinstance(value, x509.Certificate):
        raise TypeError(_pretty_message(
            '''
            certificate must be an instance of asn1crypto.x509.Certificate
            or oscrypto.asymmetric.Certificate, not %s
            ''',
            _type_name(value)
        ))

    if is_oscrypto:
        value = value.asn1

    self._certificate = value
Python
def key_hash_algo(self, value):
    """
    A unicode string of the hash algorithm to use when creating the
    certificate identifier - "sha1" (default), or "sha256".
    """

    if value not in set(['sha1', 'sha256']):
        raise ValueError(_pretty_message(
            '''
            hash_algo must be one of "sha1", "sha256", not %s
            ''',
            repr(value)
        ))

    self._key_hash_algo = value
Python
def nonce(self, value):
    """
    A bool - if the nonce extension should be used to prevent replay attacks.
    """

    if not isinstance(value, bool):
        raise TypeError(_pretty_message(
            '''
            nonce must be a boolean, not %s
            ''',
            _type_name(value)
        ))

    self._nonce = value
Python
def build(self, requestor_private_key=None, requestor_certificate=None,
          other_certificates=None):
    """
    Validates the request information, constructs the ASN.1 structure and
    then optionally signs it.

    The requestor_private_key, requestor_certificate and other_certificates
    params are all optional and only necessary if the request needs to be
    signed. Signing a request is uncommon for OCSP requests related to web
    TLS connections.

    :param requestor_private_key:
        An asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey
        object for the private key to sign the request with

    :param requestor_certificate:
        An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate
        object of the certificate associated with the private key

    :param other_certificates:
        A list of asn1crypto.x509.Certificate or
        oscrypto.asymmetric.Certificate objects that may be useful for the
        OCSP server to verify the request signature. Intermediate
        certificates would be specified here.

    :return:
        An asn1crypto.ocsp.OCSPRequest object of the request
    """

    def _make_extension(name, value):
        return {
            'extn_id': name,
            'critical': False,
            'extn_value': value
        }

    tbs_request_extensions = []
    request_extensions = []
    has_nonce = False

    for name, value in self._tbs_request_extensions.items():
        if name == 'nonce':
            has_nonce = True
        tbs_request_extensions.append(_make_extension(name, value))
    if self._nonce and not has_nonce:
        tbs_request_extensions.append(
            _make_extension('nonce', util.rand_bytes(16))
        )

    if not tbs_request_extensions:
        tbs_request_extensions = None

    for name, value in self._request_extensions.items():
        request_extensions.append(_make_extension(name, value))

    if not request_extensions:
        request_extensions = None

    tbs_request = ocsp.TBSRequest({
        'request_list': [
            {
                'req_cert': {
                    'hash_algorithm': {
                        'algorithm': self._key_hash_algo
                    },
                    'issuer_name_hash': getattr(
                        self._certificate.issuer, self._key_hash_algo),
                    'issuer_key_hash': getattr(
                        self._issuer.public_key, self._key_hash_algo),
                    'serial_number': self._certificate.serial_number,
                },
                'single_request_extensions': request_extensions
            }
        ],
        'request_extensions': tbs_request_extensions
    })
    signature = None

    if requestor_private_key or requestor_certificate or other_certificates:
        is_oscrypto = isinstance(requestor_private_key, asymmetric.PrivateKey)
        if not isinstance(requestor_private_key, keys.PrivateKeyInfo) and not is_oscrypto:
            raise TypeError(_pretty_message(
                '''
                requestor_private_key must be an instance of
                asn1crypto.keys.PrivateKeyInfo or
                oscrypto.asymmetric.PrivateKey, not %s
                ''',
                _type_name(requestor_private_key)
            ))

        cert_is_oscrypto = isinstance(requestor_certificate, asymmetric.Certificate)
        if not isinstance(requestor_certificate, x509.Certificate) and not cert_is_oscrypto:
            raise TypeError(_pretty_message(
                '''
                requestor_certificate must be an instance of
                asn1crypto.x509.Certificate or
                oscrypto.asymmetric.Certificate, not %s
                ''',
                _type_name(requestor_certificate)
            ))

        if other_certificates is not None and not isinstance(other_certificates, list):
            raise TypeError(_pretty_message(
                '''
                other_certificates must be a list of
                asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate
                objects, not %s
                ''',
                _type_name(other_certificates)
            ))

        if cert_is_oscrypto:
            requestor_certificate = requestor_certificate.asn1

        tbs_request['requestor_name'] = x509.GeneralName(
            name='directory_name',
            value=requestor_certificate.subject
        )

        certificates = [requestor_certificate]

        for other_certificate in other_certificates:
            other_cert_is_oscrypto = isinstance(other_certificate, asymmetric.Certificate)
            if not isinstance(other_certificate, x509.Certificate) and not other_cert_is_oscrypto:
                raise TypeError(_pretty_message(
                    '''
                    other_certificate must be an instance of
                    asn1crypto.x509.Certificate or
                    oscrypto.asymmetric.Certificate, not %s
                    ''',
                    _type_name(other_certificate)
                ))

            if other_cert_is_oscrypto:
                other_certificate = other_certificate.asn1

            certificates.append(other_certificate)

        signature_algo = requestor_private_key.algorithm
        if signature_algo == 'ec':
            signature_algo = 'ecdsa'

        signature_algorithm_id = '%s_%s' % (self._hash_algo, signature_algo)

        if requestor_private_key.algorithm == 'rsa':
            sign_func = asymmetric.rsa_pkcs1v15_sign
        elif requestor_private_key.algorithm == 'dsa':
            sign_func = asymmetric.dsa_sign
        elif requestor_private_key.algorithm == 'ec':
            sign_func = asymmetric.ecdsa_sign

        if not is_oscrypto:
            requestor_private_key = asymmetric.load_private_key(requestor_private_key)

        signature_bytes = sign_func(requestor_private_key, tbs_request.dump(), self._hash_algo)

        signature = ocsp.Signature({
            'signature_algorithm': {'algorithm': signature_algorithm_id},
            'signature': signature_bytes,
            'certs': certificates
        })

    return ocsp.OCSPRequest({
        'tbs_request': tbs_request,
        'optional_signature': signature
    })
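These setters and build() read like members of a builder class in the style of ocspbuilder's OCSPRequestBuilder; a hedged usage sketch under that assumption (the constructor signature and the certificate paths are assumptions):

from oscrypto import asymmetric

subject_cert = asymmetric.load_certificate('server.crt')       # hypothetical paths
issuer_cert = asymmetric.load_certificate('intermediate.crt')

builder = OCSPRequestBuilder(subject_cert, issuer_cert)  # assumed constructor
builder.nonce = True              # include the anti-replay nonce extension
builder.key_hash_algo = 'sha256'  # hash used for the certificate identifier
ocsp_request = builder.build()    # unsigned asn1crypto.ocsp.OCSPRequest
der_bytes = ocsp_request.dump()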
Python
def response_status(self, value):
    """
    The overall status of the response. Only a "successful" response will
    include information about the certificate. Other response types are for
    signaling info about the OCSP responder. Valid values include:

     - "successful" - when the response includes information about the certificate
     - "malformed_request" - when the request could not be understood
     - "internal_error" - when an internal error occurred with the OCSP responder
     - "try_later" - when the OCSP responder is temporarily unavailable
     - "sign_required" - when the OCSP request must be signed
     - "unauthorized" - when the responder is not the correct responder for the certificate
    """

    if not isinstance(value, str_cls):
        raise TypeError(_pretty_message(
            '''
            response_status must be a unicode string, not %s
            ''',
            _type_name(value)
        ))

    valid_response_statuses = set([
        'successful',
        'malformed_request',
        'internal_error',
        'try_later',
        'sign_required',
        'unauthorized'
    ])
    if value not in valid_response_statuses:
        raise ValueError(_pretty_message(
            '''
            response_status must be one of "successful", "malformed_request",
            "internal_error", "try_later", "sign_required", "unauthorized",
            not %s
            ''',
            repr(value)
        ))

    self._response_status = value
Python
def certificate(self, value): """ An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate object of the certificate the response is about. """ if value is not None: is_oscrypto = isinstance(value, asymmetric.Certificate) if not is_oscrypto and not isinstance(value, x509.Certificate): raise TypeError(_pretty_message( ''' certificate must be an instance of asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate, not %s ''', _type_name(value) )) if is_oscrypto: value = value.asn1 self._certificate = value
Python
def certificate_status(self, value): """ A unicode string of the status of the certificate. Valid values include: - "good" - when the certificate is in good standing - "revoked" - when the certificate is revoked without a reason code - "key_compromise" - when a private key is compromised - "ca_compromise" - when the CA issuing the certificate is compromised - "affiliation_changed" - when the certificate subject name changed - "superseded" - when the certificate was replaced with a new one - "cessation_of_operation" - when the certificate is no longer needed - "certificate_hold" - when the certificate is temporarily invalid - "remove_from_crl" - only delta CRLs - when temporary hold is removed - "privilege_withdrawn" - one of the usages for a certificate was removed - "unknown" - when the responder doesn't know about the certificate being requested """ if value is not None: if not isinstance(value, str_cls): raise TypeError(_pretty_message( ''' certificate_status must be a unicode string, not %s ''', _type_name(value) )) valid_certificate_statuses = set([ 'good', 'revoked', 'key_compromise', 'ca_compromise', 'affiliation_changed', 'superseded', 'cessation_of_operation', 'certificate_hold', 'remove_from_crl', 'privilege_withdrawn', 'unknown', ]) if value not in valid_certificate_statuses: raise ValueError(_pretty_message( ''' certificate_status must be one of "good", "revoked", "key_compromise", "ca_compromise", "affiliation_changed", "superseded", "cessation_of_operation", "certificate_hold", "remove_from_crl", "privilege_withdrawn", "unknown" not %s ''', repr(value) )) self._certificate_status = value
Python
def revocation_date(self, value):
    """
    A datetime.datetime object of when the certificate was revoked, if the
    status is not "good" or "unknown".
    """

    if value is not None and not isinstance(value, datetime):
        raise TypeError(_pretty_message(
            '''
            revocation_date must be an instance of datetime.datetime, not %s
            ''',
            _type_name(value)
        ))

    self._revocation_date = value
Python
def build(self, responder_private_key=None, responder_certificate=None): """ Validates the request information, constructs the ASN.1 structure and signs it. The responder_private_key and responder_certificate parameters are only required if the response_status is "successful". :param responder_private_key: An asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey object for the private key to sign the response with :param responder_certificate: An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate object of the certificate associated with the private key :return: An asn1crypto.ocsp.OCSPResponse object of the response """ if self._response_status != 'successful': return ocsp.OCSPResponse({ 'response_status': self._response_status }) is_oscrypto = isinstance(responder_private_key, asymmetric.PrivateKey) if not isinstance(responder_private_key, keys.PrivateKeyInfo) and not is_oscrypto: raise TypeError(_pretty_message( ''' responder_private_key must be an instance of asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey, not %s ''', _type_name(responder_private_key) )) cert_is_oscrypto = isinstance(responder_certificate, asymmetric.Certificate) if not isinstance(responder_certificate, x509.Certificate) and not cert_is_oscrypto: raise TypeError(_pretty_message( ''' responder_certificate must be an instance of asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate, not %s ''', _type_name(responder_certificate) )) if cert_is_oscrypto: responder_certificate = responder_certificate.asn1 if self._certificate is None: raise ValueError(_pretty_message( ''' certificate must be set if the response_status is "successful" ''' )) if self._certificate_status is None: raise ValueError(_pretty_message( ''' certificate_status must be set if the response_status is "successful" ''' )) def _make_extension(name, value): return { 'extn_id': name, 'critical': False, 'extn_value': value } response_data_extensions = [] single_response_extensions = [] for name, value in self._response_data_extensions.items(): response_data_extensions.append(_make_extension(name, value)) if self._nonce: response_data_extensions.append( _make_extension('nonce', self._nonce) ) if not response_data_extensions: response_data_extensions = None for name, value in self._single_response_extensions.items(): single_response_extensions.append(_make_extension(name, value)) if self._certificate_issuer: single_response_extensions.append( _make_extension( 'certificate_issuer', [ x509.GeneralName( name='directory_name', value=self._certificate_issuer.subject ) ] ) ) if not single_response_extensions: single_response_extensions = None responder_key_hash = getattr(responder_certificate.public_key, self._key_hash_algo) if self._certificate_status == 'good': cert_status = ocsp.CertStatus( name='good', value=core.Null() ) elif self._certificate_status == 'unknown': cert_status = ocsp.CertStatus( name='unknown', value=core.Null() ) else: status = self._certificate_status reason = status if status != 'revoked' else 'unspecified' cert_status = ocsp.CertStatus( name='revoked', value={ 'revocation_time': self._revocation_date, 'revocation_reason': reason, } ) issuer = self._certificate_issuer if self._certificate_issuer else responder_certificate if issuer.subject != self._certificate.issuer: raise ValueError(_pretty_message( ''' responder_certificate does not appear to be the issuer for the certificate. Perhaps set the .certificate_issuer attribute? 
''' )) produced_at = datetime.now(timezone.utc) if self._this_update is None: self._this_update = produced_at if self._next_update is None: self._next_update = self._this_update + timedelta(days=7) response_data = ocsp.ResponseData({ 'responder_id': ocsp.ResponderId(name='by_key', value=responder_key_hash), 'produced_at': produced_at, 'responses': [ { 'cert_id': { 'hash_algorithm': { 'algorithm': self._key_hash_algo }, 'issuer_name_hash': getattr(self._certificate.issuer, self._key_hash_algo), 'issuer_key_hash': getattr(issuer.public_key, self._key_hash_algo), 'serial_number': self._certificate.serial_number, }, 'cert_status': cert_status, 'this_update': self._this_update, 'next_update': self._next_update, 'single_extensions': single_response_extensions } ], 'response_extensions': response_data_extensions }) signature_algo = responder_private_key.algorithm if signature_algo == 'ec': signature_algo = 'ecdsa' signature_algorithm_id = '%s_%s' % (self._hash_algo, signature_algo) if responder_private_key.algorithm == 'rsa': sign_func = asymmetric.rsa_pkcs1v15_sign elif responder_private_key.algorithm == 'dsa': sign_func = asymmetric.dsa_sign elif responder_private_key.algorithm == 'ec': sign_func = asymmetric.ecdsa_sign if not is_oscrypto: responder_private_key = asymmetric.load_private_key(responder_private_key) signature_bytes = sign_func(responder_private_key, response_data.dump(), self._hash_algo) certs = None if self._certificate_issuer: certs = [responder_certificate] return ocsp.OCSPResponse({ 'response_status': self._response_status, 'response_bytes': { 'response_type': 'basic_ocsp_response', 'response': { 'tbs_response_data': response_data, 'signature_algorithm': {'algorithm': signature_algorithm_id}, 'signature': signature_bytes, 'certs': certs } } })
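A hedged sketch of driving this responder-side build method follows. The OCSPResponseBuilder name and its single-argument constructor are assumptions inferred from the properties set elsewhere in this file (response_status, certificate, certificate_status); paths are placeholders.

# Hedged usage sketch for the responder-side build() above.
from oscrypto import asymmetric

subject_cert = asymmetric.load_certificate('server.crt')    # hypothetical paths
responder_cert = asymmetric.load_certificate('issuer.crt')
responder_key = asymmetric.load_private_key('issuer.key')

builder = OCSPResponseBuilder('successful')   # assumed class name
builder.certificate = subject_cert            # the certificate being vouched for
builder.certificate_status = 'good'
ocsp_response = builder.build(responder_key, responder_cert)

# Error-style responses carry no certificate data and need no signing key
error_response = OCSPResponseBuilder('internal_error').build()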
Python
def read_config():
    """ Reads config.json to get configuration settings """
    with open('config.json') as config_file:
        d = json.loads(config_file.read())

    global application_host, application_port, application_debug
    application_host = d["application"]["host"]
    application_port = d["application"]["port"]
    application_debug = d["application"]["debug"]

    global use_project_to_channel_map, use_project_bugs_to_channel_map
    global use_project_to_channel_pattern, project_to_channel_pattern
    global use_bug_specific_channel, bug_channel_postfix
    global use_attachments
    use_project_to_channel_map = d["features"]["use_project_to_channel_map"]
    use_project_bugs_to_channel_map = d["features"]["use_project_bugs_to_channel_map"]
    use_project_to_channel_pattern = d["features"]["use_project_to_channel_pattern"]
    project_to_channel_pattern = d["features"]["project_to_channel_pattern"]
    use_bug_specific_channel = d["features"]["use_bug_specific_channel"]
    bug_channel_postfix = d["features"]["bug_channel_postfix"]
    use_attachments = d["features"]["use_attachments"]

    global attachment_color
    attachment_color = d["colors"]["attachment"]

    global webhook_url, mattermost_user, mattermost_icon
    webhook_url = d["mattermost"]["webhook"]
    mattermost_user = d["mattermost"]["post_user_name"]
    mattermost_icon = d["mattermost"]["post_user_icon"]

    global jira_url
    jira_url = d["jira"]["url"]
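The schema of config.json is not included in this excerpt; the keys read above imply a shape roughly like the following. All values are hypothetical placeholders, not taken from the original project.

# Hypothetical config.json contents implied by the keys read_config() accesses.
import json

example_config = {
    "application": {"host": "0.0.0.0", "port": 5000, "debug": False},
    "features": {
        "use_project_to_channel_map": False,
        "use_project_bugs_to_channel_map": False,
        "use_project_to_channel_pattern": True,
        "project_to_channel_pattern": "jira-",
        "use_bug_specific_channel": False,
        "bug_channel_postfix": "-bugs",
        "use_attachments": True,
    },
    "colors": {"attachment": "#345678"},
    "mattermost": {
        "webhook": "https://mattermost.example.com/hooks/abc123",
        "post_user_name": "JIRA",
        "post_user_icon": "https://example.com/jira.png",
    },
    "jira": {"url": "https://jira.example.com"},
}

with open('config.json', 'w') as f:
    json.dump(example_config, f, indent=2)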
Python
def send_webhook(project_key, issue_type, text):
    """ Sends the formatted message to the configured Mattermost webhook URL """
    if len(project_key) > 0:
        channel = get_channel(project_key, issue_type)

        data = {
            "channel": channel,
            "username": mattermost_user,
            "icon_url": mattermost_icon
        }
        if use_attachments:
            data["attachments"] = [{
                "color": attachment_color,
                "text": text
            }]
        else:
            data["text"] = text

        response = requests.post(
            webhook_url,
            data=json.dumps(data),
            headers={'Content-Type': 'application/json'}
        )
        return response
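send_webhook relies on a get_channel helper that is not part of this excerpt. A rough sketch consistent with the feature flags loaded by read_config might look like the following; it is entirely hypothetical and ignores the channel-map features.

# Hypothetical sketch of get_channel(); the real helper (and the project-to-channel
# maps it would consult) is not shown in this excerpt.
def get_channel(project_key, issue_type):
    channel = ""
    if use_project_to_channel_pattern:
        # e.g. prefix "jira-" plus the lower-cased project key
        channel = project_to_channel_pattern + project_key.lower()
    if use_bug_specific_channel and issue_type.lower() == "bug":
        channel += bug_channel_postfix
    return channel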
Python
def format_changelog(changelog_items):
    """ The changelog can record one or more changes to an issue """
    output = ""
    if len(changelog_items) > 1:
        output = "\n"
    for item in changelog_items:
        # Strip non-ASCII characters and decode back to str so the pieces
        # concatenate cleanly under Python 3
        from_string = item["fromString"].encode('ascii', 'ignore').decode('ascii').strip()
        to_string = item["toString"].encode('ascii', 'ignore').decode('ascii').strip()
        output += "Field **" + item["field"] + "** updated from _" + \
                  from_string + "_ to _" + to_string + "_\n"
    return output
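The items passed in mirror the entries of a Jira webhook changelog; a small illustration with made-up values shows the one-line-per-field output (with a leading blank line when more than one field changed).

# Example input/output for format_changelog(); values are made up.
changelog_items = [
    {"field": "status", "fromString": "Open", "toString": "In Progress"},
    {"field": "assignee", "fromString": "alice", "toString": "bob"},
]
print(format_changelog(changelog_items))
# (leading blank line because two items changed)
# Field **status** updated from _Open_ to _In Progress_
# Field **assignee** updated from _alice_ to _bob_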
Python
def _stream_container_logs(base_url: str, timeout: int, container_id: str) -> None:
    """
    Stream container logs back to stdout

    Args:
        - base_url (str): URL for a Docker daemon server
        - timeout (int): timeout for docker api requests
        - container_id (str): ID of a container to stream logs
    """
    import docker

    client = docker.APIClient(base_url=base_url, timeout=timeout, version="auto")
    for log in client.logs(container=container_id, stream=True, follow=True):
        print(str(log, "utf-8").rstrip())
Python
def deploy_flow(self, flow_run: GraphQLResult) -> str: """ Deploy flow runs on your local machine as Docker containers Args: - flow_run (GraphQLResult): A GraphQLResult flow run object Returns: - str: Information about the deployment """ # 'import docker' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import docker run_config = self._get_run_config(flow_run, DockerRun) assert run_config is None or isinstance(run_config, DockerRun) # mypy image = get_flow_image(flow_run=flow_run) env_vars = self.populate_env_vars(flow_run, image, run_config=run_config) if not self.no_pull and len(image.split("/")) > 1: self.logger.info("Pulling image {}...".format(image)) registry = image.split("/")[0] if self.reg_allow_list and registry not in self.reg_allow_list: self.logger.error( "Trying to pull image from a Docker registry '{}' which" " is not in the reg_allow_list".format(registry) ) raise ValueError( "Trying to pull image from a Docker registry '{}' which" " is not in the reg_allow_list".format(registry) ) else: pull_output = self.docker_client.pull(image, stream=True, decode=True) for line in pull_output: self.logger.debug(line) self.logger.info("Successfully pulled image {}".format(image)) # Create any named volumes (if they do not already exist) for named_volume_name in self.named_volumes: try: self.docker_client.inspect_volume(name=named_volume_name) except docker.errors.APIError: self.logger.debug("Creating named volume {}".format(named_volume_name)) self.docker_client.create_volume( name=named_volume_name, driver="local", labels={"prefect_created": "true"}, ) # Create a container self.logger.debug("Creating Docker container {}".format(image)) host_config = { "auto_remove": True, # Compatibility for linux -- https://github.com/docker/cli/issues/2290 "extra_hosts": {"host.docker.internal": "host-gateway"}, } # type: dict container_mount_paths = self.container_mount_paths if container_mount_paths: host_config.update(binds=self.host_spec) if run_config is not None and run_config.host_config: # The host_config passed from the run_config will overwrite defaults host_config.update(run_config.host_config) networking_config = None # At the time of creation, you can only connect a container to a single network, # however you can create more connections after creation. # Connect first network in the creation step. If no network is connected here the container # is connected to the default `bridge` network. # The rest of the networks are connected after creation. if self.networks: networking_config = self.docker_client.create_networking_config( {self.networks[0]: self.docker_client.create_endpoint_config()} ) labels = { "io.prefect.flow-name": flow_run.flow.name, "io.prefect.flow-id": flow_run.flow.id, "io.prefect.flow-run-id": flow_run.id, } # Generate a container name to match the flow run name, ensuring it is docker # compatible and unique. Must match `[a-zA-Z0-9][a-zA-Z0-9_.-]+` in the end container_name = slugified_name = ( slugify( flow_run.name, lowercase=False, # Docker does not limit length but URL limits apply eventually so # limit the length for safety max_length=250, # Docker allows these characters for container names regex_pattern=r"[^a-zA-Z0-9_.-]+", ).lstrip( # Docker does not allow leading underscore, dash, or period "_-." 
) # Docker does not allow 0 character names so use the flow run id if name # would be empty after cleaning or flow_run.id ) # Create the container with retries on name conflicts index = 0 # will be bumped on name colissions while True: try: container = self.docker_client.create_container( image, command=get_flow_run_command(flow_run), environment=env_vars, name=container_name, volumes=container_mount_paths, host_config=self.docker_client.create_host_config(**host_config), networking_config=networking_config, labels=labels, ) except docker.errors.APIError as exc: if "Conflict" in str(exc) and "container name" in str(exc): index += 1 container_name = f"{slugified_name}-{index}" else: raise else: break # Connect the rest of the networks if self.networks: for network in self.networks[1:]: self.docker_client.connect_container_to_network( container=container, net_id=network ) # Start the container self.logger.debug( f"Starting Docker container with ID {container.get('Id')} and " f"name {container_name!r}" ) if self.networks: self.logger.debug( "Adding container with ID {} to docker networks: {}.".format( container.get("Id"), self.networks ) ) self.docker_client.start(container=container.get("Id")) if self.show_flow_logs: self.stream_flow_logs(container.get("Id")) self.logger.debug("Docker container {} started".format(container.get("Id"))) return "Container ID: {}".format(container.get("Id"))
Python
def stream_flow_logs(self, container_id: str) -> None:
    """Stream container logs back to stdout.

    Args:
        - container_id (str): ID of container
    """
    # All arguments to multiprocessing.Process need to be pickleable
    proc = multiprocessing.Process(
        target=_stream_container_logs,
        kwargs={
            "base_url": self.base_url,
            "timeout": self.docker_client_timeout,
            "container_id": container_id,
        },
    )
    proc.start()
    self.processes.append(proc)
Python
def populate_env_vars( self, flow_run: GraphQLResult, image: str, run_config: DockerRun = None ) -> dict: """ Populate metadata and variables in the environment variables for a flow run Args: - flow_run (GraphQLResult): A flow run object - image (str): The image for this flow - run_config (DockerRun, optional): The `run_config` for the flow, if any. Returns: - dict: a dictionary representing the populated environment variables """ if "localhost" in config.cloud.api: api = "http://host.docker.internal:{}".format(config.server.port) else: api = config.cloud.api env = {} # Populate environment variables, later sources overriding # 1. Logging level from config # Default to the config logging level, allowing it to be overriden # by later config soruces env.update({"PREFECT__LOGGING__LEVEL": config.logging.level}) # 2. Values set on the agent via `--env` env.update(self.env_vars) # 3. Values set on a DockerRun RunConfig (if present) if run_config is not None and run_config.env is not None: env.update(run_config.env) # 4. Non-overrideable required env vars env.update( { "PREFECT__BACKEND": config.backend, "PREFECT__CLOUD__API": api, "PREFECT__CLOUD__AUTH_TOKEN": ( # Pull an auth token if it exists but fall back to an API key so # flows in pre-0.15.0 containers still authenticate correctly config.cloud.agent.get("auth_token") or self.flow_run_api_key or "" ), "PREFECT__CLOUD__API_KEY": self.flow_run_api_key or "", "PREFECT__CLOUD__TENANT_ID": ( # Providing a tenant id is only necessary for API keys (not tokens) self.client.tenant_id if self.flow_run_api_key else "" ), "PREFECT__CLOUD__AGENT__LABELS": str(self.labels), "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS": str(self.log_to_cloud).lower(), "PREFECT__CONTEXT__FLOW_RUN_ID": flow_run.id, # type: ignore "PREFECT__CONTEXT__FLOW_ID": flow_run.flow.id, # type: ignore "PREFECT__CONTEXT__IMAGE": image, "PREFECT__CLOUD__USE_LOCAL_SECRETS": "false", "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudFlowRunner", "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudTaskRunner", # Backwards compatibility variable for containers on Prefect <0.15.0 "PREFECT__LOGGING__LOG_TO_CLOUD": str(self.log_to_cloud).lower(), } ) return env
Python
def _interval_validation(self, data: dict, **kwargs: Any) -> dict:
    """
    Ensures interval is at least one minute in length
    """
    # The interval is stored in microseconds, so one minute is 60 * 1e6
    if data["interval"] / 1e6 < 60:
        raise ValueError(
            "Interval cannot be less than one minute when deploying to Prefect Cloud."
        )
    return data
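Because the comparison divides by 1e6, the interval is evidently stored in microseconds; a quick arithmetic check of the boundary:

# The interval value is in microseconds, so one minute is 60 * 1e6.
one_minute_us = 60 * 1_000_000
thirty_seconds_us = 30 * 1_000_000
print(one_minute_us / 1e6 >= 60)      # True  -> passes the check above
print(thirty_seconds_us / 1e6 >= 60)  # False -> raises ValueError in the check above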
Python
async def fetch_posts(urls):
    """
    Wrapper that allows blogposts to be fetched asynchronously
    :param urls: List of blog urls
    """
    coroutines = [parse_blog_post(url) for url in urls]
    # asyncio.wait() no longer accepts bare coroutines on newer Python versions,
    # so gather them instead
    await asyncio.gather(*coroutines)
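A minimal driver for the coroutine above, assuming it is invoked from a synchronous entry point; the URLs are placeholders.

# Hedged usage sketch: run the async wrapper from synchronous code.
import asyncio

blog_urls = [
    "https://example.com/post-1",   # hypothetical URLs
    "https://example.com/post-2",
]
asyncio.run(fetch_posts(blog_urls))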
Python
async def parse_blog_post(blog_link):
    """
    Given a blog post's URL, this function GETs it and pulls the body out.
    We then analyze each word with NLTK and write some data into postgres.
    :param blog_link: String
    """
    global blogs_scraped_counter
    global word_count

    print('Fetching raw text for {}'.format(blog_link))
    soup = BeautifulSoup(r.get(blog_link).content, features='html.parser')
    post_text = soup.find(
        'div', attrs={'class': parser_util.POST_BODY_CLASS}).get_text()
    sanitized_post_text = parser_util.sanitize_blogpost(
        post_text, translator=parser_util.sentence_translator)

    print('Successfully parsed post.')
    if parser_util.DEBUG_MODE:
        print('\nSanitized blogpost:\n{clean}\n\nOriginal text:{orig}'.format(clean=sanitized_post_text,
                                                                              orig=post_text))

    print('Adding sentence information to postgres.')
    for sentence in sanitized_post_text.split('.'):
        # TODO: Implement word2vec and send actual vector instead of empty tuple
        pg.update_sentence_details(sentence, blog_link, '{}')
        for word in sentence.split(' '):
            analyze_word(word.strip(), blog_link)

    blogs_scraped_counter = blogs_scraped_counter + 1
Python
def analyze_word(word, blog_link):
    """
    Given a word, we figure out its POS and store various info in postgres.
    :param word: str
    :param blog_link: url that the word came from, only used for logging
    :return: tuple containing the word and POS of that word
    """
    global word_count
    word_count = word_count + 1

    # Now we do some nltk wizardry
    try:
        pos_array = nltk.pos_tag([word])
        pos_tuple = pos_array[0]
        pos = pos_tuple[1]

        # Send some info to the db
        pg.update_word_details(word, pos)
        pg.update_blog_details(word, blog_link)

        if pos in pos_counts:
            pos_counts[pos] = pos_counts[pos] + 1
        else:
            pos_counts[pos] = 1

        # we don't actually need this but it's useful for testing
        return pos_tuple
    except IndexError:
        # This is the only instance in which an exception is actually cause for concern
        if len(word) > 0:
            print('Failed to nltk-ify a post.\nURL: {url}\nWord: {word}'.format(url=blog_link, word=word))
    except Exception as e:
        print('Word analyzer encountered an unexpected exception on word: {w}\n Exception:{e}'.format(w=word, e=e))
Python
def _execute_query(query):
    """ Executes a query against our pg db and returns any results """
    try:
        _db_cursor.execute(query)
        result = _db_cursor.fetchall()
    except pg.ProgrammingError:
        result = None
    except pg.IntegrityError:
        print('Violated a DB constraint with query: ' + query)
        result = None
    return result
Python
def update_word_details(word, pos):
    """
    Given a word and a part of speech, we update the word_details table
    :param word: it's uh.. a word. Pulled from the blog post being parsed
    :param pos: part of speech as determined by NLTK
    """
    if len(word) < 1:
        print('Skipping empty word.')
    else:
        _execute_query(_WORD_UPDATE_QUERY.format(word=word, pos=pos))
Python
def update_blog_details(word, url):
    """
    Given a word and a url, we update the blog_details table
    :param word: yeah again.. it's a word
    :param url: blog's url
    """
    if len(word) < 1:
        print('Skipping empty word.')
    else:
        _execute_query(_BLOG_UPDATE_QUERY.format(word=word, url=url))
Python
def update_sentence_details(sentence, url, vector):
    """
    Given a sentence, blog url, and vector, update the sentence details table.
    :param sentence: Sanitized and pruned sentence
    :param url: link to the blog sentence came from
    :param vector: word2vec vector
    """
    sentence_length = len(sentence)
    if sentence_length < 1:
        print("Skipping empty sentence.")
    else:
        _execute_query(_SENTENCE_UPDATE_QUERY.format(sentence=sentence,
                                                     length=sentence_length,
                                                     vector=vector,
                                                     url=url))
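The three update helpers above splice values into their SQL templates with str.format, which breaks on quotes and is open to injection if the templates build SQL literals. If the cursor is a DB-API one (for example psycopg2), parameter binding is the safer pattern; the sketch below is an alternative for illustration only, and the table and column names are assumptions, not the project's actual schema.

# Alternative sketch using DB-API parameter binding instead of str.format().
# _WORD_UPDATE_SQL, the table, and the columns are hypothetical.
_WORD_UPDATE_SQL = (
    "INSERT INTO word_details (word, pos, count) VALUES (%s, %s, 1) "
    "ON CONFLICT (word) DO UPDATE SET count = word_details.count + 1"
)

def update_word_details_safe(word, pos):
    if word:
        _db_cursor.execute(_WORD_UPDATE_SQL, (word, pos))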
Python
def sanitize_blogpost(post, translator=word_translator):
    """
    This function removes punctuation, newlines, and double spaces so that
    nltk has a fighting chance of parsing a scraped blogpost.
    :param post: Raw post text from Soup
    :param translator: string translator that cleans up punctuation
    :return: cleaned text from the post body
    """
    return post.replace('\n\n', '\n') \
        .replace('\r', ' ') \
        .replace('\n', ' ') \
        .translate(translator) \
        .replace('  ', ' ') \
        .strip() \
        .lower()
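word_translator and sentence_translator are defined elsewhere in parser_util; a plausible construction with str.maketrans that strips punctuation is sketched below. This is an assumption for illustration, not the original definitions.

# Hypothetical construction of the translators used by sanitize_blogpost().
import string

# Drop all punctuation for word-level cleaning...
word_translator = str.maketrans('', '', string.punctuation)
# ...but keep periods so whole posts can still be split into sentences.
sentence_translator = str.maketrans('', '', string.punctuation.replace('.', ''))

print(sanitize_blogpost("Hello, World!\r\n", translator=word_translator))
# hello world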
Python
def construct_data_reader(lbann): """Construct Protobuf message for Python data reader. The Python data reader will import the current Python file to access the sample access functions. Args: lbann (module): Module for LBANN Python frontend """ # Note: The training data reader should be removed when # https://github.com/LLNL/lbann/issues/1098 is resolved. message = lbann.reader_pb2.DataReader() message.reader.extend([ tools.create_python_data_reader( lbann, current_file, 'get_sample', 'num_samples', 'sample_dims', 'train' ) ]) message.reader.extend([ tools.create_python_data_reader( lbann, current_file, 'get_sample', 'num_samples', 'sample_dims', 'test' ) ]) return message
Python
def construct_model(): """Construct LBANN model. Pilot1 Combo model """ import lbann # Layer graph data = lbann.Input(data_field='samples') responses = lbann.Input(data_field='responses') pred = combo.Combo()(data) mse = lbann.MeanSquaredError([responses, pred]) SS_res = lbann.Reduction(lbann.Square(lbann.Subtract(responses, pred)), mode='sum') #SS_tot = var(x) = mean((x-mean(x))^2) mini_batch_size = lbann.MiniBatchSize() mean = lbann.Divide(lbann.BatchwiseReduceSum(responses), mini_batch_size) SS_tot = lbann.Divide(lbann.BatchwiseReduceSum(lbann.Square(lbann.Subtract(responses, mean))), mini_batch_size) eps = lbann.Constant(value=1e-07,hint_layer=SS_tot) r2 = lbann.Subtract(lbann.Constant(value=1, num_neurons='1'), lbann.Divide(SS_res, lbann.Add(SS_tot,eps))) metrics = [lbann.Metric(mse, name='mse')] metrics.append(lbann.Metric(r2, name='r2')) callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()] # Construct model num_epochs = 100 layers = list(lbann.traverse_layer_graph([data, responses])) return lbann.Model(num_epochs, layers=layers, metrics=metrics, objective_function=mse, callbacks=callbacks)
Python
def _positional_encoding(self, sequence_length): """Positional encodings corresponding to a sequence length. PE(pos,2*i) = sin( pos / 10000**(2*i/hidden_size) ) PE(pos,2*i+1) = cos( pos / 10000**(2*i/hidden_size) ) Encodings are memoized. """ # Construct positional encoding if not in cache if sequence_length not in self._positional_encoding_cache: vals = [] for pos in range(sequence_length): for i in range((self.hidden_size+1) // 2): x = pos / 10000**(2*i/self.hidden_size) vals.append(math.sin(x)) vals.append(math.cos(x)) if self.hidden_size % 2 != 0: vals.pop() weights = lbann.Weights( initializer=lbann.ValueInitializer(values=str_list(vals)), optimizer=None, name=f'{self.name}_positional{sequence_length}_weights', ) self._positional_encoding_cache[sequence_length] = lbann.WeightsLayer( dims=str_list([sequence_length, self.hidden_size]), weights=weights, name=f'{self.name}_positional{sequence_length}', ) # Return cached positional encoding return self._positional_encoding_cache[sequence_length]
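A standalone NumPy sketch of the same sin/cos encoding is handy for spot-checking the values baked into the ValueInitializer above; this is an illustration, not LBANN code.

# Vectorized equivalent of the loop above: even columns get sin, odd columns get cos,
# and an odd hidden_size simply drops the trailing cos value.
import numpy as np

def positional_encoding(sequence_length, hidden_size):
    pe = np.zeros((sequence_length, hidden_size))
    pos = np.arange(sequence_length)[:, None]
    i = np.arange((hidden_size + 1) // 2)[None, :]
    angle = pos / 10000 ** (2 * i / hidden_size)
    pe[:, 0::2] = np.sin(angle)[:, : pe[:, 0::2].shape[1]]
    pe[:, 1::2] = np.cos(angle)[:, : pe[:, 1::2].shape[1]]
    return pe

print(positional_encoding(4, 6).shape)   # (4, 6)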
Python
def _subsequent_mask(self, size): """Attention mask to prevent attending to subsequent positions. The (i,j) entry is -1e9 if i<j and is 0 otherwise. Masks are memoized. """ # Construct mask if not in cache if size not in self._subsequent_mask_cache: vals = np.triu(np.full((size,size), -1e9), k=1) weights = lbann.Weights( initializer=lbann.ValueInitializer(values=str_list(np.nditer(vals))), optimizer=None, name=f'{self.name}_mask{size}_weights', ) self._subsequent_mask_cache[size] = lbann.WeightsLayer( dims=str_list([size, size]), weights=weights, name=f'{self.name}_mask{size}', ) # Return cached mask return self._subsequent_mask_cache[size]
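For reference, the mask values produced by np.triu for size 3: positions may attend to themselves and earlier positions, while later positions are pushed to -1e9.

# The subsequent-position mask for size 3.
import numpy as np

mask = np.triu(np.full((3, 3), -1e9), k=1)
# mask == [[0, -1e9, -1e9],
#          [0,    0, -1e9],
#          [0,    0,    0]]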
Python
def forward(self, source, source_length, target, target_length): """Apply Transformer. The input and output tensors are interpreted as sequences of vectors, where the first tensor dimension is the sequence dimension. Args: source (lbann.Layer): Sequence of input vectors to encoder stack. source_length (int): Length of input sequence to encoder. target (lbann.Layer): Sequence of input vectors to decoder stack. target_length (int): Length of input sequence to decoder. Returns: lbann.Layer: Sequence of output vectors. """ self.instance += 1 # Encoder stack # Note: Add positional encoding to input x = lbann.Sum([ source, self._positional_encoding(source_length)], name=f'{self.name}_instance{self.instance}_positional_source', ) for encoder_layer in self.encoder: x = encoder_layer(x) memory = x # Decoder stack # Note: Add positional encoding to input x = lbann.Sum( [target, self._positional_encoding(target_length)], name=f'{self.name}_instance{self.instance}_positional_target', ) subgraph_masks = {} if(self.branches>0): for i in range(self.branches): subgraph_masks[i+1] = lbann.Identity(self._subsequent_mask(target_length),name="mylayer"+str(i) , parallel_strategy = {'sub_branch_tag':i+1,'enable_subgraph':True}) subgraph_masks[i+1] = lbann.Identity(subgraph_masks[i+1]) if(self.branches>0): for decoder_layer in self.decoder: x = decoder_layer( x, memory, tgt_mask=subgraph_masks, ) else: for decoder_layer in self.decoder: x = decoder_layer( x, memory, tgt_mask=self._subsequent_mask(target_length), ) return x
Python
def create_cosmoflow_data_reader( train_path, val_path, test_path, num_responses): """Create a data reader for CosmoFlow. Args: {train, val, test}_path (str): Path to the corresponding dataset. num_responses (int): The number of parameters to predict. """ reader_args = [ {"role": "train", "data_filename": train_path}, {"role": "validate", "data_filename": val_path}, {"role": "test", "data_filename": test_path}, ] for reader_arg in reader_args: reader_arg["data_file_pattern"] = "{}/*.hdf5".format( reader_arg["data_filename"]) reader_arg["hdf5_key_data"] = "full" reader_arg["hdf5_key_responses"] = "unitPar" reader_arg["num_responses"] = num_responses reader_arg.pop("data_filename") readers = [] for reader_arg in reader_args: reader = lbann.reader_pb2.Reader( name="hdf5", shuffle=(reader_arg["role"] != "test"), validation_percent=0, absolute_sample_count=0, percent_of_data_to_use=1.0, disable_labels=True, disable_responses=False, scaling_factor_int16=1.0, **reader_arg) readers.append(reader) return lbann.reader_pb2.DataReader(reader=readers)
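A minimal usage sketch for the reader above. The dataset paths and response count are placeholders, and each directory is assumed to hold the HDF5 shards matched by the "*.hdf5" pattern:

# Hypothetical paths; adjust to wherever the CosmoFlow HDF5 shards live.
data_reader = create_cosmoflow_data_reader(
    train_path='/path/to/cosmoflow/train',
    val_path='/path/to/cosmoflow/validate',
    test_path='/path/to/cosmoflow/test',
    num_responses=4,  # example value: number of cosmological parameters to regress
)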
Python
def generate_operator_layer(operator_class): """Create operator layer class for a single operator Returns a class that inherits from lbann.OperatorLayer. Args: operator_class (type): A derived class of lbann.operators.Operator """ def __init__(self, *args, **kwargs): """Operator layer with a single operator Forwards arguments to lbann.OperatorLayer or sub-class of lbann.Operator. """ layer_kwargs = lbann.Layer.__init__.__kwdefaults__.copy() op_kwargs = {} for key, value in kwargs.items(): if key in layer_kwargs: layer_kwargs[key] = value else: op_kwargs[key] = value layer_kwargs['ops'] = [ operator_class(**op_kwargs) ] OperatorLayer.__init__(self, *args, **layer_kwargs) def export_proto(self): """Construct and return a protobuf message.""" # Use default datatype if not specified if self.datatype is None: self.datatype = 0 # Convert device string to enum device = lbann.DeviceAllocation.DEFAULT_DEVICE if isinstance(self.device, str): if self.device.lower() == 'cpu': device = lbann.DeviceAllocation.CPU elif self.device.lower() == 'gpu': device = lbann.DeviceAllocation.GPU # Configure operators to match layer for o in self.ops: o.input_type = self.datatype o.output_type = self.datatype o.device_allocation = device # Generate Protobuf message return OperatorLayer.export_proto(self) # Return operator layer class class_name = operator_class.__name__ class_dict = {'__init__': __init__, 'export_proto': export_proto} return type(class_name, (OperatorLayer,), class_dict)
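A hedged usage sketch for the factory above. The operator class `lbann.operators.Clamp` and its `min`/`max` fields are used purely for illustration and may not match the installed LBANN build; the point is that layer-level kwargs (`name`, `device`, ...) are routed to the Layer base class while the remaining kwargs go to the wrapped Operator:

# Illustrative only: assumes an Operator subclass with min/max fields exists.
ClampLayer = generate_operator_layer(lbann.operators.Clamp)

x = lbann.Input(data_field='samples')
y = ClampLayer(x, min=0.0, max=1.0,            # forwarded to the Clamp operator
               name='clamp01', device='GPU')   # consumed by the Layer base class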
Python
def export_proto(self): """Construct and return a protobuf message.""" # Use default datatype if not specified if self.datatype is None: self.datatype = 0 # Convert device string to enum device = lbann.DeviceAllocation.DEFAULT_DEVICE if isinstance(self.device, str): if self.device.lower() == 'cpu': device = lbann.DeviceAllocation.CPU elif self.device.lower() == 'gpu': device = lbann.DeviceAllocation.GPU # Configure operators to match layer for o in self.ops: o.input_type = self.datatype o.output_type = self.datatype o.device_allocation = device # Generate Protobuf message return OperatorLayer.export_proto(self)
Python
def construct_data_reader(lbann): """Construct Protobuf message for Python data reader. The Python data reader will import the current Python file to access the sample access functions. Args: lbann (module): Module for LBANN Python frontend """ # TODO (tym): Figure out how to switch between LBANN builds. See # GitHub Issue #1289. import lbann.contrib.lc.paths # Load data readers from prototext dirname = os.path.dirname lbann_dir = dirname(dirname(dirname(os.path.realpath(__file__)))) pb_file = os.path.join(lbann_dir, 'model_zoo', 'data_readers', 'data_reader_mnist.prototext') message = lbann.lbann_pb2.LbannPB() with open(pb_file, 'r') as f: google.protobuf.text_format.Merge(f.read(), message) message = message.data_reader # Set location of MNIST data for reader in message.reader: reader.data_filedir = lbann.contrib.lc.paths.mnist_dir() reader.percent_of_data_to_use = lenet_fraction # Validation set message.reader[0].validation_percent = 0.1 return message
Python
def export_proto(self): """Construct and return a protobuf message.""" # Initialize protobuf message model = model_pb2.Model() model.num_epochs = self.epochs model.subgraph_communication = convert_to_protbuf_enums(self.subgraph_communication) model.enable_subgraph_topology = self.subgraph_topology model.subgraph_parent_grid_resources = self.subgraph_num_common_resources if self.summary_dir is not None: model.summarizer.dir = self.summary_dir # Add model components model.layer.extend([l.export_proto() for l in self.layers]) model.weights.extend([w.export_proto() for w in self.weights]) model.objective_function.CopyFrom(self.objective_function.export_proto()) model.metric.extend([m.export_proto() for m in self.metrics]) model.callback.extend([c.export_proto() for c in self.callbacks]) return model
Python
def numpy_cross_entropy(x, xhat):
    """Cross entropy between two distributions, computed with NumPy.

    The computation is performed with 64-bit floats.

    Args:
        x: Estimated distribution
        xhat: True distribution

    """
    if x.dtype != np.float64:       # compare dtypes with !=, not identity
        x = x.astype(np.float64)
    if xhat.dtype != np.float64:
        xhat = xhat.astype(np.float64)
    return -np.inner(xhat, np.log(x))
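A quick worked check (assuming the helper above is in scope): a uniform estimate over four classes against a one-hot truth gives -log(1/4) = log 4:

import numpy as np

x = np.full(4, 0.25)                   # estimated distribution (uniform)
xhat = np.array([0., 1., 0., 0.])      # true distribution (one-hot)
print(numpy_cross_entropy(x, xhat))    # ~1.3862943611, i.e. np.log(4)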
Python
def make_random_array(shape, seed): """Hacked function to generate a random array. NumPy's RNG produces different values with different NumPy versions. This function is helpful when array values must be identical across all runs, e.g. when checking against precomputed metric values. Args: shape (Iterable of int): Array dimensions seed (int): Parameter for RNG. Must be non-zero. Returns: numpy.ndarray: Array of `np.float32`. Values will be in [-0.5,0.5). """ size = functools.reduce(operator.mul, shape) eps = np.finfo(np.float32).eps x = (seed / np.linspace(math.sqrt(eps), 0.1, size)) % 1 - 0.5 return x.reshape(shape).astype(np.float32)
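A short determinism check (assuming the helper above is in scope); the whole point of the function is that equal seeds give identical arrays across runs and NumPy versions:

import numpy as np

a = make_random_array((2, 3), seed=20231115)   # arbitrary non-zero seed
b = make_random_array((2, 3), seed=20231115)
assert np.array_equal(a, b)      # same seed -> identical values
assert a.dtype == np.float32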
Python
def pytorch_convolution(data,
                        kernel,
                        bias=None,
                        stride=1,
                        padding=0,
                        dilation=1,
                        groups=1):
    """Wrapper around PyTorch convolution.

    Input and output data are NumPy arrays.

    """

    # Convert input data to PyTorch tensors with 64-bit floats
    import torch
    import torch.nn.functional
    if type(data) is np.ndarray:
        data = torch.from_numpy(data)
    if type(kernel) is np.ndarray:
        kernel = torch.from_numpy(kernel)
    if type(bias) is np.ndarray:
        bias = torch.from_numpy(bias)
    if data.dtype != torch.float64:
        data = data.to(torch.float64)       # torch tensors convert with .to(), not .astype()
    if kernel.dtype != torch.float64:
        kernel = kernel.to(torch.float64)
    if bias is not None and bias.dtype != torch.float64:
        bias = bias.to(torch.float64)       # guard: bias may be None

    # Perform convolution with PyTorch
    output = None
    if len(kernel.shape) == 3:
        output = torch.nn.functional.conv1d(
            data, kernel, bias, stride, padding, dilation, groups
        )
    if len(kernel.shape) == 4:
        output = torch.nn.functional.conv2d(
            data, kernel, bias, stride, padding, dilation, groups
        )
    if len(kernel.shape) == 5:
        output = torch.nn.functional.conv3d(
            data, kernel, bias, stride, padding, dilation, groups
        )
    if output is None:
        raise ValueError('PyTorch only supports 1D, 2D, and 3D convolution')

    # Return output as NumPy array
    return output.numpy()
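A minimal usage sketch (requires PyTorch; assumes the wrapper above is in scope). With NCHW input, an OIHW kernel, and padding 1, a 3x3 convolution preserves the spatial size:

import numpy as np

data = np.random.rand(1, 3, 8, 8)     # batch=1, 3 channels, 8x8
kernel = np.random.rand(4, 3, 3, 3)   # 4 output channels, 3x3 kernel
out = pytorch_convolution(data, kernel, stride=1, padding=1)
print(out.shape)                      # (1, 4, 8, 8)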
Python
def initialize_rng(): """Initialize random seed if needed. Seed should be initialized independently on each process. We reinitialize if we detect a process fork. """ global rng_pid if rng_pid != os.getpid(): rng_pid = os.getpid() random.seed()
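A sketch of the intended usage (assuming the helper and the module-level `rng_pid` it mutates live in the same module as the sample functions): call it at the top of every sample accessor so freshly forked data-reader workers get their own seed:

import os
import random

rng_pid = None   # module-level state consumed by initialize_rng (assumed for this sketch)

def get_sample(index):
    initialize_rng()                           # re-seeds once per worker process
    return [random.random() for _ in range(4)]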
Python
def construct_data_reader(lbann): """Construct Protobuf message for Python data reader. The Python data reader will import the current Python file to access the sample access functions. Args: lbann (module): Module for LBANN Python frontend """ # Note: The training data reader should be removed when # https://github.com/LLNL/lbann/issues/1098 is resolved. message = lbann.reader_pb2.DataReader() message.reader.extend([ tools.create_python_data_reader( lbann, current_file, 'get_sample', 'num_samples', 'sample_dims', 'train', ), tools.create_python_data_reader( lbann, current_file, 'get_sample', 'num_samples', 'sample_dims', 'test', ), ]) return message
Python
def construct_model(args):
    """Construct LBANN model for the 3D CosmoGAN.
    """
    obj = []
    metrics = []
    callbacks = []

    w = [args.input_width]*3
    w.insert(0, args.input_channel)
    _sample_dims = w

    ps = None
    # have model and input ps
    if args.use_distconv:
        ps = get_parallel_strategy_args(
            sample_groups=args.mini_batch_size,
            depth_groups=args.depth_groups,
            height_groups=args.height_groups,
        )

    g_device = 'GPU'
    input_ = lbann.Input(name='input', data_field='samples')
    # Note: no trailing comma here, otherwise input_ becomes a tuple instead of a Layer
    input_ = lbann.Reshape(input_, dims=list2str(_sample_dims), name='in_reshape', device=g_device)
    x1 = lbann.Identity(input_, parallel_strategy=None, name='x1')
    x2 = lbann.Identity(input_, name='x2') if args.compute_mse else None

    zero = lbann.Constant(value=0.0, num_neurons='1', name='zero', device=g_device)
    one = lbann.Constant(value=1.0, num_neurons='1', name='one', device=g_device)

    z = lbann.Reshape(lbann.Gaussian(mean=0.0, stdev=1.0, neuron_dims="64",
                                     name='noise_vec', device=g_device),
                      dims='1 64', name='noise_vec_reshape', device=g_device)
    print("RUN ARGS ", args)

    d1_real, d1_fake, d_adv, gen_img = model.Exa3DGAN(
        args.input_width, args.input_channel,
        g_device, ps, use_bn=args.use_bn)(x1, z)

    layers = list(lbann.traverse_layer_graph([d1_real, d1_fake]))

    # Setup objective function
    weights = set()
    src_layers = []
    dst_layers = []
    for l in layers:
        if l.weights and "disc1" in l.name and "instance1" in l.name:
            src_layers.append(l.name)
        # freeze weights in disc2, analogous to discrim.trainable=False in Keras
        if l.weights and "disc2" in l.name:
            dst_layers.append(l.name)
            for idx in range(len(l.weights)):
                l.weights[idx].optimizer = lbann.NoOptimizer()
        weights.update(l.weights)

    d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real, one], name='d1_real_bce')
    d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake, zero], name='d1_fake_bce')
    d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv, one], name='d_adv_bce')
    mse = lbann.MeanSquaredError([gen_img, x2], name='MSE') if args.compute_mse else None

    obj.append(d1_real_bce)
    obj.append(d1_fake_bce)
    obj.append(d_adv_bce)

    metrics.append(lbann.Metric(d_adv_bce, name='d_adv_bce'))
    metrics.append(lbann.Metric(d1_real_bce, name='d1_real_bce'))
    metrics.append(lbann.Metric(d1_fake_bce, name='d1_fake_bce'))

    if mse is not None:
        obj.append(mse)
        metrics.append(lbann.Metric(mse, name='MSE'))

    callbacks.append(lbann.CallbackPrint())
    callbacks.append(lbann.CallbackTimer())
    callbacks.append(lbann.CallbackGPUMemoryUsage())

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    return lbann.Model(args.num_epochs,
                       weights=weights,
                       layers=layers,
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
Python
def construct_data_reader(lbann): """Construct Protobuf message for Python data reader. The Python data reader will import the current Python file to access the sample access functions. Args: lbann (module): Module for LBANN Python frontend """ message = lbann.reader_pb2.DataReader() _reader = message.reader.add() _reader.name = 'synthetic' _reader.role = 'train' _reader.num_samples = 2 _reader.synth_dimensions = '1' _reader.percent_of_data_to_use = 1.0 return message
Python
def export_proto(self): """Construct and return a protobuf message.""" # Initialize protobuf message trainer = trainer_pb2.Trainer() if self.name is not None: trainer.name = self.name if self.num_parallel_readers is not None: trainer.num_parallel_readers = self.num_parallel_readers if self.random_seed is not None: trainer.random_seed = self.random_seed if self.mini_batch_size is not None: trainer.mini_batch_size = self.mini_batch_size if self.hydrogen_block_size is not None: trainer.hydrogen_block_size = self.hydrogen_block_size if self.serialize_io is not None: trainer.serialize_io = self.serialize_io if self.training_algo is not None: trainer.training_algorithm.CopyFrom(self.training_algo.export_proto()) # Add trainer components trainer.callback.extend([c.export_proto() for c in self.callbacks]) return trainer
Python
def construct_model(run_args):
    """Construct LBANN model.

    Initial model for ATOM molecular VAE

    """
    import lbann

    print("Dump model dir ", run_args.dump_model_dir)
    assert run_args.dump_model_dir, "evaluate script assumes a pretrained WAE model"

    pad_index = run_args.pad_index
    assert pad_index is not None

    sequence_length = run_args.sequence_length
    assert sequence_length is not None

    print("sequence length is {}".format(sequence_length))
    data_layout = "data_parallel"

    # Layer graph
    input_ = lbann.Identity(lbann.Input(name='inp', data_field='samples'), name='inp1')
    wae_loss = []
    input_feature_dims = sequence_length

    embedding_size = run_args.embedding_dim
    dictionary_size = run_args.num_embeddings
    assert embedding_size is not None
    assert dictionary_size is not None

    save_output = True if run_args.dump_outputs_dir else False

    print("save output? ", save_output, "out dir ", run_args.dump_outputs_dir)

    z = lbann.Gaussian(mean=0.0, stdev=1.0, neuron_dims=run_args.z_dim)

    waemodel = molwae.MolWAE(input_feature_dims,
                             dictionary_size,
                             embedding_size,
                             pad_index, run_args.z_dim, save_output)
    recon, d1_real, d1_fake, d_adv, arg_max = waemodel(input_, z)

    zero = lbann.Constant(value=0.0, num_neurons='1', name='zero')
    one = lbann.Constant(value=1.0, num_neurons='1', name='one')

    d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real, one], name='d1_real_bce')
    d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake, zero], name='d1_fake_bce')
    d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv, one], name='d_adv_bce')

    wae_loss.append(recon)

    layers = list(lbann.traverse_layer_graph(input_))

    # Setup objective function
    weights = set()
    src_layers = []
    dst_layers = []
    for l in layers:
        if l.weights and "disc0" in l.name and "instance1" in l.name:
            src_layers.append(l.name)
        # freeze weights in disc2
        if l.weights and "disc1" in l.name:
            dst_layers.append(l.name)
            for idx in range(len(l.weights)):
                l.weights[idx].optimizer = lbann.NoOptimizer()
        weights.update(l.weights)

    l2_weights = [w for w in weights if not isinstance(w.optimizer, lbann.NoOptimizer)]
    l2_reg = lbann.L2WeightRegularization(weights=l2_weights, scale=1e-4)

    wae_loss.append(d1_real_bce)
    wae_loss.append(d_adv_bce)
    wae_loss.append(d1_fake_bce)
    wae_loss.append(l2_reg)
    print("LEN wae loss ", len(wae_loss))

    obj = lbann.ObjectiveFunction(wae_loss)

    # Initialize check metric callback
    metrics = [lbann.Metric(d_adv_bce, name='adv_loss'),
               lbann.Metric(recon, name='recon')]

    callbacks = [lbann.CallbackPrint(),
                 #lbann.CallbackStepLearningRate(step=10, amt=0.5),
                 lbann.CallbackTimer()]

    callbacks.append(lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),
                                                  destination_layers=list2str(dst_layers),
                                                  batch_interval=2))

    # Dump output (activation) for post processing
    if run_args.dump_outputs_dir:
        pred_tensor = lbann.Concatenation(arg_max, name='pred_tensor')
        callbacks.append(lbann.CallbackDumpOutputs(batch_interval=run_args.dump_outputs_interval,
                                                   execution_modes='test',
                                                   directory=run_args.dump_outputs_dir,
                                                   layers=f'inp pred_tensor {waemodel.q_mu.name}'))

    # Construct model
    return lbann.Model(run_args.num_epochs,
                       weights=weights,
                       layers=layers,
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
Python
def construct_data_reader(run_args): """ Construct Protobuf message for Python data reader. The Python data reader will import this Python file to access the sample access functions. """ module_file = os.path.abspath(run_args.data_module_file) os.environ["DATA_CONFIG"] = os.path.abspath(run_args.data_config) module_name = os.path.splitext(os.path.basename(module_file))[0] module_dir = os.path.dirname(module_file) print("module_name: {}\tmodule_dir: {}".format(module_name, module_dir)) # Base data reader message message = lbann.reader_pb2.DataReader() # Training set data reader data_reader = message.reader.add() data_reader.name = "python" data_reader.role = "train" data_reader.shuffle = True data_reader.percent_of_data_to_use = 1.0 data_reader.validation_percent = 0.1 data_reader.tournament_percent = 0.1 data_reader.python.module = module_name data_reader.python.module_dir = module_dir data_reader.python.sample_function = "get_sample" data_reader.python.num_samples_function = "num_samples" data_reader.python.sample_dims_function = "sample_dims" return message
Python
def construct_data_reader(run_args): """ Construct Protobuf message for Python data reader. The Python data reader will import this Python file to access the sample access functions. """ module_file = os.path.abspath(run_args.data_module_file) os.environ["DATA_CONFIG"] = os.path.abspath(run_args.data_config) module_name = os.path.splitext(os.path.basename(module_file))[0] module_dir = os.path.dirname(module_file) print("module_name: {}\tmodule_dir: {}".format(module_name, module_dir)) # Base data reader message message = lbann.reader_pb2.DataReader() # Training set data reader data_reader = message.reader.add() data_reader.name = "python" data_reader.role = "train" data_reader.shuffle = True data_reader.percent_of_data_to_use = 1.0 data_reader.python.module = module_name data_reader.python.module_dir = module_dir data_reader.python.sample_function = "get_sample" data_reader.python.num_samples_function = "num_samples" data_reader.python.sample_dims_function = "sample_dims" return message
Python
def DGCN_layer(feature_matrix, adj_matrix, node_features):
    """An example 3-layer GCN kernel.

    Args:
        feature_matrix (Layer): Node feature layer. Should have the shape:
                                (num_nodes, node_features)
        adj_matrix (Layer): Adjacency matrix layer. Should have the shape:
                            (num_nodes, num_nodes)
        node_features (int): The number of features per node

    Returns:
        (Layer): The new embedding of the node features
    """
    out_channel_1 = 1024
    out_channel_2 = 512
    out_channel_3 = 256

    gcn1 = DenseGCNConv(input_channels=node_features, output_channels=out_channel_1)
    gcn2 = DenseGCNConv(input_channels=out_channel_1, output_channels=out_channel_2)
    gcn3 = DenseGCNConv(input_channels=out_channel_2, output_channels=out_channel_3)

    out_channel = out_channel_3

    x = gcn1(feature_matrix, adj_matrix)
    x = lbann.Relu(x, name="DGCN1_activation")

    x = gcn2(x, adj_matrix)
    x = lbann.Relu(x, name="DGCN2_activation")

    x = gcn3(x, adj_matrix)
    x = lbann.Relu(x, name="DGCN3_activation")

    return x
Python
def DGraph_Layer(feature_matrix, adj_matrix, node_features):
    """An example 3-layer Graph kernel.

    Args:
        feature_matrix (Layer): Node feature layer. Should have the shape:
                                (num_nodes, node_features)
        adj_matrix (Layer): Adjacency matrix layer. Should have the shape:
                            (num_nodes, num_nodes)
        node_features (int): The number of features per node

    Returns:
        (Layer): The new embedding of the node features
    """
    out_channel_1 = 1024
    out_channel_2 = 512
    out_channel_3 = 256

    gcn1 = DenseGraphConv(input_channels=node_features, output_channels=out_channel_1)
    gcn2 = DenseGraphConv(input_channels=out_channel_1, output_channels=out_channel_2)
    gcn3 = DenseGraphConv(input_channels=out_channel_2, output_channels=out_channel_3)

    out_channel = out_channel_3

    x = gcn1(feature_matrix, adj_matrix)
    x = lbann.Relu(x, name="DGraph1_activation")

    x = gcn2(x, adj_matrix)
    x = lbann.Relu(x, name="DGraph2_activation")

    x = gcn3(x, adj_matrix)
    x = lbann.Relu(x, name="DGraph3_activation")

    return x
Python
def make_model(num_vertices=None,
               node_features=None,
               num_classes=None,
               kernel_type='GCN',
               callbacks=None,
               num_epochs=1):
    '''Construct a model DAG using one of the Graph Kernels

    Args:
        num_vertices (int): Number of vertices of each graph (default: None)
        node_features (int): Number of features per node (default: None)
        num_classes (int): Number of classes as targets (default: None)
        kernel_type (str): Graph kernel to use in model. Expected one of
                           GCN or Graph (default: GCN)
        callbacks (list): Callbacks for the model. If set to None, the model
                          description, GPU usage, training output, and timer
                          are reported. (default: None)
        num_epochs (int): Number of epochs to run (default: 1)

    Returns:
        (lbann.Model): A model object with the supplied callbacks, dataset
                       presets, and graph kernels.
    '''

    num_vertices = 100
    num_classes = 2
    node_features = 3

    assert num_vertices is not None
    assert num_classes is not None
    assert node_features is not None

    #----------------------------------
    # Reshape and Slice Input Tensor
    #----------------------------------

    input_ = lbann.Input(data_field='samples')

    # Input dimensions should be (num_vertices * node_features + num_vertices^2 + num_classes)
    # Input should have at least two children since the target is classification

    sample_dims = num_vertices*node_features + (num_vertices ** 2) + num_classes
    graph_dims = num_vertices*node_features + (num_vertices ** 2)
    feature_matrix_size = num_vertices * node_features

    graph_input = lbann.Slice(input_, axis=0,
                              slice_points=str_list([0, feature_matrix_size, graph_dims, sample_dims]),
                              name="Graph_Input")

    feature_matrix = lbann.Reshape(graph_input,
                                   dims=str_list([num_vertices, node_features]),
                                   name="Node_features")

    adj_matrix = lbann.Reshape(graph_input,
                               dims=str_list([num_vertices, num_vertices]),
                               name="Adj_Mat")

    target = lbann.Identity(graph_input, name="Target")
    target = lbann.Reshape(target, dims=str(num_classes))

    #----------------------------------
    # Perform Graph Convolution
    #----------------------------------

    if kernel_type == 'GCN':
        x = DGCN_layer(feature_matrix, adj_matrix, node_features)
    elif kernel_type == 'Graph':
        x = DGraph_Layer(feature_matrix, adj_matrix, node_features)
    else:
        raise ValueError('Invalid graph kernel specifier "{}" received. '
                         'Expected one of: GCN or Graph'.format(kernel_type))
    out_channel = 256

    #----------------------------------
    # Apply Reduction on Node Features
    #----------------------------------

    average_vector = lbann.Constant(value=1/num_vertices,
                                    num_neurons=str_list([1, num_vertices]),
                                    name="Average_Vector")
    # x is now a vector with out_channel dimensions
    x = lbann.MatMul(average_vector, x, name="Node_Feature_Reduction")

    x = lbann.Reshape(x, dims=str_list([out_channel]), name="Squeeze")
    x = lbann.FullyConnected(x, num_neurons=256, name="hidden_layer_1")
    x = lbann.Relu(x, name="hidden_layer_1_activation")
    x = lbann.FullyConnected(x, num_neurons=num_classes, name="Output_Fully_Connected")

    #----------------------------------
    # Loss Function and Accuracy
    #----------------------------------

    probs = lbann.Softmax(x, name="Softmax")
    loss = lbann.CrossEntropy(probs, target, name="Cross_Entropy_Loss")
    accuracy = lbann.CategoricalAccuracy(probs, target, name="Accuracy")

    layers = lbann.traverse_layer_graph(input_)

    if callbacks is None:
        print_model = lbann.CallbackPrintModelDescription()  # Prints initial model after setup
        training_output = lbann.CallbackPrint(interval=1,
                                              print_global_stat_only=False)  # Prints training progress
        gpu_usage = lbann.CallbackGPUMemoryUsage()
        timer = lbann.CallbackTimer()
        callbacks = [print_model, training_output, gpu_usage, timer]
    else:
        if isinstance(callbacks, list):
            callbacks = callbacks

    metrics = [lbann.Metric(accuracy, name='accuracy', unit="%")]

    model = lbann.Model(num_epochs,
                        layers=layers,
                        objective_function=loss,
                        metrics=metrics,
                        callbacks=callbacks)
    return model
Python
def pytorch_pooling(data,
                    kernel_dims,
                    pool_mode,
                    stride=1,
                    padding=0):
    """Wrapper around PyTorch pooling.

    Input and output data are NumPy arrays.

    """

    # Convert input data to PyTorch tensors with 64-bit floats
    import torch
    import torch.nn.functional
    if type(data) is np.ndarray:
        data = torch.from_numpy(data)
    if data.dtype != torch.float64:
        data = data.to(torch.float64)   # torch tensors convert with .to(), not .astype()

    # Perform pooling with PyTorch
    if len(kernel_dims) not in [1, 2, 3]:
        raise ValueError('PyTorch only supports 1D, 2D, and 3D pooling')
    func_name = "{}_pool{}d".format(
        {"average": "avg", "max": "max"}[pool_mode],
        len(kernel_dims),
    )
    output = getattr(torch.nn.functional, func_name)(
        data, kernel_dims, stride, padding,
    )

    # Return output as NumPy array
    return output.numpy()
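A minimal usage sketch (requires PyTorch; assumes the wrapper above is in scope): 2x2 max pooling with stride 2 halves each spatial dimension:

import numpy as np

data = np.random.rand(1, 2, 6, 6)                     # NCHW input
out = pytorch_pooling(data, (2, 2), 'max', stride=2)
print(out.shape)                                      # (1, 2, 3, 3)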
Python
def construct_trainer(lbann, ): """Construct LBANN trainer and training algorithm. Args: lbann (module): Module for LBANN Python frontend """ num_epochs = 1 mini_batch_size = num_samples() algo = lbann.KFAC( "kfac", lbann.BatchedIterativeOptimizer("sgd", epoch_count=num_epochs), damping_warmup_steps=0, kronecker_decay=0, damping_act="1e-30", damping_err="1e-30", compute_interval=1, ) trainer = lbann.Trainer(mini_batch_size, training_algo=algo) return trainer
Python
def construct_model(run_args): """Construct LBANN model. Initial model for ATOM molecular VAE """ import lbann pad_index = run_args.pad_index assert pad_index is not None sequence_length = run_args.sequence_length assert sequence_length is not None, 'should be training seq len + bos + eos' print("sequence length is {}, which is training sequence len + bos + eos".format(sequence_length)) data_layout = "data_parallel" # Layer graph input_ = lbann.Input(data_field='samples',name='inp_data') #Note input assumes to come from encoder script concatenation of input smiles + z inp_slice = lbann.Slice(input_, axis=0, slice_points=str_list([0, sequence_length, sequence_length+run_args.z_dim]), name='inp_slice') inp_smile = lbann.Identity(inp_slice,name='inp_smile') z = lbann.Identity(inp_slice, name='z') wae_loss= [] input_feature_dims = sequence_length embedding_size = run_args.embedding_dim dictionary_size = run_args.num_embeddings assert embedding_size is not None assert dictionary_size is not None save_output = True if run_args.dump_outputs_dir else False print("save output? ", save_output, "out dir ", run_args.dump_outputs_dir) #uncomment below for random sampling #z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=str(run_args.z_dim)) x = lbann.Slice(inp_smile, slice_points=str_list([0, input_feature_dims])) x = lbann.Identity(x) waemodel = molwae.MolWAE(input_feature_dims, dictionary_size, embedding_size, pad_index,run_args.z_dim,save_output=save_output) x_emb = lbann.Embedding( x, num_embeddings=waemodel.dictionary_size, embedding_dim=waemodel.embedding_size, name='emb', weights=waemodel.emb_weights ) pred, arg_max = waemodel.forward_decoder(x_emb,z) recon = waemodel.compute_loss(x, pred) wae_loss.append(recon) layers = list(lbann.traverse_layer_graph(input_)) # Setup objective function weights = set() for l in layers: weights.update(l.weights) #l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4) #wae_loss.append(l2_reg) print("LEN wae loss ", len(wae_loss)) obj = lbann.ObjectiveFunction(wae_loss) # Initialize check metric callback metrics = [lbann.Metric(recon, name='recon')] callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()] #Dump output (activation) for post processing pred_tensor = lbann.Concatenation(arg_max, name='pred_tensor') conc_out = lbann.Concatenation([input_,pred_tensor], name='conc_out') callbacks.append(lbann.CallbackDumpOutputs(batch_interval=run_args.dump_outputs_interval, execution_modes='test', directory=run_args.dump_outputs_dir, layers=f'{conc_out.name}')) # Construct model return lbann.Model(run_args.num_epochs, weights=weights, layers=layers, objective_function=obj, metrics=metrics, callbacks=callbacks)
Python
def construct_data_reader(run_args): """ Construct Protobuf message for Python data reader. The Python data reader will import this Python file to access the sample access functions. """ module_file = os.path.abspath(run_args.data_module_file) os.environ["DATA_CONFIG"] = os.path.abspath(run_args.data_config) #@todo: provide base directory and use join os.environ["DATA_PATH"] = run_args.data_path seq_len = run_args.sequence_length+run_args.z_dim print("SEQ LEN for env ", seq_len) os.environ["MAX_SEQ_LEN"] = str(seq_len) print("MODULE file ", module_file) module_name = os.path.splitext(os.path.basename(module_file))[0] module_dir = os.path.dirname(module_file) print("module_name: {}\tmodule_dir: {}".format(module_name, module_dir)) # Base data reader message message = lbann.reader_pb2.DataReader() # Training set data reader data_reader = message.reader.add() data_reader.name = "python" data_reader.role = "test" data_reader.shuffle = False data_reader.percent_of_data_to_use = 1.0 #data_reader.validation_percent = 0.1 data_reader.python.module = module_name data_reader.python.module_dir = module_dir data_reader.python.sample_function = "get_sample" data_reader.python.num_samples_function = "num_samples" data_reader.python.sample_dims_function = "sample_dims" return message
Python
def construct_trainer(lbann, ): """Construct LBANN trainer and training algorithm. Args: lbann (module): Module for LBANN Python frontend """ num_epochs = 1 mini_batch_size = num_samples() // 2 algo = lbann.KFAC( "kfac", lbann.BatchedIterativeOptimizer("sgd", epoch_count=num_epochs), damping_act="1e-2", damping_err="2e-2 2e-3", damping_bn_act="3e-2", damping_bn_err="4e-2 4e-3", damping_warmup_steps=500, kronecker_decay=0.6, print_time=True, print_matrix=False, print_matrix_summary=True, use_pi=True, compute_interval=1, ) trainer = lbann.Trainer(mini_batch_size, training_algo=algo) return trainer
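A rough driver sketch showing how this trainer might be wired together with the constructors above, assuming the usual LBANN launcher entry point lbann.run; run_args, the optimizer settings, and the job name are placeholders.

# Hypothetical driver; run_args is assumed to be parsed elsewhere.
import lbann

trainer = construct_trainer(lbann)
model = construct_model(run_args)
data_reader = construct_data_reader(run_args)
opt = lbann.SGD(learn_rate=0.01)  # placeholder optimizer
# Submit the experiment through the LBANN launcher.
lbann.run(trainer, model, data_reader, opt, job_name='atom_wae_kfac')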
Python
def create_python_data_reader(lbann, file_name, sample_function_name, num_samples_function_name, sample_dims_function_name, execution_mode): """Create protobuf message for Python data reader A Python data reader gets data by importing a Python module and calling functions in its scope. Args: lbann (module): Module for LBANN Python frontend. file_name (str): Python file. sample_function_name (str): Function to get a data sample. It takes one integer argument for the sample index and returns an `Iterator` of `float`s. sample_dims_function_name (str): Function to get dimensions of a data sample. It takes no arguments and returns a `(int,)`. num_samples_function_name (str): Function to get number of data samples in data set. It takes no arguments and returns an `int`. execution_mode (str): 'train', 'validation', or 'test' """ # Extract paths file_name = os.path.realpath(file_name) dir_name = os.path.dirname(file_name) module_name = os.path.splitext(os.path.basename(file_name))[0] # Construct protobuf message for data reader reader = lbann.reader_pb2.Reader() reader.name = 'python' reader.role = execution_mode reader.shuffle = False reader.percent_of_data_to_use = 1.0 reader.python.module = module_name reader.python.module_dir = dir_name reader.python.sample_function = sample_function_name reader.python.num_samples_function = num_samples_function_name reader.python.sample_dims_function = sample_dims_function_name return reader
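A brief usage sketch, assuming a dataset module dataset.py that defines the three sample-access functions; the file name is a placeholder.

import lbann

# Build one reader per execution mode and wrap them in a DataReader message.
train_reader = create_python_data_reader(
    lbann, 'dataset.py', 'get_sample', 'num_samples', 'sample_dims', 'train')
test_reader = create_python_data_reader(
    lbann, 'dataset.py', 'get_sample', 'num_samples', 'sample_dims', 'test')
message = lbann.reader_pb2.DataReader(reader=[train_reader, test_reader])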
Python
def numpy_l2norm2(x):
    """Square of L2 norm, computed with NumPy

    The computation is performed with 64-bit floats.

    """
    if x.dtype != np.float64:  # compare dtypes with !=; "is not" is always True here
        x = x.astype(np.float64)
    x = x.reshape(-1)
    return np.inner(x, x)
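A quick sanity check of the helper, with values chosen so the results are exact.

import numpy as np

v = np.array([3.0, 4.0], dtype=np.float32)
assert numpy_l2norm2(v) == 25.0  # |v|^2 = 3^2 + 4^2, accumulated in float64

m = np.arange(6, dtype=np.float32).reshape(2, 3)
assert numpy_l2norm2(m) == 55.0  # flattens before the inner product: 0+1+4+9+16+25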
Python
def perturb_latent_vectors(latent_file, noise_factors):
    """
    Given a NumPy (.npy) file of latent vectors, generate a series of perturbed latent vector
    arrays by adding zero-mean Gaussian noise to the latent vector components, with SD equal
    to noise_factor standard deviations of the respective components. Output each array to a
    separate .npy file.
    """
    # Load the latent vector table, which includes an identifier or SMILES string in the first column
    #latent_df = pd.read_csv(latent_file)
    latent_df = pd.DataFrame(np.load(latent_file))
    print("Read %s" % latent_file)
    print("In File shape ", latent_df.shape)
    id_col = latent_df.columns.values[:102]
    latent_cols = latent_df.columns.values[102:]
    latent_dim = len(latent_cols)
    latent_rows = len(latent_df)
    latent_array = latent_df[latent_cols].values

    for noise_factor in noise_factors:
        latent_array = latent_df[latent_cols].values
        if noise_factor > 0.0:
            output_df = pd.DataFrame(latent_df[id_col].values)
            std_dev = [np.std(latent_array[:,i]) for i in range(latent_dim)]
            for i in range(latent_dim):
                latent_array[:,i] += np.random.randn(latent_rows) * std_dev[i] * noise_factor
                output_df[latent_cols[i]] = latent_array[:,i]
        else:
            output_df = latent_df

        output_file = '%s_noise_sd_%.1f.npy' % (os.path.splitext(latent_file)[0], noise_factor)
        print("Out df shape ", output_df.shape)
        np.save(output_file, output_df.to_numpy())
        print("Wrote %s" % output_file)
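A usage sketch; the input file name is a placeholder, and the array is assumed to follow the layout above (102 identifier columns followed by the latent dimensions).

# Writes latent_vectors_noise_sd_0.0.npy, latent_vectors_noise_sd_0.5.npy,
# and latent_vectors_noise_sd_1.0.npy next to the input file.
perturb_latent_vectors('latent_vectors.npy', [0.0, 0.5, 1.0])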
Python
def download_url(url, save_path): """Helper function to download file from url and save it on save_path """ with urllib.request.urlopen(url) as dl_file: with open(save_path, 'wb') as out_file: out_file.write(dl_file.read())
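Example call, with a guard so the download is skipped if the file already exists; the URL and destination path are placeholders.

import os

save_path = '/tmp/dataset.tar.gz'
if not os.path.isfile(save_path):
    download_url('https://example.com/dataset.tar.gz', save_path)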
Python
def edge_list_to_dense(elist, num_vertices = 75):
    """ Generates a (num_vertices, num_vertices) adjacency matrix given edge list, elist
    """
    # np.float was removed in NumPy 1.24; use np.float64 explicitly
    adj_mat = np.zeros((num_vertices,num_vertices), dtype=np.float64)
    num_edges = elist.shape[0]

    for edge in range(num_edges):
        source, sink = elist[edge,:]
        source = source.item()
        sink = sink.item()
        adj_mat[source][sink] = 1.0
        adj_mat[sink][source] = 1.0
    return adj_mat
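A small worked example: a path graph on four vertices, given as an edge list, becomes a symmetric dense adjacency matrix.

import numpy as np

elist = np.array([[0, 1], [1, 2], [2, 3]])
adj = edge_list_to_dense(elist, num_vertices=4)
# Each undirected edge sets both (i, j) and (j, i):
expected = np.array([[0., 1., 0., 0.],
                     [1., 0., 1., 0.],
                     [0., 1., 0., 1.],
                     [0., 0., 1., 0.]])
assert np.array_equal(adj, expected)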
Python
def construct_data_reader(lbann): """Construct Protobuf message for Python data reader. The Python data reader will import the current Python file to access the sample access functions. Args: lbann (module): Module for LBANN Python frontend """ # Note: The training data reader should be removed when # https://github.com/LLNL/lbann/issues/1098 is resolved. message = lbann.reader_pb2.DataReader() message.reader.extend([ tools.create_python_data_reader( lbann, current_file, 'get_sample', 'num_samples', 'sample_dims', 'train', ), tools.create_python_data_reader( lbann, current_file, 'get_sample', 'num_samples', 'sample_dims', 'validate', ), tools.create_python_data_reader( lbann, current_file, 'get_sample', 'num_samples', 'sample_dims', 'tournament', ), ]) return message
Python
def construct_python_data_reader(): """Construct Protobuf message for Python data reader. The Python data reader will import this Python file to access the sample access functions. """ import os.path module_file = os.path.abspath(__file__) module_name = os.path.splitext(os.path.basename(module_file))[0] module_dir = os.path.dirname(module_file) # Base data reader message message = lbann.reader_pb2.DataReader() # Training set data reader data_reader = message.reader.add() data_reader.name = 'python' data_reader.role = 'train' data_reader.shuffle = True data_reader.percent_of_data_to_use = 1.0 data_reader.validation_percent = 0.2 data_reader.python.module = 'dataset3D' data_reader.python.module_dir = module_dir data_reader.python.sample_function = 'get_sample' data_reader.python.num_samples_function = 'num_samples' data_reader.python.sample_dims_function = 'sample_dims' return message
Python
def create_hdf5_data_reader( train_path, val_path, test_path, num_responses=4): """Create a data reader for CosmoFlow. Args: {train, val, test}_path (str): Path to the corresponding dataset. num_responses (int): The number of parameters to predict. """ reader_args = [ {"role": "train", "data_filename": train_path}, {"role": "validate", "data_filename": val_path}, {"role": "test", "data_filename": test_path}, ] for reader_arg in reader_args: reader_arg["data_file_pattern"] = "{}/*.hdf5".format( reader_arg["data_filename"]) reader_arg["hdf5_key_data"] = "full" #reader_arg["hdf5_key_responses"] = "unitPar" #reader_arg["num_responses"] = num_responses reader_arg.pop("data_filename") readers = [] for reader_arg in reader_args: reader = lbann.reader_pb2.Reader( name="hdf5", shuffle=(reader_arg["role"] != "test"), validation_percent=0, absolute_sample_count=0, percent_of_data_to_use=1.0, disable_labels=True, disable_responses=True, scaling_factor_int16=1.0, **reader_arg) readers.append(reader) return lbann.reader_pb2.DataReader(reader=readers)
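Usage sketch with placeholder dataset directories; each directory is expected to contain *.hdf5 files exposing the 'full' key used above.

# Paths are placeholders for the CosmoFlow train/validation/test directories.
message = create_hdf5_data_reader(
    train_path='/p/data/cosmoflow/train',
    val_path='/p/data/cosmoflow/val',
    test_path='/p/data/cosmoflow/test')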
Python
def download_data():
    """Download MNIST data files, if needed.

    Data files are downloaded from an MNIST mirror
    (https://ossci-datasets.s3.amazonaws.com/mnist/) and uncompressed.
    Does nothing if the files already exist.

    """

    # MNIST data files and associated URLs
    urls = {
        'train-images-idx3-ubyte': 'https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz',
        'train-labels-idx1-ubyte': 'https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz',
        't10k-images-idx3-ubyte': 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz',
        't10k-labels-idx1-ubyte': 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz',
    }

    # Download and uncompress MNIST data files, if needed
    for data_file, url in urls.items():
        data_file = os.path.join(data_dir, data_file)
        compressed_file = data_file + '.gz'
        if not os.path.isfile(data_file):
            request = urllib.request.Request(
                url,
                headers={'User-Agent': 'LBANN/vision-app'},
            )
            with urllib.request.urlopen(request) as response, \
                 open(compressed_file, 'wb') as out_file:
                out_file.write(response.read())
            with gzip.open(compressed_file, 'rb') as in_file, \
                 open(data_file, 'wb') as out_file:
                out_file.write(in_file.read())
Python
def make_data_reader(validation_percent=0.1):
    """Make Protobuf message for MNIST data reader.

    MNIST data is downloaded if needed.

    Args:
        validation_percent (float): The proportion of training samples
            to hold out as the validation dataset.

    """

    # Download MNIST data files
    download_data()

    # Load Protobuf message from file
    protobuf_file = os.path.join(data_dir, 'data_reader.prototext')
    message = lbann.lbann_pb2.LbannPB()
    with open(protobuf_file, 'r') as f:
        google.protobuf.text_format.Merge(f.read(), message)
    message = message.data_reader
    if validation_percent is not None:
        assert message.reader[0].role == "train"
        message.reader[0].validation_percent = validation_percent

    # Set paths
    for reader in message.reader:
        reader.data_filedir = data_dir

    return message
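For inspection, the returned message can be serialized back to prototext; a minimal sketch:

import google.protobuf.text_format

message = make_data_reader(validation_percent=0.1)
print(google.protobuf.text_format.MessageToString(message))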
Python
def GraphExpand(features, indices, name=None):
    """Places the features according to the indices into an expanded matrix:
       output[i] = features[indices[i]]

       Args:
           features (Layer): 2D matrix with shape (N, F)
           indices (Layer): 1D matrix with shape (E)
       Returns:
           (Layer): 2D matrix with shape (E, F)
    """
    GraphExpand.count += 1
    if (name is None):
        name = f"graph_expand_{GraphExpand.count}"
    return lbann.Gather(features, indices, axis=0, name=name)
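The gather semantics are easiest to see with a plain NumPy analogue of output[i] = features[indices[i]]; NumPy stands in for the LBANN layers here.

import numpy as np

features = np.array([[1., 1.],   # node 0
                     [2., 2.],   # node 1
                     [3., 3.]])  # node 2  -> shape (N, F) = (3, 2)
indices = np.array([0, 2, 2, 1])  # shape (E,) = (4,)
expanded = features[indices]      # shape (E, F) = (4, 2)
assert np.array_equal(expanded,
                      np.array([[1., 1.], [3., 3.], [3., 3.], [2., 2.]]))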
Python
def _generate_class(message_descriptor, base_field_name, base_class, base_kwargs, base_has_export_proto): """Generate new class from Protobuf message. Args: message (google.protobuf.descriptor.Descriptor): Descriptor for Protobuf message. base_field_name (str): Name of corresponding field in parent message. base_class (type): Base class for generated class. base_kwargs (Iterable of str): Keyword arguments for base class `__init__` method. base_has_export_proto (bool): Whether the base class implements an `export_proto` method. If `True`, the generated class `export_proto` will set the appropriate field in the Protobuf message returned by the base class `export_proto`. Returns: type: Generated class. """ # Names of Protobuf message and its fields message_name = message_descriptor.name field_descriptors = message_descriptor.fields_by_name field_names = field_descriptors.keys() enums = message_descriptor.enum_types_by_name # Handle "enum" type data. all_enums = {} for enum_name, enum_desc in enums.items(): enum_val_to_num = {} enum_val_descs = enum_desc.values_by_name for val_name, val_desc in enum_val_descs.items(): enum_val_to_num[val_name] = val_desc.number all_enums[enum_name] = type(enum_name, (), enum_val_to_num) # Note (trb 12/18/19): This is *NOT* meant to be a rigorous enum # implementation (see the 'enum' module for such a thing). The # goal is to simply expose "enum-like" semantics to the Python # front-end: # # x = ClassName.EnumName.ENUM_VALUE # # Note that the value held by "x" after this will be "int". Based # on my testing, Protobuf message classes are happy enough to take # their enum-valued field values as "int", so this is not a # problem. # Make sure fields in generated and base classes are distinct for arg in base_kwargs: if arg in field_names: raise RuntimeError( 'class {0} and its parent class {1} ' 'both define the field {2}. This is a bug!' .format(message_name, base_class.__name__, arg)) def __init__(self, *args, **kwargs): # Extract arguments to pass to base class constructor _base_kwargs = {} for arg in base_kwargs: if arg in kwargs: _base_kwargs[arg] = kwargs[arg] del kwargs[arg] base_class.__init__(self, *args, **_base_kwargs) # Make sure arguments are valid for arg in kwargs: if arg not in field_names: raise ValueError('Unknown argument {0}'.format(arg)) # Set field values for arg in field_names: setattr(self, arg, kwargs.get(arg, None)) def export_proto(self): """Construct and return a protobuf message.""" # Construct Protobuf message if base_has_export_proto: proto = base_class.export_proto(self) message = getattr(proto, base_field_name) message.SetInParent() else: # TODO (trb 08/01/2019): This list would have to be # updated any time another _pb2 file is created. It might # be better to have this as a global `frozenset` # (ndryden's suggestion) that gets maintained # elsewhere. But this code either works or doesn't get # executed now, so I vote delaying this fix until a need # arises. 
proto_modules = [callbacks_pb2, layers_pb2, metrics_pb2, model_pb2, objective_functions_pb2, operators_pb2, optimizers_pb2, training_algorithm_pb2, weights_pb2] proto_type = None while proto_type is None: proto_type = getattr(proto_modules.pop(), message_name, None) proto = proto_type() message = proto # Set message for field_name in field_names: val = getattr(self, field_name) if val is not None: try: field = getattr(message, field_name) field_descriptor = field_descriptors[field_name] if field_descriptor.message_type in _protobuf_type_wrappers: field.SetInParent() field.value = val elif field_descriptor.label == google.protobuf.descriptor.FieldDescriptor.LABEL_REPEATED: iterable_val = make_iterable(val) if field_descriptor.type == field_descriptor.TYPE_MESSAGE: field.extend([x.export_proto() for x in iterable_val]) else: field.extend(iterable_val) elif isinstance(val, google.protobuf.message.Message): getattr(message, field_name).MergeFrom(val) elif callable(getattr(val, "export_proto", None)): # 'val' is (hopefully) an LBANN class # representation of a protobuf message. getattr(message, field_name).MergeFrom(val.export_proto()) else: setattr(message, field_name, val) except: raise TypeError('{} is invalid type for {}.{}' .format(type(val).__name__, self.__class__.__name__, field_name)) # Return Protobuf message return proto def get_field_names(self): """Names of parameters in derived class.""" return field_names # Generate docstring if message_descriptor.fields: doc = 'Fields:\n' for field in message_descriptor.fields: doc += ' {0} ({1} {2})\n'.format( field.name, _protobuf_field_label_names.get(field.label, 'unknown'), _protobuf_field_type_names.get(field.type, 'unknown')) else: doc = 'Fields: none\n' # Create new class class_dictionary = {'__init__': __init__, '__doc__': doc, 'export_proto': export_proto, 'get_field_names': get_field_names} class_dictionary.update(all_enums) return type(message_name, (base_class,), class_dictionary)
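The enum handling above builds plain classes whose attributes hold the numeric values, so the Python front-end can write ClassName.EnumName.ENUM_VALUE. A standalone illustration of that pattern follows; the enum name and values are made up.

# Mimics: all_enums[enum_name] = type(enum_name, (), enum_val_to_num)
enum_val_to_num = {'NEAREST': 0, 'LINEAR': 1}   # hypothetical enum values
Interpolation = type('Interpolation', (), enum_val_to_num)

assert Interpolation.LINEAR == 1  # attribute access yields a plain int,
                                  # which protobuf accepts for enum fields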
Python
def export_proto(self): """Construct and return a protobuf message.""" # Construct Protobuf message if base_has_export_proto: proto = base_class.export_proto(self) message = getattr(proto, base_field_name) message.SetInParent() else: # TODO (trb 08/01/2019): This list would have to be # updated any time another _pb2 file is created. It might # be better to have this as a global `frozenset` # (ndryden's suggestion) that gets maintained # elsewhere. But this code either works or doesn't get # executed now, so I vote delaying this fix until a need # arises. proto_modules = [callbacks_pb2, layers_pb2, metrics_pb2, model_pb2, objective_functions_pb2, operators_pb2, optimizers_pb2, training_algorithm_pb2, weights_pb2] proto_type = None while proto_type is None: proto_type = getattr(proto_modules.pop(), message_name, None) proto = proto_type() message = proto # Set message for field_name in field_names: val = getattr(self, field_name) if val is not None: try: field = getattr(message, field_name) field_descriptor = field_descriptors[field_name] if field_descriptor.message_type in _protobuf_type_wrappers: field.SetInParent() field.value = val elif field_descriptor.label == google.protobuf.descriptor.FieldDescriptor.LABEL_REPEATED: iterable_val = make_iterable(val) if field_descriptor.type == field_descriptor.TYPE_MESSAGE: field.extend([x.export_proto() for x in iterable_val]) else: field.extend(iterable_val) elif isinstance(val, google.protobuf.message.Message): getattr(message, field_name).MergeFrom(val) elif callable(getattr(val, "export_proto", None)): # 'val' is (hopefully) an LBANN class # representation of a protobuf message. getattr(message, field_name).MergeFrom(val.export_proto()) else: setattr(message, field_name, val) except: raise TypeError('{} is invalid type for {}.{}' .format(type(val).__name__, self.__class__.__name__, field_name)) # Return Protobuf message return proto
Python
def export_proto(self): """Get a protobuf representation of this object.""" op = OpProto.Operator() op.input_datatype = self.input_type op.output_datatype = self.output_type if self.device: op.device_allocation = self.device op.parameters.Pack(self.do_export_proto()) return op
Python
def do_export_proto(self): """Get a protobuf representation of this object. Must be implemented in derived classes. """ raise NotImplementedError
Python
def export_proto(self): """Get a protobuf representation of this object.""" algo = AlgoProto.TrainingAlgorithm() algo.name = self.name algo.parameters.Pack(self.do_export_proto()) return algo
Python
def export_proto(self): """Get a protobuf representation of this object.""" msg = AlgoProto.SGD.TerminationCriteria() if self.batch_count > 0: msg.max_batches = self.batch_count if self.epoch_count > 0: msg.max_epochs = self.epoch_count if self.seconds > 0: msg.max_seconds = self.seconds return msg
Python
def do_export_proto(self): """Get a protobuf representation of this object.""" params = AlgoProto.SGD() params.stopping_criteria.CopyFrom(self.stopping.export_proto()) return params
Python
def export_proto(self): """Get a protobuf representation of this object.""" msg = AlgoProto.LTFB.TerminationCriteria() msg.max_tournaments = self.metalearning_steps return msg
Python
def do_export_proto(self): """Get a protobuf representation of this object.""" params = AlgoProto.LTFB() params.stopping_criteria.CopyFrom(self.stopping.export_proto()) params.meta_learning_strategy.Pack(self.metalearning.export_proto()) params.local_training_algorithm.CopyFrom(self.local_algo.export_proto()) return params
Python
def export_proto(self): """Get a protobuf representation of this object.""" MutationStrategyMsg = AlgoProto.MutationStrategy msg = MutationStrategyMsg() if self.strategy == "null_mutation": NullMutationMsg = MutationStrategyMsg.NullMutation msg.null_mutation.CopyFrom(NullMutationMsg()) elif self.strategy == "replace_activation": ReplaceActivationMsg = MutationStrategyMsg.ReplaceActivation msg.replace_activation.CopyFrom(ReplaceActivationMsg()) elif self.strategy == "replace_convolution": ReplaceConvolutionMsg = MutationStrategyMsg.ReplaceConvolution msg.replace_convolution.CopyFrom(ReplaceConvolutionMsg()) elif self.strategy == "hybrid_mutation": HybridMutationMsg = MutationStrategyMsg.HybridMutation msg.hybrid_mutation.CopyFrom(HybridMutationMsg()) else: raise ValueError("Unknown Strategy") return msg
Python
def export_proto(self): """Get a protobuf representation of this object.""" ExchangeStrategyMsg = AlgoProto.RandomPairwiseExchange.ExchangeStrategy msg = ExchangeStrategyMsg() msg.weights_name.extend([n for n in self.weights_names]) if self.strategy == "checkpoint_binary": CheckpointBinaryMsg = ExchangeStrategyMsg.CheckpointBinary msg.checkpoint_binary.CopyFrom(CheckpointBinaryMsg()) elif self.strategy == "checkpoint_file": if self.checkpoint_dir: msg.checkpoint_file.checkpoint_dir = self.checkpoint_dir else: raise Exception("Must provide checkpoint dir") elif self.strategy == "sendrecv_weights": msg.sendrecv_weights.exchange_hyperparameters = self.exchange_hyperparameters else: raise ValueError("Unknown strategy") return msg
Python
def export_proto(self): """Get a protobuf representation of this object.""" msg = AlgoProto.RandomPairwiseExchange() for key, value in self.metric_strategies.items(): msg.metric_name_strategy_map[key] = value msg.exchange_strategy.CopyFrom(self.exchange_strategy.export_proto()) msg.mutation_strategy.CopyFrom(self.mutation_strategy.export_proto()) return msg