Dataset columns: language (stringclasses, 6 values), original_string (stringlengths, 25–887k), text (stringlengths, 25–887k).
Python
def _get_all_steps(pipeline: Pipeline) -> List:
    """
    Returns a list of steps in a sklearn pipeline.

    Args:
        pipeline (Pipeline): A scikit-learn pipeline.
    """
    steps = []
    for step in pipeline.steps:
        if isinstance(step, tuple):
            step = step[1]
        if hasattr(step, "steps"):
            steps += _get_all_steps(step)
        else:
            steps.append(step)
    return steps
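As a hedged usage sketch: assuming the `_get_all_steps` definition above is in scope (together with `from typing import List` and `from sklearn.pipeline import Pipeline`), and using plain scikit-learn transformers purely for illustration, the helper flattens nested pipelines into a flat list of estimator objects.

from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.linear_model import LogisticRegression

# A nested pipeline: the inner make_pipeline is itself a step of the outer one.
inner = make_pipeline(StandardScaler(), MinMaxScaler())
outer = Pipeline(steps=[("prep", inner), ("clf", LogisticRegression())])

steps = _get_all_steps(outer)
print(steps)  # [StandardScaler(), MinMaxScaler(), LogisticRegression()]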
Python
def save_yml(self, fout):
    """
    Save the features bucket to a yaml file.

    Args:
        fout: file output
    """
    check_is_fitted(self.steps[-1][1])
    self.features_bucket_mapping_.save_yml(fout)
Python
def _check_pipeline_duplicated_columns(pipeline: Pipeline) -> None:
    """
    Check that the pipeline has no duplicated columns.

    This check only works on fitted pipelines!
    """
    assert isinstance(pipeline, Pipeline)

    bucketers_vars = []
    bucketers_on_all = []
    bucketers_with_vars = []
    for step in _get_all_steps(pipeline):
        if is_fitted(step):
            if hasattr(step, "variables_"):
                if len(step.variables_) == 0:
                    bucketers_vars += ["**all**"]
                    bucketers_on_all += [step]
                else:
                    bucketers_vars += step.variables_
                    bucketers_with_vars += [step]
        else:
            if hasattr(step, "variables"):
                if len(step.variables) == 0:
                    bucketers_vars += ["**all**"]
                    bucketers_on_all += [step]
                else:
                    bucketers_vars += step.variables
                    bucketers_with_vars += [step]

    if len(list(set(bucketers_vars))) > 1 and "**all**" in list(set(bucketers_vars)):
        msg = "A SkorecardPipeline should bucket each feature only once.\n"
        msg += f"These bucketers bucket all features: {bucketers_on_all}\n"
        msg += f"While these bucket specific ones: {bucketers_with_vars}\n"
        msg += "This means some features would have been bucketed sequentially. "
        msg += "To solve this, either use a BucketingProcess, or remove the duplicates from one of the bucketers. "
        msg += "Remember that if you don't specify 'variables', a bucketer will bucket all columns."
        raise BucketingPipelineError(msg)

    if len(set(bucketers_vars)) != len(bucketers_vars):
        values, counts = np.unique(bucketers_vars, return_counts=True)
        duplicates = list(set(values[counts > 1]))
        msg = "A SkorecardPipeline should bucket each feature only once. "
        msg += f"The features {duplicates} appear in multiple bucketers, "
        msg += "meaning they would have been bucketed sequentially. "
        msg += "To solve this, either use a BucketingProcess, or remove the duplicates from one of the bucketers. "
        msg += "Remember that if you don't specify 'variables', a bucketer will bucket all columns."
        raise BucketingPipelineError(msg)
Python
def _check_pipeline_all_bucketers(pipeline: Pipeline) -> None:
    """
    Ensure all specified bucketing steps are skorecard bucketers.

    Args:
        pipeline: scikit-learn pipeline.
    """
    assert isinstance(pipeline, Pipeline)

    for step in _get_all_steps(pipeline):
        if all(x not in str(type(step)) for x in ["bucketing_process", "skorecard.bucketers"]):
            msg = "All bucketing steps must be skorecard bucketers. "
            msg += f"Remove {step} from the pipeline."
            raise NotBucketObjectError(msg)
Python
def fit_interactive(self, X, y=None, mode="external"):
    """
    Fit a bucketer and then interactively edit the fit using a dash app.

    Note we are using a [jupyterdash](https://medium.com/plotly/introducing-jupyterdash-811f1f57c02e) app,
    which supports 3 different modes:

    - 'external' (default): Start dash server and print URL
    - 'inline': Start dash app inside an Iframe in the jupyter notebook
    - 'jupyterlab': Start dash app as a new tab inside jupyterlab
    """
    # We need to make sure we only fit if not already fitted
    # This prevents a user losing manually defined boundaries
    # when re-running .fit_interactive()
    if not is_fitted(self):
        self.fit(X, y)

    self.app = JupyterDash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
    add_basic_layout(self)
    add_bucketing_callbacks(self, X, y)
    self.app.run_server(mode=mode)
Python
def _update_column_fit(self, X, y, feature, special, splits, right, generate_summary=False):
    """
    Extract out part of the fit for a column.

    Useful when we want to interactively update the fit.
    """
    for step in self.steps:
        if feature in step[1].variables:
            step[1]._update_column_fit(
                X=X,
                y=y,
                feature=feature,
                special=special,
                splits=splits,
                right=right,
                generate_summary=generate_summary,
            )
Python
def to_skorecard_pipeline(pipeline: Pipeline) -> SkorecardPipeline:
    """
    Transform a scikit-learn Pipeline to a SkorecardPipeline.

    A SkorecardPipeline is a normal scikit-learn pipeline with some extra methods and attributes.

    Example:

    ```python
    from skorecard.pipeline.pipeline import SkorecardPipeline, to_skorecard_pipeline
    from skorecard.bucketers import DecisionTreeBucketer, OrdinalCategoricalBucketer
    from skorecard import datasets
    from sklearn.pipeline import make_pipeline

    pipe = make_pipeline(
        DecisionTreeBucketer(variables=["LIMIT_BAL", "BILL_AMT1"], max_n_bins=5),
        OrdinalCategoricalBucketer(variables=["EDUCATION", "MARRIAGE"], tol=0.05),
    )
    sk_pipe = to_skorecard_pipeline(pipe)

    df = datasets.load_uci_credit_card(as_frame=True)
    features = ["LIMIT_BAL", "BILL_AMT1", "EDUCATION", "MARRIAGE"]
    X = df[features]
    y = df["default"].values
    ```

    Args:
        pipeline (Pipeline): `scikit-learn` pipeline instance.

    Returns:
        pipeline (skorecard.pipeline.SkorecardPipeline): modified pipeline instance.
    """
    assert isinstance(pipeline, Pipeline)

    if isinstance(pipeline, SkorecardPipeline):
        return pipeline
    else:
        return SkorecardPipeline(steps=pipeline.steps, memory=pipeline.memory, verbose=pipeline.verbose)
Python
def X_y():
    """Set of X, y for testing the transformers."""
    X = np.array(
        [[0, 1], [1, 0], [0, 0], [3, 2], [0, 1], [1, 2], [2, 0], [2, 1], [0, 0]],
        np.int32,
    )
    y = np.array([0, 0, 0, 1, 1, 1, 0, 0, 1])

    return X, y
Python
def X1_X2():
    """Set of dataframes to test psi."""
    X1 = pd.DataFrame(
        [[0, 1], [1, 0], [0, 0], [3, 2], [0, 1], [1, 2], [2, 0], [2, 1], [0, 0]], columns=["col1", "col2"]
    )
    X2 = pd.DataFrame(
        [[0, 2], [3, 0], [0, 0], [1, 2], [0, 4], [2, 1], [1, 1], [2, 1], [1, 1]], columns=["col1", "col2"]
    )
    return X1, X2
Python
def perc_data_bars(column):
    """
    Display bar plots inside a dash DataTable cell.

    Assumes a value between 0 - 100.

    Adapted from:
    https://dash.plotly.com/datatable/conditional-formatting
    """
    n_bins = 100
    bounds = [i * (1.0 / n_bins) for i in range(n_bins + 1)]
    ranges = [float(x) for x in range(101)]
    styles = []
    for i in range(1, len(bounds)):
        min_bound = ranges[i - 1]
        max_bound = ranges[i]
        max_bound_percentage = bounds[i] * 100

        # For odd rows
        styles.append(
            {
                "if": {
                    "filter_query": (
                        "{{{column}}} >= {min_bound}"
                        + (" && {{{column}}} < {max_bound}" if (i < len(bounds) - 1) else "")
                    ).format(column=column, min_bound=min_bound, max_bound=max_bound),
                    "column_id": column,
                    "row_index": "odd",
                },
                "background": (
                    """
                    linear-gradient(90deg,
                    #0074D9 0%,
                    #0074D9 {max_bound_percentage}%,
                    rgb(248, 248, 248) {max_bound_percentage}%,
                    rgb(248, 248, 248) 100%)
                    """.format(max_bound_percentage=max_bound_percentage)
                ),
                "paddingBottom": 2,
                "paddingTop": 2,
            }
        )
        # For even rows
        styles.append(
            {
                "if": {
                    "filter_query": (
                        "{{{column}}} >= {min_bound}"
                        + (" && {{{column}}} < {max_bound}" if (i < len(bounds) - 1) else "")
                    ).format(column=column, min_bound=min_bound, max_bound=max_bound),
                    "column_id": column,
                    "row_index": "even",
                },
                "background": (
                    """
                    linear-gradient(90deg,
                    #0074D9 0%,
                    #0074D9 {max_bound_percentage}%,
                    white {max_bound_percentage}%,
                    white 100%)
                    """.format(max_bound_percentage=max_bound_percentage)
                ),
                "paddingBottom": 2,
                "paddingTop": 2,
            }
        )

    return styles
Python
def colorize_cell(column):
    """Colourize the integer bucket number.

    We can safely assume max 20 buckets, as features are often binned to 3-7 buckets.
    We will cycle through them.
    """
    styles = []
    for i in range(-10, 21):
        styles.append(
            {
                "if": {
                    # 'row_index': i,  # number | 'odd' | 'even'
                    "filter_query": f"{{{column}}} = '{i}'",
                    "column_id": column,
                },
                "backgroundColor": get_bucket_color(i),
                "color": "white",
            }
        )
    return styles
Python
def is_monotonic_increasing(x):
    """
    Helper function to determine if a list is monotonically increasing.
    """
    dx = np.diff(x)
    return np.all(dx >= 0)
Python
def is_increasing(x):
    """
    Helper function to determine if a list is strictly increasing.
    """
    dx = np.diff(x)
    return np.all(dx > 0)
Python
def is_sequential(x):
    """
    Helper function to determine if a list is monotonically increasing with step sizes of 0 or 1 (no skipped values).
    """
    dx = np.diff(x)
    return np.all(np.isin(dx, [0, 1]))
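A small illustrative check of the three helpers on toy lists, assuming the definitions above (and numpy) are in scope; expected outputs are shown as comments.

print(is_monotonic_increasing([1, 2, 2, 3]))   # True: no decreases
print(is_increasing([1, 2, 2, 3]))             # False: 2 -> 2 is not a strict increase
print(is_sequential([0, 0, 1, 2, 2, 3]))       # True: consecutive differences are all 0 or 1
print(is_sequential([0, 2, 3]))                # False: jumps from 0 to 2, so bucket 1 is skipped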
Python
def add_bucketing_callbacks(self, X, y):
    """
    Adds callbacks to the interactive bucketing app.

    Meant for normal bucketers, not the two-step BucketingProcess.
    """
    app = self.app
    add_common_callbacks(self)

    @app.callback(
        [Output("input_map", "value")],
        [
            Input("input_column", "value"),
        ],
    )
    def update_input_map(col):
        """Update bucketer map."""
        input_map = self.features_bucket_mapping_.get(col).map
        col_type = self.features_bucket_mapping_.get(col).type

        if col_type == "categorical":
            # We also allow for treating numericals as categoricals
            # if the keys are strings, we'll need to quote them
            if isinstance(list(input_map.keys())[0], str):
                str_repr = ",\n\t".join([f"'{k}': {v}" for k, v in input_map.items()])
            else:
                str_repr = ",\n\t".join([f"{k}: {v}" for k, v in input_map.items()])
            str_repr = f"{{\n\t{str_repr}\n}}"
        else:
            str_repr = str(input_map)
        return [str_repr]

    @app.callback(
        [Output("input_map_helptext", "children")],
        [
            Input("input_column", "value"),
        ],
    )
    def update_input_map_feedback(col):
        col_type = self.features_bucket_mapping_.get(col).type
        right = self.features_bucket_mapping_.get(col).right
        if col_type == "categorical":
            msg = "Edit the prebucket mapping dictionary, e.g. {'value' : 'pre-bucket'}"
        if col_type == "numerical" and right:
            msg = "Edit the prebucket mapping boundaries. "
            msg += "Values up to and including the boundary are put into a bucket (right=True)"
        if col_type == "numerical" and not right:
            msg = "Edit the prebucket mapping boundaries. "
            msg += "Values up to but not including the boundary are put into a bucket (right=False)"
        return [msg]

    @app.callback(
        [
            Output("bucket_table", "data"),
            Output("graph-bucket", "figure"),
            Output("input_map", "invalid"),
            Output("input_map_feedback", "children"),
        ],
        [Input("input_map", "value")],
        [State("input_column", "value")],
    )
    def get_bucket_table(input_map, col):
        """Loads the table and the figure, when the input_map changes."""
        col_type = self.features_bucket_mapping_.get(col).type

        # Load the object from the text input into a python object
        if col_type == "numerical":
            try:
                input_map = json.loads(input_map)
                assert len(input_map) > 0
            except Exception:
                msg = "Make sure the input is properly formatted as a list"
                return no_update, no_update, True, [msg]
            # validate input
            if not is_increasing(input_map):
                return no_update, no_update, True, ["Make sure the list values are in increasing order"]
        else:
            try:
                # note: using ast.literal_eval is not safe
                # for use when you don't trust the user input
                # in this case, it's a local user using his/her own kernel
                # note: we're using literal_eval because JSON enforces quoted keys
                input_map = ast.literal_eval(input_map)
                # re-sort on value, key
                input_map = dict(sorted(input_map.items(), key=lambda x: (x[1], x[0])))
            except Exception:
                msg = "Make sure the input is properly formatted as a dictionary"
                return no_update, no_update, True, [msg]
            # validate input
            if not min(input_map.values()) == 0:
                msg = "Dictionary values (buckets) must start at 0"
                return no_update, no_update, True, [msg]
            if not is_sequential(list(input_map.values())):
                msg = "Dictionary values (buckets) must be sequentially increasing with steps of 1"
                return no_update, no_update, True, [msg]

        # Update the fit for this specific column
        special = self.features_bucket_mapping_.get(col).specials
        right = self.features_bucket_mapping_.get(col).right
        # Note we passed X, y to add_bucketing_callbacks() so they are available here.
        # make sure to re-generate the summary table
        self._update_column_fit(
            X=X, y=y, feature=col, special=special, splits=input_map, right=right, generate_summary=True
        )

        # Retrieve the new bucket tables and plots
        table = self.bucket_table(col)
        # unsupervised bucketers don't have an event rate.
        if "Event Rate" in table.columns:
            table["Event Rate"] = round(table["Event Rate"] * 100, 2)
        fig = self.plot_bucket(col)
        # remove title from plot
        fig.update_layout(title="")
        return table.to_dict("records"), fig, False, no_update
Python
def add_common_callbacks(self):
    """
    Add dash callbacks.

    Common callbacks for the normal bucketer app and the BucketingProcess app.
    """
    app = self.app

    @app.callback(
        [
            Output("column_title", "children"),
            Output("column_type", "children"),
        ],
        [
            Input("input_column", "value"),
        ],
    )
    def update_column_title(col):
        """Update the content title."""
        col_type = self.features_bucket_mapping_.get(col).type
        return [f"Feature '{col}'"], [col_type]

    @app.callback(
        [Output("code_export", "content")],
        [Input("input_map", "value")],
    )
    def update_code_export(input_map):
        return [f"UserInputBucketer({self.features_bucket_mapping_.as_dict()})"]
Python
def X_y():
    """Set of X, y for testing the transformers."""
    X = pd.DataFrame(
        np.array(
            [[0, 1], [1, 0], [0, 0], [3, 2], [0, 1], [1, 2], [2, 0], [2, 1], [0, 0]],
            np.int32,
        ),
        columns=["col1", "col2"],
    )
    y = pd.Series(np.array([0, 0, 0, 1, 1, 1, 0, 0, 1]))

    return X, y
Python
def X_y_2():
    """Set of X, y for testing the transformers.

    In the first column, bucket 3 is not present in class 1.
    """
    X = pd.DataFrame(
        np.array(
            [[0, 1], [1, 0], [0, 0], [3, 2], [0, 1], [1, 2], [2, 0], [2, 1], [0, 0]],
            np.int32,
        ),
        columns=["col1", "col2"],
    )
    y = pd.Series(np.array([0, 0, 0, 0, 0, 1, 1, 1, 1]))

    return X, y
Python
def df_with_missings(df):
    """
    Add missing values to above df.
    """
    df_with_missings = df.copy()
    for col in ["EDUCATION", "MARRIAGE", "BILL_AMT1", "LIMIT_BAL", "pet_ownership"]:
        df_with_missings.loc[df_with_missings.sample(frac=0.2, random_state=42).index, col] = np.nan

    # Make sure there are 8 unique values (7 unique plus some NA)
    assert len(df_with_missings["EDUCATION"].unique()) == 8

    # Make sure there are NAs
    assert any([np.isnan(x) for x in df_with_missings["EDUCATION"].unique().tolist()])

    return df_with_missings
Python
def load_uci_credit_card(return_X_y=False, as_frame=False):
    """Loads the UCI Credit Card Dataset.

    This dataset contains a sample of the [Default of Credit Card Clients Dataset](https://www.kaggle.com/uciml/default-of-credit-card-clients-dataset).

    Example:

    ```python
    from skorecard import datasets
    df = datasets.load_uci_credit_card(as_frame=True)
    ```

    Args:
        return_X_y (bool): If True, returns `(data, target)` instead of a dict object.
        as_frame (bool): give the pandas dataframe instead of X, y matrices (default=False).

    Returns:
        (pd.DataFrame, dict or tuple) features and target, as follows:

        - if as_frame is True: returns a pd.DataFrame with y as a target
        - if return_X_y is True: returns a tuple (X, y)
        - if both are False (default setting): returns a dictionary where the key `data` contains
          the features, and the key `target` is the target
    """  # noqa
    file = pkgutil.get_data("skorecard", "data/UCI_Credit_Card.zip")
    df = pd.read_csv(io.BytesIO(file), compression="zip")
    df = df.rename(columns={"default.payment.next.month": "default"})

    if as_frame:
        return df[["EDUCATION", "MARRIAGE", "LIMIT_BAL", "BILL_AMT1", "default"]]
    X, y = (
        df[["EDUCATION", "MARRIAGE", "LIMIT_BAL", "BILL_AMT1"]],
        df["default"].values,
    )
    if return_X_y:
        return X, y

    return {"data": X, "target": y}
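A brief usage sketch of the three return modes, assuming skorecard is installed and ships the bundled UCI_Credit_Card.zip referenced above.

from skorecard import datasets

df = datasets.load_uci_credit_card(as_frame=True)       # single dataframe including the 'default' column
X, y = datasets.load_uci_credit_card(return_X_y=True)   # feature frame and target array
bunch = datasets.load_uci_credit_card()                 # dict with 'data' and 'target' keys
print(df.shape, X.shape, len(y), sorted(bunch.keys()))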
Python
def load_credit_card(return_X_y=False, as_frame=False):
    """
    Loads the complete UCI Credit Card Dataset, by fetching it from open_ml.

    Args:
        return_X_y: (bool) If True, returns ``(data, target)`` instead of a dict object.
        as_frame: (bool) give the pandas dataframe instead of X, y matrices (default=False).

    Returns:
        (pd.DataFrame, dict or tuple) features and target, as follows:

        - if as_frame is True: returns a pd.DataFrame with y as a target
        - if return_X_y is True: returns a tuple (X, y)
        - if both are False (default setting): returns a dictionary where the key `data` contains
          the features, and the key `target` is the target
    """
    try:
        data = fetch_openml(
            name="default-of-credit-card-clients",
            data_home=None,
            cache=True,
            as_frame=as_frame,
            return_X_y=return_X_y,
        )
    except Exception as e:
        # update the error message with a more helpful message.
        error_msg = (
            "Cannot retrieve the dataset from the repository. Make sure there is no firewall blocking "
            "the connection.\nAlternatively, download it manually from https://www.openml.org/d/42477"
        )
        raise type(e)(f"{e.args[0]}\n{error_msg}")

    # The target is by default encoded as a string.
    # Ensure it is returned as an integer.
    if as_frame:
        data = data["frame"]
        data["y"] = data["y"].astype(int)

    if return_X_y:
        X = data[0]
        y = data[1]
        y = y.astype(int)
        return X, y

    return data
Python
def _filter_specials_for_fit(X, y, specials: Dict):
    """
    We need to filter out the specials from a vector.

    Because we don't want to use those values to determine bin boundaries.
    """
    flt_vals = list(itertools.chain(*specials.values()))
    flt = X.isin(flt_vals)
    X_out = X[~flt]
    if y is not None:
        y_out = y[~flt]
    else:
        y_out = y
    return X_out, y_out
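A minimal sketch of the filtering behavior on a pandas Series, assuming the function above is in scope (together with `import itertools` and `Dict` from `typing`); the special codes and column values are made up for illustration.

import pandas as pd

X = pd.Series([1, 2, -999, 4, -888])
y = pd.Series([0, 1, 1, 0, 1])
specials = {"missing codes": [-999, -888]}

X_out, y_out = _filter_specials_for_fit(X, y, specials)
print(X_out.tolist())  # [1, 2, 4] -- special codes are excluded from boundary fitting
print(y_out.tolist())  # [0, 1, 0] -- y is filtered with the same mask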
Python
def _find_missing_bucket(self, feature):
    """
    Used for when missing_treatment is in:
    ["most_frequent", "most_risky", "least_risky", "neutral", "similar", "passthrough"]

    Calculates the new bucket for us to put the missing values in.
    """
    if self.missing_treatment == "most_frequent":
        most_frequent_row = (
            self.bucket_tables_[feature].sort_values("Count", ascending=False).reset_index(drop=True).iloc[0]
        )
        if most_frequent_row["label"] != "Missing":
            missing_bucket = int(most_frequent_row["bucket_id"])
        else:
            # missings are already the most common bucket, pick the next one
            missing_bucket = int(
                self.bucket_tables_[feature]
                .sort_values("Count", ascending=False)
                .reset_index(drop=True)["bucket_id"][1]
            )
    elif self.missing_treatment in ["most_risky", "least_risky"]:
        if self.missing_treatment == "least_risky":
            ascending = True
        else:
            ascending = False
        # if fitted with .fit(X) and not .fit(X, y)
        if "Event" not in self.bucket_tables_[feature].columns:
            raise AttributeError("bucketer must be fit with y to determine the risk rates")

        missing_bucket = int(
            self.bucket_tables_[feature][self.bucket_tables_[feature]["bucket_id"] >= 0]
            .sort_values("Event Rate", ascending=ascending)
            .reset_index(drop=True)
            .iloc[0]["bucket_id"]
        )
    elif self.missing_treatment in ["neutral"]:
        table = self.bucket_tables_[feature]
        table["WoE"] = np.abs(table["WoE"])
        missing_bucket = int(
            table[table["Count"] > 0].sort_values("WoE").reset_index(drop=True).iloc[0]["bucket_id"]
        )
    elif self.missing_treatment in ["similar"]:
        table = self.bucket_tables_[feature]
        missing_WoE = table[table["label"] == "Missing"]["WoE"].values[0]
        table["New_WoE"] = np.abs(table["WoE"] - missing_WoE)
        missing_bucket = int(
            table[table["label"] != "Missing"].sort_values("New_WoE").reset_index(drop=True).iloc[0]["bucket_id"]
        )
    elif self.missing_treatment in ["passthrough"]:
        missing_bucket = np.nan
    else:
        raise AssertionError(f"Invalid missing treatment '{self.missing_treatment}' specified")

    return missing_bucket
Python
def _filter_na_for_fit(self, X: pd.DataFrame, y):
    """
    We need to filter out the missing values from a vector.

    Because we don't want to use those values to determine bin boundaries.

    Note pd.DataFrame.isna and pd.DataFrame.isnull are identical
    """
    # let's also treat infinite values as NA
    # scikit-learn's check_estimator might throw those at us
    with pd.option_context("mode.use_inf_as_na", True):
        flt = pd.isna(X).values
    X_out = X[~flt]
    if y is not None and len(y) > 0:
        y_out = y[~flt]
    else:
        y_out = y
    return X_out, y_out
Python
def _verify_specials_variables(specials: Dict, variables: List) -> None:
    """
    Make sure all specials columns are also in the data.
    """
    diff = set(specials.keys()).difference(set(variables))
    if len(diff) > 0:
        raise ValueError(f"Features {diff} are defined in the specials dictionary, but not in the variables.")
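For illustration, a sketch of when this check raises, assuming the function above (with `Dict` and `List` from `typing`) is in scope; the feature names are hypothetical.

specials = {"LIMIT_BAL": [-999], "UNKNOWN_COL": [0]}
variables = ["LIMIT_BAL", "BILL_AMT1"]

try:
    _verify_specials_variables(specials, variables)
except ValueError as e:
    print(e)  # Features {'UNKNOWN_COL'} are defined in the specials dictionary, but not in the variables.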
Python
def _update_column_fit(self, X, y, feature, special, splits, right, generate_summary=False):
    """
    Extract out part of the fit for a column.

    Useful when we want to interactively update the fit.
    """
    # Deal with missing values
    missing_bucket = None
    if isinstance(self.missing_treatment, dict):
        missing_bucket = self.missing_treatment.get(feature)

    self.features_bucket_mapping_[feature] = BucketMapping(
        feature_name=feature,
        type=self.variables_type,
        missing_bucket=missing_bucket,
        map=splits,
        right=right,
        specials=special,
    )

    # Calculate the bucket table
    self.bucket_tables_[feature] = build_bucket_table(
        X,
        y,
        column=feature,
        bucket_mapping=self.features_bucket_mapping_.get(feature),
    )

    if self.missing_treatment in [
        "most_frequent",
        "most_risky",
        "least_risky",
        "neutral",
        "similar",
        "passthrough",
    ]:
        missing_bucket = self._find_missing_bucket(feature=feature)

        # Repeat above procedure now we know the bucket distribution
        self.features_bucket_mapping_[feature] = BucketMapping(
            feature_name=feature,
            type=self.variables_type,
            missing_bucket=missing_bucket,
            map=splits,
            right=right,
            specials=special,
        )

        # Recalculate the bucket table with the new bucket for missings
        self.bucket_tables_[feature] = build_bucket_table(
            X,
            y,
            column=feature,
            bucket_mapping=self.features_bucket_mapping_.get(feature),
        )

    if generate_summary:
        self._generate_summary(X, y)
Python
def fit_interactive(self, X, y=None, mode="external", **server_kwargs): """ Fit a bucketer and then interactive edit the fit using a dash app. Note we are using a [jupyterdash](https://medium.com/plotly/introducing-jupyterdash-811f1f57c02e) app, which supports 3 different modes: - 'external' (default): Start dash server and print URL - 'inline': Start dash app inside an Iframe in the jupyter notebook - 'jupyterlab': Start dash app as a new tab inside jupyterlab """ # We need to make sure we only fit if not already fitted # This prevents a user loosing manually defined boundaries # when re-running .fit_interactive() if not is_fitted(self): self.fit(X, y) self.app = JupyterDash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) add_basic_layout(self) add_bucketing_callbacks(self, X, y) self.app.run_server(mode=mode, **server_kwargs)
def fit_interactive(self, X, y=None, mode="external", **server_kwargs): """ Fit a bucketer and then interactive edit the fit using a dash app. Note we are using a [jupyterdash](https://medium.com/plotly/introducing-jupyterdash-811f1f57c02e) app, which supports 3 different modes: - 'external' (default): Start dash server and print URL - 'inline': Start dash app inside an Iframe in the jupyter notebook - 'jupyterlab': Start dash app as a new tab inside jupyterlab """ # We need to make sure we only fit if not already fitted # This prevents a user loosing manually defined boundaries # when re-running .fit_interactive() if not is_fitted(self): self.fit(X, y) self.app = JupyterDash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) add_basic_layout(self) add_bucketing_callbacks(self, X, y) self.app.run_server(mode=mode, **server_kwargs)
Python
def transform(self, X: pd.DataFrame, y=None) -> pd.DataFrame:
    """Transforms an array into the corresponding buckets fitted by the Transformer.

    Args:
        X (pd.DataFrame): dataframe which will be transformed into the corresponding buckets
        y (array): target

    Returns:
        df (pd.DataFrame): dataset with transformed features
    """
    check_is_fitted(self)
    X = ensure_dataframe(X)
    y = self._check_y(y)
    if y is not None:
        assert len(y) == X.shape[0], "y and X not same length"
    # If bucketer was fitted, make sure X has same columns as train
    if hasattr(self, "n_train_features_"):
        if X.shape[1] != self.n_train_features_:
            raise ValueError("number of features in transform is different from the number of features in fit")

    # Some bucketers do not have a .fit() method
    # and if user did not specify any variables
    # use all the variables defined in the features_bucket_mapping
    if not hasattr(self, "variables_"):
        if self.variables == []:
            self.variables_ = list(self.features_bucket_mapping_.maps.keys())
        else:
            self.variables_ = self.variables

    for feature in self.variables_:
        bucket_mapping = self.features_bucket_mapping_.get(feature)
        X[feature] = bucket_mapping.transform(X[feature])

    if self.remainder == "drop":
        return X[self.variables_]
    else:
        return X
Python
def predict(self, X: pd.DataFrame):
    """Applies the transform method. To be used for the grid searches.

    Args:
        X (pd.DataFrame): The numerical data which will be transformed into the corresponding buckets

    Returns:
        y (np.array): Transformed X, such that the values of X are replaced by the corresponding bucket numbers
    """
    return self.transform(X)
Python
def predict_proba(self, X: pd.DataFrame):
    """Applies the transform method. To be used for the grid searches.

    Args:
        X (pd.DataFrame): The numerical data which will be transformed into the corresponding buckets

    Returns:
        yhat (np.array): transformed X, such that the values of X are replaced by the corresponding bucket numbers
    """
    return self.transform(X)
Python
def save_yml(self, fout: PathLike) -> None:
    """
    Save the features bucket to a yaml file.

    Args:
        fout: file output
    """
    check_is_fitted(self)
    if isinstance(self.features_bucket_mapping_, dict):
        FeaturesBucketMapping(self.features_bucket_mapping_).save_yml(fout)
    else:
        self.features_bucket_mapping_.save_yml(fout)
Python
def _more_tags(self):
    """
    Estimator tags are annotations of estimators that allow programmatic inspection of their capabilities.

    See https://scikit-learn.org/stable/developers/develop.html#estimator-tags
    """  # noqa
    return {"binary_only": True, "allow_nan": True}
Python
def fit(self, X, y):
    """Calculate the WOE for every column.

    Args:
        X (np.array): (binned) features
        y (np.array): target
    """
    assert self.epsilon >= 0

    # Check data
    X = ensure_dataframe(X)
    assert y is not None, "WoEBucketer needs a target y"
    y = BaseBucketer._check_y(y)

    y = y.astype(float)
    if len(np.unique(y)) > 2:
        raise AssertionError("WoEBucketer is only suited for binary classification")

    self.variables_ = BaseBucketer._check_variables(X, self.variables)

    # WoE currently does not support NAs
    # This is also flagged in self._more_tags()
    # We could treat missing values as a separate bin (-1) and thus handle seamlessly.
    BaseBucketer._check_contains_na(X, self.variables_)

    # scikit-learn requires checking that X has same shape on transform
    # this is because scikit-learn is still positional based (no column names used)
    self.n_train_features_ = X.shape[1]

    self.woe_mapping_ = {}
    for var in self.variables_:
        t = woe_1d(X[var], y, epsilon=self.epsilon)
        woe_dict = t["woe"].to_dict()
        # If new categories are encountered, return WoE = 0
        if self.handle_unknown == "value":
            woe_dict = defaultdict(int, woe_dict)

        self.woe_mapping_[var] = woe_dict

    return self
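For intuition, here is a standalone, hedged sketch of a per-bucket weight-of-evidence calculation. It is a generic illustration only: `woe_per_bucket` is a hypothetical helper, not the library's `woe_1d`, and skorecard's sign convention and epsilon handling may differ.

import numpy as np
import pandas as pd

def woe_per_bucket(x: pd.Series, y: pd.Series, epsilon: float = 1e-4) -> pd.Series:
    """Generic WoE: log of (share of non-events) over (share of events) per bucket."""
    df = pd.DataFrame({"bucket": x, "target": y})
    grouped = df.groupby("bucket")["target"].agg(["count", "sum"])
    events = grouped["sum"]                     # number of y == 1 per bucket
    non_events = grouped["count"] - events      # number of y == 0 per bucket
    dist_events = (events + epsilon) / events.sum()
    dist_non_events = (non_events + epsilon) / non_events.sum()
    return np.log(dist_non_events / dist_events)

x = pd.Series([0, 0, 1, 1, 2, 2, 2])  # bucket ids
y = pd.Series([0, 1, 0, 0, 1, 1, 0])  # binary target
print(woe_per_bucket(x, y))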
Python
def transform(self, X):
    """Transform X to weight of evidence encoding.

    Args:
        X (pd.DataFrame): dataset
    """
    assert self.handle_unknown in ["value", "error", "return_nan"]
    check_is_fitted(self)
    X = ensure_dataframe(X)
    if X.shape[1] != self.n_train_features_:
        msg = f"Number of features in X ({X.shape[1]}) is different "
        msg += f"from the number of features in X during fit ({self.n_train_features_})"
        raise ValueError(msg)

    for feature in self.variables_:
        woe_dict = self.woe_mapping_.get(feature)
        if self.handle_unknown == "error":
            new_cats = [x for x in list(X[feature].unique()) if x not in list(woe_dict.keys())]
            if len(new_cats) > 0:
                msg = "WoEEncoder encountered unknown new categories "
                msg += f"in column {feature} on .transform(): {new_cats}"
                raise AssertionError(msg)

        X[feature] = X[feature].map(woe_dict)

    return X
Python
def _more_tags(self):
    """
    Estimator tags are annotations of estimators that allow programmatic inspection of their capabilities.

    See https://scikit-learn.org/stable/developers/develop.html#estimator-tags
    """  # noqa
    return {"binary_only": True, "allow_nan": False}
Python
def append(self, bucketmap: BucketMapping) -> None:
    """Add a BucketMapping to the collection.

    Args:
        bucketmap (BucketMapping): map of a feature
    """
    assert isinstance(bucketmap, BucketMapping)
    self.maps[bucketmap.feature_name] = bucketmap
Python
def load_yml(self) -> None:
    """Should load in data from a yml.

    Returns:
        None: nothing
    """
    raise NotImplementedError("todo")
Python
def save_yml(self, file) -> None:
    """Should write data to a yml.

    Returns:
        None: nothing
    """
    if isinstance(file, str):
        # open (and close) the file if a path was given
        with open(file, "w") as fout:
            yaml.safe_dump(self.as_dict(), fout)
    else:
        yaml.safe_dump(self.as_dict(), file)
Python
def load_dict(self, obj):
    """Should load in data from a python dict.

    Args:
        obj (dict): Dict with names of features and their BucketMapping

    Returns:
        None: nothing
    """
    assert isinstance(obj, dict)

    self.maps = {}
    for feature, bucketmap in obj.items():
        self.append(BucketMapping(**bucketmap))
Python
def as_dict(self):
    """Returns data in class as a dict.

    Returns:
        dict: Data in class
    """
    return {k: dataclasses.asdict(v) for k, v in self.maps.items()}
Python
def merge_features_bucket_mapping(a: FeaturesBucketMapping, b: FeaturesBucketMapping) -> FeaturesBucketMapping:
    """
    Merge two sequential FeaturesBucketMapping objects.

    If there are unique features, we'll add them as-is.
    """
    assert isinstance(a, FeaturesBucketMapping)
    assert isinstance(b, FeaturesBucketMapping)

    cols_in_both = [col for col in a.columns if col in b.columns]
    cols_in_a = [col for col in a.columns if col not in b.columns]
    cols_in_b = [col for col in b.columns if col not in a.columns]

    features_bucket_mapping = FeaturesBucketMapping()
    for col in cols_in_both:
        c = merge_bucket_mapping(a.get(col), b.get(col))
        features_bucket_mapping.append(c)

    for col in cols_in_a:
        features_bucket_mapping.append(a.get(col))

    for col in cols_in_b:
        features_bucket_mapping.append(b.get(col))

    return features_bucket_mapping
Python
def is_fitted(estimator) -> bool:
    """
    Checks if an estimator is fitted.

    Loosely taken from
    https://github.com/scikit-learn/scikit-learn/blob/2beed5584/sklearn/utils/validation.py#L1034
    """  # noqa
    if not hasattr(estimator, "fit"):
        raise TypeError("%s is not an estimator instance." % (estimator))

    attrs = [v for v in vars(estimator) if v.endswith("_") and not v.startswith("__")]

    return len(attrs) > 0
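# Hypothetical usage sketch (not from the source), assuming a scikit-learn estimator.
# is_fitted() only inspects instance attributes ending in "_", so it is a heuristic.
from sklearn.linear_model import LogisticRegression

clf = LogisticRegression()
print(is_fitted(clf))   # False: no fitted attributes yet

clf.fit([[0.0], [1.0], [2.0], [3.0]], [0, 0, 1, 1])
print(is_fitted(clf))   # True: attributes such as coef_ now exist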
Python
def ensure_dataframe(X: pd.DataFrame) -> pd.DataFrame:
    """
    Make sure X is a pandas DataFrame.
    """
    # checks if the input is a dataframe.
    if not isinstance(X, pd.DataFrame):
        # Convert X to pd.DataFrame. Not recommended, as you'll lose column name info,
        # but the bucketer will still work on a numpy matrix.
        # Also required for full scikit-learn compatibility.
        X = X.copy()
        X = check_array(X, force_all_finite=False, accept_sparse=False, dtype=None)
        X = pd.DataFrame(X)
        X.columns = list(X.columns)  # sometimes columns can be a RangeIndex
    else:
        # Create a copy: important not to transform the original dataset.
        X = X.copy()

    if X.shape[0] == 0:
        raise ValueError("Dataset has no rows!")
    if X.shape[1] == 0:
        raise ValueError(f"0 feature(s) (shape=({X.shape[0]}, 0)) while a minimum of 1 is required.")

    return X
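# Hypothetical usage sketch (not from the source), assuming the imports used above
# (pandas and sklearn's check_array): a numpy array becomes a DataFrame with integer
# column names, while an existing DataFrame is only copied.
import numpy as np

arr = np.array([[1.0, 2.0], [3.0, 4.0]])
df = ensure_dataframe(arr)
print(type(df), list(df.columns))  # <class 'pandas.core.frame.DataFrame'> [0, 1]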
Python
def check_args(args: Dict, obj):
    """
    Checks if keys from args dictionary are valid args to an object.

    Note: this assumes 'obj' is scikit-learn compatible and thus has .get_params() implemented.
    """
    valid_args = obj().get_params()
    for arg in args.keys():
        if arg not in valid_args:
            msg = f"Argument '{arg}' is not a valid argument for object '{obj}'"
            warnings.warn(msg)
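# Hypothetical usage sketch (not from the source). Note that check_args() expects the
# class itself (it instantiates obj()), not an already constructed instance.
from sklearn.tree import DecisionTreeClassifier

check_args({"max_depth": 3}, DecisionTreeClassifier)   # silent: valid argument
check_args({"max_dpeth": 3}, DecisionTreeClassifier)   # warns: misspelled argument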
Python
def _validate_categorical_map(self):
    """
    Validate map structure.

    Ensures that the provided mapping starts at 0 and increases without gaps,
    i.e. the bucket indices are exactly 0, 1, ..., max.
    """
    values = [v for v in self.map.values()]
    if len(values) > 0:
        if not np.array_equal(np.unique(values), np.arange(max(values) + 1)):
            err_msg = (
                f"Mapping dictionary must start at 0 and be incremental. "
                f"Found the following mappings {np.unique(values)}, and expected {np.arange(max(values) + 1)}"
            )
            raise ValueError(err_msg)
Python
def as_dict(self) -> dict:
    """Return data in class as a dict.

    Returns:
        dict: data in class
    """
    return dataclasses.asdict(self)
Python
def build_labels(
    boundaries,
    right: bool,
    missing_bucket: int,
    start_special_bucket: int,
    specials: Optional[Dict[str, list]],
) -> Dict[int, str]:
    """
    Build a nice label dict from a boundary.

    ```python
    assert build_labels([1, 2, 3], right=True, missing_bucket=4, start_special_bucket=-1, specials=None) == {
        0: '(-inf, 1.0]',
        1: '(1.0, 2.0]',
        2: '(2.0, 3.0]',
        3: '(3.0, inf]',
        4: 'Missing',
    }
    ```
    """
    boundaries = np.hstack([-np.inf, boundaries, np.inf]).tolist()
    labels = {}

    if right:
        b_left = "("
        b_right = "]"
    else:
        b_left = "["
        b_right = ")"

    for i, boundary in enumerate(boundaries):
        if i != len(boundaries) - 1:
            labels[i] = f"{b_left}{boundary}, {boundaries[i+1]}{b_right}"

    # reserve a label for missing values
    if missing_bucket in labels.keys():
        labels[missing_bucket] += " | Missing"
    else:
        labels[missing_bucket] = "Missing"

    # labels for specials
    if specials:
        for k, v in specials.items():
            labels[start_special_bucket] = "Special: " + str(k)
            start_special_bucket -= 1

    return labels
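# Hypothetical usage sketch (not from the source): left-closed intervals plus a special
# bucket, following the logic of build_labels() above.
labels = build_labels(
    [10, 20],
    right=False,
    missing_bucket=3,
    start_special_bucket=-1,
    specials={"zero balance": [0]},
)
# Expected under the logic above:
# {0: '[-inf, 10.0)', 1: '[10.0, 20.0)', 2: '[20.0, inf)', 3: 'Missing', -1: 'Special: zero balance'}
print(labels)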
Python
def merge_bucket_mapping(a, b):
    """
    Merges two bucketmappings into one.

    Assumption here is that one is for prebucketing and the other is for bucketing.
    In other words, one bucketmapping builds on the other one.
    """
    msg = f"Feature '{a.feature_name}' has variable_type '{a.type}' in a, but '{b.type}' in b."
    msg += "\nDid you set variable_type correctly in your (pre)bucketing pipeline?"
    assert a.type == b.type, msg

    if a.type == "categorical":
        if b.other_bucket:
            assert (
                b.other_bucket in a.labels.keys()
            ), f"b.other_bucket set to {b.other_bucket} but not present in any of a's buckets ({a.labels})"
        if b.missing_bucket:
            assert (
                b.missing_bucket in a.labels.keys()
            ), f"b.missing_bucket set to {b.missing_bucket} but not present in any of a's buckets ({a.labels})"

        new_boundaries = {}
        for category, bucket in a.map.items():
            new_boundaries[category] = int(b.transform([bucket]))

        # let's also see where the 'other' category is assigned
        something_random = "84a088e251d2fa058f37145222e536dc"
        new_other_bucket = int(b.transform(a.transform([something_random])).tolist()[0])
        # if 'other' is put together with an existing bucket, manually assign that.
        if new_other_bucket in new_boundaries.values():
            other_bucket = new_other_bucket
        else:
            other_bucket = None

        # let's see where the missing category is assigned
        new_missing_bucket = int(b.transform(a.transform([np.nan])).tolist()[0])
        if new_missing_bucket in new_boundaries.values():
            missing_bucket = new_missing_bucket
        else:
            missing_bucket = None

        return BucketMapping(
            feature_name=a.feature_name,
            type=a.type,
            missing_bucket=missing_bucket,
            other_bucket=other_bucket,
            map=new_boundaries,
            specials=a.specials,
        )

    if a.type == "numerical":
        # This should hold for numerical maps
        assert len(a.map) >= len(b.map)

        # Add infinite edges to boundary map
        ref_map = [-np.inf] + a.map + [np.inf]
        new_buckets = list(b.transform(a.transform(ref_map)))

        # We take a.map and add inf edges, f.e.
        # [-np.inf, 1, 3, 5, np.inf]
        # We can run it through both bucketers and get f.e.
        # [0, 0, 1, 1, 2]
        # If a.right = True, then we take the max per group
        # If a.right = False, then we take the min per group
        # Finally, remove any infinites
        new_boundaries = []
        if a.right:
            for i, new_bucket in enumerate(new_buckets):
                if i == 0:
                    if len(new_buckets) == 1:
                        new_boundaries.append(ref_map[0])
                    continue
                if i == len(new_buckets) - 1:
                    continue
                if new_buckets[i + 1] > new_bucket:
                    new_boundaries.append(ref_map[i])
        if not a.right:
            for i, new_bucket in enumerate(new_buckets):
                if i == 0:
                    if len(new_buckets) == 1:
                        new_boundaries.append(ref_map[0])
                    continue
                else:
                    if new_buckets[i - 1] < new_bucket:
                        new_boundaries.append(ref_map[i])

        new_boundaries = [x for x in new_boundaries if x != -np.inf]
        new_boundaries = [x for x in new_boundaries if x != np.inf]

        return BucketMapping(
            feature_name=a.feature_name,
            type=a.type,
            missing_bucket=a.missing_bucket,
            map=new_boundaries,
            specials=a.specials,
            right=a.right,
        )
Python
def fit(self, X, y=None):
    """
    Fit the transformer.

    Here to be compliant with the sklearn API, does not fit anything.
    """
    # scikit-learn requires checking that X has same shape on transform
    # this is because scikit-learn is still positional based (no column names used)
    self.n_train_features_ = X.shape[1]

    return self
Python
def _more_tags(self):
    """
    Estimator tags are annotations of estimators that allow programmatic inspection of their capabilities.

    See https://scikit-learn.org/stable/developers/develop.html#estimator-tags
    """  # noqa
    return {"requires_fit": False}
Python
def plot_prebucket_table(prebucket_table, column="", line="", format=None, scale=None, width=None, height=None):
    """
    Given the prebucketed data, plot the pre-buckets.

    Args:
        prebucket_table (pd.DataFrame): the table of the prebucketed data
        column (str): The column to plot
        line (str): The line to plot on the secondary axis
        format (str): The format of the image, e.g. 'png'. The default returns a plotly fig
        scale: If format is specified, the scale of the image
        width: If format is specified, the width of the image
        height: If format is specified, the height of the image

    Returns:
        fig of desired format
    """
    fig = make_plot_figure(prebucket_table, line)

    fig.update_layout(title=f"pre-buckets: {column}".strip())
    fig.update_layout(xaxis_title=f"{column} pre-buckets".strip())

    if format:
        img_bytes = fig.to_image(format=format, scale=scale, width=width, height=height)
        fig = Image(img_bytes)
    return fig
Python
def draw_embedding(G, layout, emb, embedded_graph=None, interaction_edges=None,
                   chain_color=None, unused_color=(0.9, 0.9, 0.9, 1.0), cmap=None,
                   show_labels=False, overlapped_embedding=False, **kwargs):
    """Draws an embedding onto the graph G, according to layout.

    If interaction_edges is not None, then only display the couplers in that
    list. If embedded_graph is not None, then only display the couplers between
    chains with intended couplings according to embedded_graph.

    Parameters
    ----------
    G : NetworkX graph
        The graph to be drawn

    layout : dict
        A dict of coordinates associated with each node in G. Should
        be of the form {node: coordinate, ...}. Coordinates will be
        treated as vectors, and should all have the same length.

    emb : dict
        A dict of chains associated with each node in G. Should be
        of the form {node: chain, ...}. Chains should be iterables
        of qubit labels (qubits are nodes in G).

    embedded_graph : NetworkX graph (optional, default None)
        A graph which contains all keys of emb as nodes. If specified,
        edges of G will be considered interactions if and only if they
        exist between two chains of emb if their keys are connected by
        an edge in embedded_graph

    interaction_edges : list (optional, default None)
        A list of edges which will be used as interactions.

    show_labels : boolean (optional, default False)
        If show_labels is True, then each chain in emb is labelled with its key.

    chain_color : dict (optional, default None)
        A dict of colors associated with each key in emb. Should be
        of the form {node: rgba_color, ...}. Colors should be length-4
        tuples of floats between 0 and 1 inclusive. If chain_color is None,
        each chain will be assigned a different color.

    cmap : str or matplotlib colormap (optional, default None)
        A matplotlib colormap for coloring of chains. Only used if
        chain_color is None.

    unused_color : tuple or color string (optional, default (0.9,0.9,0.9,1.0))
        The color to use for nodes and edges of G which are not involved
        in chains, and edges which are neither chain edges nor interactions.
        If unused_color is None, these nodes and edges will not be shown at all.

    overlapped_embedding : boolean (optional, default False)
        If overlapped_embedding is True, then chains in emb may overlap (contain
        the same vertices in G), and the drawing will display these overlaps as
        concentric circles.

    kwargs : optional keywords
        See networkx.draw_networkx() for a description of optional keywords,
        with the exception of the `pos` parameter which is not used by this
        function. If `linear_biases` or `quadratic_biases` are provided,
        any provided `node_color` or `edge_color` arguments are ignored.
    """
    try:
        import matplotlib.pyplot as plt
        import matplotlib as mpl
    except ImportError:
        raise ImportError("Matplotlib and numpy required for draw_embedding()")

    if nx.utils.is_string_like(unused_color):
        from matplotlib.colors import colorConverter
        alpha = kwargs.get('alpha', 1.0)
        unused_color = colorConverter.to_rgba(unused_color, alpha)

    if chain_color is None:
        import matplotlib.cm
        n = max(1., len(emb) - 1.)
        if cmap:
            color = matplotlib.cm.get_cmap(cmap)
        else:
            color = distinguishable_color_map(int(n + 1))
        var_i = {v: i for i, v in enumerate(emb)}
        chain_color = {v: color(i / n) for i, v in enumerate(emb)}

    if overlapped_embedding:
        bags = compute_bags(G, emb)
        base_node_size = kwargs.get('node_size', 100)
        node_size_dict = {v: base_node_size for v in G.nodes()}
        G, emb, interaction_edges = unoverlapped_embedding(G, emb, interaction_edges)
        for node, data in G.nodes(data=True):
            if 'dummy' in data:
                v, x = node
                layout[node] = layout[v]

        for v, bag in bags.items():
            for i, x in enumerate(bag):
                node_size_dict[(v, x)] = base_node_size * (len(bag) - i) ** 2

        kwargs['node_size'] = [node_size_dict[p] for p in G.nodes()]

    qlabel = {q: v for v, chain in emb.items() for q in chain}
    edgelist = []
    edge_color = []
    background_edgelist = []
    background_edge_color = []

    if interaction_edges is not None:
        interactions = nx.Graph()
        interactions.add_edges_from(interaction_edges)

        def show(p, q, u, v):
            return interactions.has_edge(p, q)
    elif embedded_graph is not None:
        def show(p, q, u, v):
            return embedded_graph.has_edge(u, v)
    else:
        def show(p, q, u, v):
            return True

    for (p, q) in G.edges():
        u = qlabel.get(p)
        v = qlabel.get(q)
        if u is None or v is None:
            ec = unused_color
        elif u == v:
            ec = chain_color.get(u)
        elif show(p, q, u, v):
            ec = (0, 0, 0, 1)
        else:
            ec = unused_color

        if ec == unused_color:
            background_edgelist.append((p, q))
            background_edge_color.append(ec)
        elif ec is not None:
            edgelist.append((p, q))
            edge_color.append(ec)

    nodelist = []
    node_color = []
    for p in G.nodes():
        u = qlabel.get(p)
        if u is None:
            pc = unused_color
        else:
            pc = chain_color.get(u)

        if pc is not None:
            nodelist.append(p)
            node_color.append(pc)

    labels = {}
    if show_labels:
        if overlapped_embedding:
            node_labels = {q: [] for q in bags.keys()}
            node_index = {p: i for i, p in enumerate(G.nodes())}
            for v in emb.keys():
                v_labelled = False
                chain = emb[v]
                for node in chain:
                    (q, _) = node
                    if len(bags[q]) == 1:
                        # if there's a node that only has this label, use that
                        labels[q] = str(v)
                        v_labelled = True
                        break
                if not v_labelled and chain:
                    # otherwise, pick a random node for this label
                    node = random.choice(list(chain))
                    (q, _) = node
                    node_labels[q].append(v)

            for q, label_vars in node_labels.items():
                x, y = layout[q]
                # TODO: find a better way of placing labels around the outside of nodes.
                # Currently, if the graph is resized, labels will appear at a strange distance from the vertices.
                # To fix this, the "scale" value below, rather than being a fixed constant, should be determined using
                # both the size of the nodes and the size of the coordinate space of the graph.
                scale = 0.1
                # spread the labels evenly around the node.
                for i, v in enumerate(label_vars):
                    theta = 2 * math.pi * i / len(label_vars)
                    new_x = x + scale * math.sin(theta)
                    new_y = y + scale * math.cos(theta)
                    plt.text(new_x, new_y, str(v), color=node_color[node_index[(q, v)]],
                             horizontalalignment='center', verticalalignment='center')
        else:
            for v in emb.keys():
                c = emb[v]
                labels[list(c)[0]] = str(v)

    # draw the background (unused) graph first
    if unused_color is not None:
        draw(G, layout, nodelist=nodelist, edgelist=background_edgelist,
             node_color=node_color, edge_color=background_edge_color,
             **kwargs)

    draw(G, layout, nodelist=nodelist, edgelist=edgelist,
         node_color=node_color, edge_color=edge_color, labels=labels,
         **kwargs)
Python
def maximum_cut(G, sampler=None, **sampler_args):
    """Returns an approximate maximum cut.

    Defines an Ising problem with ground states corresponding to
    a maximum cut and uses the sampler to sample from it.

    A maximum cut is a subset S of the vertices of G such that
    the number of edges between S and the complementary subset
    is as large as possible.

    Parameters
    ----------
    G : NetworkX graph
        The graph on which to find a maximum cut.

    sampler
        A binary quadratic model sampler. A sampler is a process that
        samples from low energy states in models defined by an Ising
        equation or a Quadratic Unconstrained Binary Optimization
        Problem (QUBO). A sampler is expected to have a 'sample_qubo'
        and 'sample_ising' method. A sampler is expected to return an
        iterable of samples, in order of increasing energy. If no
        sampler is provided, one must be provided using the
        `set_default_sampler` function.

    sampler_args
        Additional keyword parameters are passed to the sampler.

    Returns
    -------
    S : set
        A maximum cut of G.

    Example
    -------
    This example uses a sampler from
    `dimod <https://github.com/dwavesystems/dimod>`_ to find a maximum
    cut for a graph of a Chimera unit cell created using the
    `chimera_graph()` function.

    >>> import dimod
    ...
    >>> sampler = dimod.SimulatedAnnealingSampler()
    >>> G = dnx.chimera_graph(1, 1, 4)
    >>> cut = dnx.maximum_cut(G, sampler)

    Notes
    -----
    Samplers by their nature may not return the optimal solution. This
    function does not attempt to confirm the quality of the returned
    sample.

    """
    # In order to form the Ising problem, we want to increase the
    # energy by 1 for each edge between two nodes of the same color.
    # The linear biases can all be 0.
    h = {v: 0. for v in G}
    J = {(u, v): 1 for u, v in G.edges}

    # draw the lowest energy sample from the sampler
    response = sampler.sample_ising(h, J, **sampler_args)
    sample = next(iter(response))

    return set(v for v in G if sample[v] >= 0)
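# Hypothetical sketch (not from the source): the Ising model that maximum_cut() builds
# for a triangle graph. Every edge gets coupling +1, so aligned spins are penalized and
# the ground states correspond to maximum cuts.
import networkx as nx

G = nx.cycle_graph(3)
h = {v: 0. for v in G}
J = {(u, v): 1 for u, v in G.edges}
print(h)  # {0: 0.0, 1: 0.0, 2: 0.0}
print(J)  # {(0, 1): 1, (0, 2): 1, (1, 2): 1}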
Python
def weighted_maximum_cut(G, sampler=None, **sampler_args):
    """Returns an approximate weighted maximum cut.

    Defines an Ising problem with ground states corresponding to
    a weighted maximum cut and uses the sampler to sample from it.

    A weighted maximum cut is a subset S of the vertices of G that
    maximizes the sum of the edge weights between S and its
    complementary subset.

    Parameters
    ----------
    G : NetworkX graph
        The graph on which to find a weighted maximum cut. Each edge in G
        should have a numeric `weight` attribute.

    sampler
        A binary quadratic model sampler. A sampler is a process that
        samples from low energy states in models defined by an Ising
        equation or a Quadratic Unconstrained Binary Optimization
        Problem (QUBO). A sampler is expected to have a 'sample_qubo'
        and 'sample_ising' method. A sampler is expected to return an
        iterable of samples, in order of increasing energy. If no
        sampler is provided, one must be provided using the
        `set_default_sampler` function.

    sampler_args
        Additional keyword parameters are passed to the sampler.

    Returns
    -------
    S : set
        A maximum cut of G.

    Notes
    -----
    Samplers by their nature may not return the optimal solution. This
    function does not attempt to confirm the quality of the returned
    sample.

    """
    # In order to form the Ising problem, we want to increase the
    # energy by 1 for each edge between two nodes of the same color.
    # The linear biases can all be 0.
    h = {v: 0. for v in G}
    try:
        J = {(u, v): G[u][v]['weight'] for u, v in G.edges}
    except KeyError:
        raise DWaveNetworkXException("edges must have 'weight' attribute")

    # draw the lowest energy sample from the sampler
    response = sampler.sample_ising(h, J, **sampler_args)
    sample = next(iter(response))

    return set(v for v in G if sample[v] >= 0)
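# Hypothetical usage sketch (not from the source): every edge needs a numeric 'weight'
# attribute, otherwise the function raises DWaveNetworkXException.
import dimod
import networkx as nx

G = nx.Graph()
G.add_edge("a", "b", weight=3.0)
G.add_edge("b", "c", weight=1.0)
cut = weighted_maximum_cut(G, dimod.SimulatedAnnealingSampler())
print(cut)  # e.g. {'b'}: separating 'b' cuts a total weight of 4.0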
Python
def partition(G, num_partitions=2, sampler=None, **sampler_args):
    """Returns an approximate k-partition of G.

    Defines a CQM with ground states corresponding to a balanced
    k-partition of G and uses the sampler to sample from it.

    A k-partition is a collection of k subsets of the vertices
    of G such that each vertex is in exactly one subset, and
    the number of edges between vertices in different subsets
    is as small as possible. If G is a weighted graph, the sum
    of weights over those edges are minimized.

    Parameters
    ----------
    G : NetworkX graph
        The graph to partition.

    num_partitions : int, optional (default 2)
        The number of subsets in the desired partition.

    sampler
        A constrained quadratic model sampler. A sampler is a process
        that samples from low energy states in models defined by an
        Ising equation or a Quadratic Model, with or without constraints.
        The sampler is expected to have a 'sample_cqm' method. A sampler
        is expected to return an iterable of samples, in order of
        increasing energy.

    sampler_args
        Additional keyword parameters are passed to the sampler.

    Returns
    -------
    node_partition : dict
        The partition as a dictionary mapping each node to a subset
        labelled as an integer 0, 1, 2, ..., num_partitions - 1.

    Example
    -------
    This example uses a sampler from
    `dimod <https://github.com/dwavesystems/dimod>`_ to find a
    2-partition for a graph of a Chimera unit cell created using the
    `chimera_graph()` function.

    >>> import dimod
    >>> sampler = dimod.ExactCQMSolver()
    >>> G = dnx.chimera_graph(1, 1, 4)
    >>> partitions = dnx.partition(G, sampler=sampler)

    Notes
    -----
    Samplers by their nature may not return the optimal solution. This
    function does not attempt to confirm the quality of the returned
    sample.

    """
    if not len(G.nodes):
        return {}

    cqm = graph_partition_cqm(G, num_partitions)

    # Solve the problem using the CQM solver
    response = sampler.sample_cqm(cqm, **sampler_args)

    # Consider only results satisfying all constraints
    possible_partitions = response.filter(lambda d: d.is_feasible)

    if not possible_partitions:
        raise RuntimeError("No feasible solution could be found for this problem instance.")

    # Reinterpret result as partition assignment over nodes
    indicators = (key for key, value in possible_partitions.first.sample.items()
                  if math.isclose(value, 1.))
    node_partition = {key[0]: key[1] for key in indicators}

    return node_partition
Python
def graph_partition_cqm(G, num_partitions):
    """Find a constrained quadratic model for the graph's partitions.

    Defines a CQM with ground states corresponding to a balanced
    k-partition of G.

    A k-partition is a collection of k subsets of the vertices
    of G such that each vertex is in exactly one subset, and
    the number of edges between vertices in different subsets
    is as small as possible. If G is a weighted graph, the sum
    of weights over those edges are minimized.

    Parameters
    ----------
    G : NetworkX graph
        The graph to partition.

    num_partitions : int
        The number of subsets in the desired partition.

    Returns
    -------
    cqm : :class:`dimod.ConstrainedQuadraticModel`
        A constrained quadratic model with ground states corresponding
        to a partition problem. The nodes of `G` are discrete logical
        variables of the CQM, where the cases are the different partitions
        the node can be assigned to. The objective is given as the number
        of edges connecting nodes in different partitions.

    """
    partition_size = G.number_of_nodes() / num_partitions
    partitions = range(num_partitions)

    cqm = dimod.ConstrainedQuadraticModel()

    # Variables will be added using the discrete method in CQM
    x = {vk: dimod.Binary(vk) for vk in itertools.product(G.nodes, partitions)}

    for v in G.nodes:
        cqm.add_discrete(((v, k) for k in partitions), label=v)

    if not math.isclose(partition_size, int(partition_size)):
        # if the number of nodes doesn't divide evenly into num_partitions,
        # accept partitions of size ceil() or floor()
        floor, ceil = int(partition_size), int(partition_size + 1)
        for k in partitions:
            cqm.add_constraint(dimod.quicksum((x[u, k] for u in G.nodes)) >= floor,
                               label='equal_partition_low_%s' % k)
            cqm.add_constraint(dimod.quicksum((x[u, k] for u in G.nodes)) <= ceil,
                               label='equal_partition_high_%s' % k)
    else:
        # each partition must have partition_size elements
        for k in partitions:
            cqm.add_constraint(dimod.quicksum((x[u, k] for u in G.nodes)) == int(partition_size),
                               label='equal_partition_%s' % k)

    cuts = 0
    for (u, v, d) in G.edges(data=True):
        for k in partitions:
            w = d.get('weight', 1)
            cuts += w * x[u, k] * x[v, k]

    if cuts:
        cqm.set_objective(-cuts)

    return cqm
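# Hypothetical usage sketch (not from the source): inspect the CQM built for a small
# graph. With 4 nodes and 2 partitions there should be one one-hot (discrete) constraint
# per node plus one size constraint per partition.
import networkx as nx

G = nx.complete_graph(4)
cqm = graph_partition_cqm(G, num_partitions=2)
print(len(cqm.variables))    # 8 binary indicator variables: 4 nodes x 2 partitions
print(len(cqm.constraints))  # 6 constraints: 4 one-hot + 2 partition-size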
Python
def sample_markov_network(MN, sampler=None, fixed_variables=None, return_sampleset=False,
                          **sampler_args):
    """Samples from a Markov network using the provided sampler.

    Parameters
    ----------
    MN : NetworkX graph
        A Markov Network as returned by :func:`.markov_network`

    sampler
        A binary quadratic model sampler. A sampler is a process that
        samples from low energy states in models defined by an Ising
        equation or a Quadratic Unconstrained Binary Optimization
        Problem (QUBO). A sampler is expected to have a 'sample_qubo'
        and 'sample_ising' method. A sampler is expected to return an
        iterable of samples, in order of increasing energy. If no
        sampler is provided, one must be provided using the
        `set_default_sampler` function.

    fixed_variables : dict
        A dictionary of variable assignments to be fixed in the markov network.

    return_sampleset : bool (optional, default=False)
        If True, returns a :obj:`dimod.SampleSet` rather than a list of samples.

    **sampler_args
        Additional keyword parameters are passed to the sampler.

    Returns
    -------
    samples : list[dict]/:obj:`dimod.SampleSet`
        A list of samples ordered from low-to-high energy or a sample set.

    Examples
    --------
    >>> import dimod
    ...
    >>> potentials = {('a', 'b'): {(0, 0): -1,
    ...                            (0, 1): .5,
    ...                            (1, 0): .5,
    ...                            (1, 1): 2}}
    >>> MN = dnx.markov_network(potentials)
    >>> sampler = dimod.ExactSolver()
    >>> samples = dnx.sample_markov_network(MN, sampler)
    >>> samples[0]           # doctest: +SKIP
    {'a': 0, 'b': 0}

    >>> import dimod
    ...
    >>> potentials = {('a', 'b'): {(0, 0): -1,
    ...                            (0, 1): .5,
    ...                            (1, 0): .5,
    ...                            (1, 1): 2}}
    >>> MN = dnx.markov_network(potentials)
    >>> sampler = dimod.ExactSolver()
    >>> samples = dnx.sample_markov_network(MN, sampler, return_sampleset=True)
    >>> samples.first        # doctest: +SKIP
    Sample(sample={'a': 0, 'b': 0}, energy=-1.0, num_occurrences=1)

    >>> import dimod
    ...
    >>> potentials = {('a', 'b'): {(0, 0): -1,
    ...                            (0, 1): .5,
    ...                            (1, 0): .5,
    ...                            (1, 1): 2},
    ...               ('b', 'c'): {(0, 0): -9,
    ...                            (0, 1): 1.2,
    ...                            (1, 0): 7.2,
    ...                            (1, 1): 5}}
    >>> MN = dnx.markov_network(potentials)
    >>> sampler = dimod.ExactSolver()
    >>> samples = dnx.sample_markov_network(MN, sampler, fixed_variables={'b': 0})
    >>> samples[0]           # doctest: +SKIP
    {'a': 0, 'c': 0, 'b': 0}

    Notes
    -----
    Samplers by their nature may not return the optimal solution. This
    function does not attempt to confirm the quality of the returned
    sample.

    """
    bqm = markov_network_bqm(MN)

    if fixed_variables:
        # we can modify in-place since we just made it
        bqm.fix_variables(fixed_variables)

    sampleset = sampler.sample(bqm, **sampler_args)

    if fixed_variables:
        # add the variables back in
        sampleset = dimod.append_variables(sampleset, fixed_variables)

    if return_sampleset:
        return sampleset
    else:
        return list(map(dict, sampleset.samples()))
Python
def request_from_github(abort_code: int = 418) -> Callable:
    """Provide decorator to handle request from github on the webhook."""
    def decorator(f):
        """Decorate the function to check if a request is a GitHub hook request."""
        @wraps(f)
        def decorated_function(*args, **kwargs):
            if request.method != 'POST':
                return 'OK'
            else:
                # Do initial validations on required headers
                if 'X-Github-Event' not in request.headers:
                    g.log.critical('X-Github-Event not in headers!')
                    abort(abort_code)
                if 'X-Github-Delivery' not in request.headers:
                    g.log.critical('X-Github-Delivery not in headers!')
                    abort(abort_code)
                if 'X-Hub-Signature' not in request.headers:
                    g.log.critical('X-Hub-Signature not in headers!')
                    abort(abort_code)
                if not request.is_json:
                    g.log.critical('Request is not JSON!')
                    abort(abort_code)
                if 'User-Agent' not in request.headers:
                    g.log.critical('User-Agent not in headers!')
                    abort(abort_code)

                ua = request.headers.get('User-Agent')
                if not ua.startswith('GitHub-Hookshot/'):
                    g.log.critical('User-Agent does not begin with GitHub-Hookshot/!')
                    abort(abort_code)

                request_ip = ip_address(u'{0}'.format(request.remote_addr))
                meta_json = requests.get('https://api.github.com/meta').json()
                hook_blocks = meta_json['hooks']

                # Check if the POST request is from GitHub
                for block in hook_blocks:
                    if ip_address(request_ip) in ip_network(block):
                        break
                else:
                    g.log.warning("Unauthorized attempt to deploy by IP {ip}".format(ip=request_ip))
                    abort(abort_code)

                return f(*args, **kwargs)
        return decorated_function
    return decorator
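# Hypothetical usage sketch (not from the source), assuming a Flask app and a view
# function named webhook(); the route name is illustrative. The decorator rejects
# anything that does not look like a genuine GitHub webhook delivery before the view
# body runs (it also relies on g.log being configured by the application).
from flask import Flask

app = Flask(__name__)

@app.route('/deploy', methods=['GET', 'POST'])
@request_from_github()
def webhook():
    return 'processed'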
Python
def decorator(f):
    """Decorate the function to check if a request is a GitHub hook request."""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if request.method != 'POST':
            return 'OK'
        else:
            # Do initial validations on required headers
            if 'X-Github-Event' not in request.headers:
                g.log.critical('X-Github-Event not in headers!')
                abort(abort_code)
            if 'X-Github-Delivery' not in request.headers:
                g.log.critical('X-Github-Delivery not in headers!')
                abort(abort_code)
            if 'X-Hub-Signature' not in request.headers:
                g.log.critical('X-Hub-Signature not in headers!')
                abort(abort_code)
            if not request.is_json:
                g.log.critical('Request is not JSON!')
                abort(abort_code)
            if 'User-Agent' not in request.headers:
                g.log.critical('User-Agent not in headers!')
                abort(abort_code)

            ua = request.headers.get('User-Agent')
            if not ua.startswith('GitHub-Hookshot/'):
                g.log.critical('User-Agent does not begin with GitHub-Hookshot/!')
                abort(abort_code)

            request_ip = ip_address(u'{0}'.format(request.remote_addr))
            meta_json = requests.get('https://api.github.com/meta').json()
            hook_blocks = meta_json['hooks']

            # Check if the POST request is from GitHub
            for block in hook_blocks:
                if ip_address(request_ip) in ip_network(block):
                    break
            else:
                g.log.warning("Unauthorized attempt to deploy by IP {ip}".format(ip=request_ip))
                abort(abort_code)

            return f(*args, **kwargs)
    return decorated_function
Python
def is_valid_signature(x_hub_signature, data, private_key):
    """
    Re-check if the GitHub hook request got valid signature.

    :param x_hub_signature: Signature to check
    :type x_hub_signature: str
    :param data: Signature's data
    :type data: bytearray
    :param private_key: Signature's token
    :type private_key: str
    """
    hash_algorithm, github_signature = x_hub_signature.split('=', 1)
    algorithm = hashlib.__dict__.get(hash_algorithm)
    encoded_key = bytes(private_key, 'latin-1')
    mac = hmac.new(encoded_key, msg=data, digestmod=algorithm)
    return hmac.compare_digest(mac.hexdigest(), github_signature)
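# Hypothetical usage sketch (not from the source): build an X-Hub-Signature the same way
# GitHub does (HMAC of the raw body, prefixed with the algorithm name) and verify it.
# The secret and body below are made up for illustration.
import hashlib
import hmac

secret = 'my-webhook-secret'
body = b'{"ref": "refs/heads/master"}'
signature = 'sha1=' + hmac.new(bytes(secret, 'latin-1'), msg=body, digestmod=hashlib.sha1).hexdigest()

print(is_valid_signature(signature, body, secret))          # True
print(is_valid_signature(signature, b'tampered', secret))   # False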
Python
def deploy():
    """Deploy the GitHub request to the test platform."""
    from run import app
    abort_code = 418

    event = request.headers.get('X-GitHub-Event')
    if event == "ping":
        g.log.info('deploy endpoint pinged!')
        return json.dumps({'msg': 'Hi!'})
    if event != "push":
        g.log.info('deploy endpoint received unaccepted push request!')
        return json.dumps({'msg': "Wrong event type"})

    x_hub_signature = request.headers.get('X-Hub-Signature')
    # webhook content type should be application/json for request.data to have the payload
    # request.data is empty in case of x-www-form-urlencoded
    if not is_valid_signature(x_hub_signature, request.data, g.github['deploy_key']):
        g.log.warning('Deploy signature failed: {sig}'.format(sig=x_hub_signature))
        abort(abort_code)

    payload = request.get_json()
    if payload is None:
        g.log.warning('Deploy payload is empty: {payload}'.format(payload=payload))
        abort(abort_code)

    if payload['ref'] != 'refs/heads/master':
        return json.dumps({'msg': 'Not master; ignoring'})

    # Update code
    try:
        repo = Repo(app.config['INSTALL_FOLDER'])
    except InvalidGitRepositoryError:
        return json.dumps({'msg': 'Folder is not a valid git directory'})

    try:
        origin = repo.remote('origin')
    except ValueError:
        return json.dumps({'msg': 'Remote origin does not exist'})

    fetch_info = origin.fetch()
    if len(fetch_info) == 0:
        return json.dumps({'msg': "Didn't fetch any information from remote!"})

    # Pull code (finally)
    pull_info = origin.pull()
    if len(pull_info) == 0:
        return json.dumps({'msg': "Didn't pull any information from remote!"})
    if pull_info[0].flags > 128:
        return json.dumps({'msg': "Didn't pull any information from remote!"})

    commit_hash = pull_info[0].commit.hexsha
    build_commit = 'build_commit = "{commit}"'.format(commit=commit_hash)
    with open('build_commit.py', 'w') as f:
        f.write(build_commit)

    # Update runCI
    run_ci_repo = path.join(app.config['INSTALL_FOLDER'], 'install', 'ci-vm', 'ci-linux', 'ci', 'runCI')
    run_ci_nfs = path.join(app.config['SAMPLE_REPOSITORY'], 'vm_data', app.config['KVM_LINUX_NAME'], 'runCI')
    copyfile(run_ci_repo, run_ci_nfs)

    # Reload platform service
    g.log.info('Platform upgraded to commit {commit}'.format(commit=commit_hash))
    subprocess.Popen(["sudo", "service", "platform", "reload"])
    g.log.info('Sample platform synced with Github!')
    return json.dumps({'msg': 'Platform upgraded to commit {commit}'.format(commit=commit_hash)})
Python
def before_app_request() -> None:
    """Organize menu content such as Platform management before request."""
    config_entries = get_menu_entries(
        g.user, 'Platform mgmt', 'cog', [], '', [
            {'title': 'Maintenance', 'icon': 'wrench', 'route': 'ci.show_maintenance', 'access': [Role.admin]},  # type: ignore
            {'title': 'Blocked Users', 'icon': 'ban', 'route': 'ci.blocked_users', 'access': [Role.admin]}  # type: ignore
        ]
    )
    if 'config' in g.menu_entries and 'entries' in config_entries:
        g.menu_entries['config']['entries'] = config_entries['entries'] + g.menu_entries['config']['entries']
    else:
        g.menu_entries['config'] = config_entries
Python
def start_platforms(db, repository, delay=None, platform=None) -> None:
    """
    Start new test on both platforms in parallel.

    We use the multiprocessing module, which bypasses the Python GIL, to make
    use of multiple processor cores.
    """
    from run import config, log, app

    with app.app_context():
        from flask import current_app

        if platform is None or platform == TestPlatform.linux:
            linux_kvm_name = config.get('KVM_LINUX_NAME', '')
            log.info('setting Linux virtual machine process...')
            linux_process = Process(target=kvm_processor,
                                    args=(current_app._get_current_object(), db, linux_kvm_name,
                                          TestPlatform.linux, repository, delay,))
            linux_process.start()
            log.info('started Linux virtual machine process...')

        if platform is None or platform == TestPlatform.windows:
            win_kvm_name = config.get('KVM_WINDOWS_NAME', '')
            log.info('setting Windows virtual machine process...')
            windows_process = Process(target=kvm_processor,
                                      args=(current_app._get_current_object(), db, win_kvm_name,
                                            TestPlatform.windows, repository, delay,))
            windows_process.start()
            log.info('started Windows virtual machine process...')
Python
def inform_mailing_list(mailer, id, title, author, body) -> None:
    """
    Send mail to subscribed users when an issue is opened via the webhook.

    :param mailer: The mailer instance
    :type mailer: Mailer
    :param id: ID of the opened issue
    :type id: int
    :param title: Title of the created issue
    :type title: str
    :param author: The author's username of the issue
    :type author: str
    :param body: The content of the issue
    :type body: str
    """
    from run import get_github_issue_link

    subject = "GitHub Issue #{issue_number}".format(issue_number=id)
    url = get_github_issue_link(id)
    if not mailer.send_simple_message({
        "to": "[email protected]",
        "subject": subject,
        "html": get_html_issue_body(title=title, author=author, body=body, issue_number=id, url=url)
    }):
        g.log.error('failed to send issue to mailing list')
Python
def update_build_badge(status, test) -> None:
    """
    Build status badge for current test to be displayed on sample-platform.

    :param status: current testing status
    :type status: str
    :param test: current commit that is tested
    :type test: Test
    :return: None
    :rtype: None
    """
    if test.test_type == TestType.commit and check_main_repo(test.fork.github):
        parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        availableon = os.path.join(
            parent_dir, 'static', 'svg',
            '{status}-{platform}.svg'.format(status=status.upper(), platform=test.platform.value)
        )
        svglocation = os.path.join(
            parent_dir, 'static', 'img', 'status',
            'build-{platform}.svg'.format(platform=test.platform.value)
        )
        shutil.copyfile(availableon, svglocation)
        g.log.info('Build badge updated successfully!')
    else:
        return
Python
def progress_reporter(test_id, token):
    """
    Handle the progress of a certain test after validating the token.

    If necessary, update the status on GitHub.

    :param test_id: The id of the test to update.
    :type test_id: int
    :param token: The token to check the validity of the request.
    :type token: str
    :return: "OK" on success, "FAIL" or "EMPTY" otherwise.
    :rtype: str
    """
    from run import config, log

    # Verify token
    test = Test.query.filter(Test.id == test_id).first()
    if test is not None and test.token == token:
        repo_folder = config.get('SAMPLE_REPOSITORY', '')

        if 'type' in request.form:
            if request.form['type'] == 'progress':
                log.info('progress method triggered by progress_reporter')
                ret_val = progress_type_request(log, test, test_id, request)
                if ret_val == "FAIL":
                    return "FAIL"

            elif request.form['type'] == 'equality':
                log.info('equality method triggered by progress_reporter')
                equality_type_request(log, test_id, test, request)

            elif request.form['type'] == 'logupload':
                log.info('logupload method triggered by progress_reporter')
                ret_val = logupload_type_request(log, test_id, repo_folder, test, request)
                if ret_val == "EMPTY":
                    return "EMPTY"

            elif request.form['type'] == 'upload':
                log.info('upload method triggered by progress_reporter')
                ret_val = upload_type_request(log, test_id, repo_folder, test, request)
                if ret_val == "EMPTY":
                    return "EMPTY"

            elif request.form['type'] == 'finish':
                log.info('finish method triggered by progress_reporter')
                finish_type_request(log, test_id, test, request)
        return "OK"

    return "FAIL"
Python
def equality_type_request(log, test_id, test, request):
    """
    Handle equality request type for progress reporter.

    :param log: logger
    :type log: Logger
    :param test_id: The id of the test to update.
    :type test_id: int
    :param test: concerned test
    :type test: Test
    :param request: Request parameters
    :type request: Request
    """
    log.debug('Equality for {t}/{rt}/{rto}'.format(
        t=test_id, rt=request.form['test_id'], rto=request.form['test_file_id'])
    )
    rto = RegressionTestOutput.query.filter(RegressionTestOutput.id == request.form['test_file_id']).first()

    if rto is None:
        # Equality posted on a file that's ignored presumably
        log.info('No rto for {test_id}: {test}'.format(test_id=test_id, test=request.form['test_id']))
    else:
        result_file = TestResultFile(test.id, request.form['test_id'], rto.id, rto.correct)
        g.db.add(result_file)
        g.db.commit()
Python
def logupload_type_request(log, test_id, repo_folder, test, request):
    """
    Handle logupload request type for progress reporter.

    :param log: logger
    :type log: Logger
    :param test_id: The id of the test to update.
    :type test_id: int
    :param repo_folder: repository folder
    :type repo_folder: str
    :param test: concerned test
    :type test: Test
    :param request: Request parameters
    :type request: Request
    """
    log.debug("Received log file for test {id}".format(id=test_id))
    # File upload, process
    if 'file' in request.files:
        uploaded_file = request.files['file']
        filename = secure_filename(uploaded_file.filename)
        if filename == '':
            return 'EMPTY'
        temp_path = os.path.join(repo_folder, 'TempFiles', filename)
        # Save to temporary location
        uploaded_file.save(temp_path)
        final_path = os.path.join(repo_folder, 'LogFiles', '{id}{ext}'.format(id=test.id, ext='.txt'))
        os.rename(temp_path, final_path)
        log.debug("Stored log file")
Python
def upload_type_request(log, test_id, repo_folder, test, request):
    """
    Handle upload request type for progress reporter.

    :param log: logger
    :type log: Logger
    :param test_id: The id of the test to update.
    :type test_id: int
    :param repo_folder: repository folder
    :type repo_folder: str
    :param test: concerned test
    :type test: Test
    :param request: Request parameters
    :type request: Request
    """
    log.debug('Upload for {t}/{rt}/{rto}'.format(
        t=test_id, rt=request.form['test_id'], rto=request.form['test_file_id'])
    )
    # File upload, process
    if 'file' in request.files:
        uploaded_file = request.files['file']
        filename = secure_filename(uploaded_file.filename)
        if filename == '':
            log.warning('empty filename provided for uploading')
            return 'EMPTY'
        temp_path = os.path.join(repo_folder, 'TempFiles', filename)
        # Save to temporary location
        uploaded_file.save(temp_path)
        # Get hash and check if it's already been submitted
        hash_sha256 = hashlib.sha256()
        with open(temp_path, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_sha256.update(chunk)
        file_hash = hash_sha256.hexdigest()
        filename, file_extension = os.path.splitext(filename)
        final_path = os.path.join(
            repo_folder, 'TestResults', '{hash}{ext}'.format(hash=file_hash, ext=file_extension)
        )
        os.rename(temp_path, final_path)
        rto = RegressionTestOutput.query.filter(
            RegressionTestOutput.id == request.form['test_file_id']).first()
        result_file = TestResultFile(test.id, request.form['test_id'], rto.id, rto.correct, file_hash)
        g.db.add(result_file)
        g.db.commit()
Python
def finish_type_request(log, test_id, test, request):
    """
    Handle finish request type for progress reporter.

    :param log: logger
    :type log: Logger
    :param test_id: The id of the test to update.
    :type test_id: int
    :param test: concerned test
    :type test: Test
    :param request: Request parameters
    :type request: Request
    """
    log.debug('Finish for {t}/{rt}'.format(t=test_id, rt=request.form['test_id']))
    regression_test = RegressionTest.query.filter(RegressionTest.id == request.form['test_id']).first()
    result = TestResult(
        test.id, regression_test.id, request.form['runTime'],
        request.form['exitCode'], regression_test.expected_rc
    )
    g.db.add(result)
    try:
        g.db.commit()
    except IntegrityError as e:
        log.error('Could not save the results: {msg}'.format(msg=e))
Python
def comment_pr(test_id, state, pr_nr, platform) -> None:
    """
    Upload the test report to the GitHub PR as a comment.

    :param test_id: The identity of the Test whose report will be uploaded
    :type test_id: str
    :param state: The state of the PR.
    :type state: Status
    :param pr_nr: PR number to which the test commit is related and where the comment will be uploaded
    :type pr_nr: str
    :param platform: The platform of the test
    :type platform: str
    """
    from run import app, log

    regression_testid_passed = g.db.query(TestResult.regression_test_id).outerjoin(
        TestResultFile, TestResult.test_id == TestResultFile.test_id).filter(
        TestResult.test_id == test_id,
        TestResult.expected_rc == TestResult.exit_code,
        or_(
            TestResult.exit_code != 0,
            and_(TestResult.exit_code == 0,
                 TestResult.regression_test_id == TestResultFile.regression_test_id,
                 TestResultFile.got.is_(None)
                 ),
            and_(
                RegressionTestOutput.regression_id == TestResult.regression_test_id,
                RegressionTestOutput.ignore.is_(True),
            ))).subquery()
    passed = g.db.query(label('category_id', Category.id), label(
        'success', count(regressionTestLinkTable.c.regression_id))).filter(
        regressionTestLinkTable.c.regression_id.in_(regression_testid_passed),
        Category.id == regressionTestLinkTable.c.category_id).group_by(
        regressionTestLinkTable.c.category_id).subquery()
    tot = g.db.query(label('category', Category.name),
                     label('total', count(regressionTestLinkTable.c.regression_id)),
                     label('success', passed.c.success)).outerjoin(
        passed, passed.c.category_id == Category.id).filter(
        Category.id == regressionTestLinkTable.c.category_id).group_by(
        regressionTestLinkTable.c.category_id).all()
    regression_testid_failed = RegressionTest.query.filter(RegressionTest.id.notin_(regression_testid_passed)).all()

    template = app.jinja_env.get_or_select_template('ci/pr_comment.txt')
    message = template.render(tests=tot, failed_tests=regression_testid_failed, test_id=test_id,
                              state=state, platform=platform)
    log.debug('Github PR Comment Message Created for Test_id: {test_id}'.format(test_id=test_id))
    try:
        gh = GitHub(access_token=g.github['bot_token'])
        repository = gh.repos(g.github['repository_owner'])(g.github['repository'])
        # Pull requests are just issues with code, so GitHub treats PR comments as issue comments
        pull_request = repository.issues(pr_nr)
        comments = pull_request.comments().get()
        bot_name = g.github['bot_name']
        comment_id = None
        for comment in comments:
            if comment['user']['login'] == bot_name and platform in comment['body']:
                comment_id = comment['id']
                break
        log.debug('Github PR Comment ID Fetched for Test_id: {test_id}'.format(test_id=test_id))
        if comment_id is None:
            comment = pull_request.comments().post(body=message)
            comment_id = comment['id']
        else:
            repository.issues().comments(comment_id).post(body=message)
        log.debug('Github PR Comment ID {comment} Uploaded for Test_id: {test_id}'.format(
            comment=comment_id, test_id=test_id))
    except Exception as e:
        log.error('Github PR Comment Failed for Test_id: {test_id} with Exception {e}'.format(test_id=test_id, e=e))
Python
def show_maintenance():
    """
    Get list of Virtual Machines under maintenance.

    :return: platforms in maintenance
    :rtype: dict
    """
    return {
        'platforms': MaintenanceMode.query.all()
    }
Python
def blocked_users():
    """
    Render the blocked_users template.

    This returns a list of all currently blacklisted users.
    Also defines processing of forms to add/remove users from the blacklist.
    When a user is added to the blacklist, removes queued tests on any PR by the user.
    """
    blocked_users = BlockedUsers.query.order_by(BlockedUsers.user_id)

    # Initialize usernames dictionary
    usernames = {u.user_id: 'Error, cannot get username' for u in blocked_users}
    for key in usernames.keys():
        # Fetch usernames from GitHub API
        try:
            api_url = requests.get('https://api.github.com/user/{}'.format(key), timeout=10)
            userdata = api_url.json()
            # Set values to the actual usernames if no errors
            usernames[key] = userdata['login']
        except requests.exceptions.RequestException:
            break

    # Define addUserForm processing
    add_user_form = AddUsersToBlacklist()
    if add_user_form.add.data and add_user_form.validate_on_submit():
        if BlockedUsers.query.filter_by(user_id=add_user_form.user_id.data).first() is not None:
            flash('User already blocked.')
            return redirect(url_for('.blocked_users'))

        blocked_user = BlockedUsers(add_user_form.user_id.data, add_user_form.comment.data)
        g.db.add(blocked_user)
        g.db.commit()
        flash('User blocked successfully.')

        try:
            # Remove any queued pull request from blocked user
            gh = GitHub(access_token=g.github['bot_token'])
            repository = gh.repos(g.github['repository_owner'])(g.github['repository'])
            # Getting all pull requests by blocked user on the repo
            pulls = repository.pulls.get()
            for pull in pulls:
                if pull['user']['id'] != add_user_form.user_id.data:
                    continue
                tests = Test.query.filter(Test.pr_nr == pull['number']).all()
                for test in tests:
                    # Add canceled status only if the test hasn't started yet
                    if len(test.progress) > 0:
                        continue
                    progress = TestProgress(test.id, TestStatus.canceled, "PR closed", datetime.datetime.now())
                    g.db.add(progress)
                    g.db.commit()
                    try:
                        repository.statuses(test.commit).post(
                            state=Status.FAILURE,
                            description="Tests canceled since user blacklisted",
                            context="CI - {name}".format(name=test.platform.value),
                            target_url=url_for('test.by_id', test_id=test.id, _external=True)
                        )
                    except ApiError as a:
                        g.log.error('Got an exception while posting to GitHub! Message: {message}'.format(
                            message=a.message))
        except ApiError as a:
            g.log.error('Pull Requests of Blocked User could not be fetched: {res}'.format(res=a.response))

        return redirect(url_for('.blocked_users'))

    # Define removeUserForm processing
    remove_user_form = RemoveUsersFromBlacklist()
    if remove_user_form.remove.data and remove_user_form.validate_on_submit():
        blocked_user = BlockedUsers.query.filter_by(user_id=remove_user_form.user_id.data).first()
        if blocked_user is None:
            flash('No such user in Blacklist')
            return redirect(url_for('.blocked_users'))

        g.db.delete(blocked_user)
        g.db.commit()
        flash('User removed successfully.')
        return redirect(url_for('.blocked_users'))

    return {
        'addUserForm': add_user_form,
        'removeUserForm': remove_user_form,
        'blocked_users': blocked_users,
        'usernames': usernames
    }
Python
def toggle_maintenance(platform, status):
    """
    Toggle maintenance mode for a platform.

    :param platform: name of the platform
    :type platform: str
    :param status: current maintenance status
    :type status: str
    :return: success response if successful, failure response otherwise
    :rtype: JSON
    """
    result = 'failed'
    message = 'Platform Not found'
    try:
        platform = TestPlatform.from_string(platform)
        db_mode = MaintenanceMode.query.filter(MaintenanceMode.platform == platform).first()
        if db_mode is not None:
            db_mode.disabled = status == 'True'
            g.db.commit()
            result = 'success'
            message = '{platform} in maintenance? {status}'.format(
                platform=platform.description,
                status=("Yes" if db_mode.disabled else 'No')
            )
    except ValueError:
        pass

    return jsonify({
        'status': result,
        'message': message
    })
Python
def in_maintenance_mode(platform):
    """
    Check if platform is in maintenance mode.

    :param platform: name of the platform
    :type platform: str
    :return: status of the platform
    :rtype: str
    """
    try:
        platform = TestPlatform.from_string(platform)
    except ValueError:
        return 'ERROR'

    status = MaintenanceMode.query.filter(MaintenanceMode.platform == platform).first()
    if status is None:
        status = MaintenanceMode(platform, False)
        g.db.add(status)
        g.db.commit()

    return str(status.disabled)
Python
def check_main_repo(repo_url) -> bool:
    """
    Check whether the repo_url links to the main repository or not.

    :param repo_url: url of fork/main repository of the user
    :type repo_url: str
    :return: whether the url points to the main repository or not
    :rtype: bool
    """
    from run import config, get_github_config

    gh_config = get_github_config(config)
    return '{user}/{repo}'.format(user=gh_config['repository_owner'], repo=gh_config['repository']) in repo_url
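For illustration only, the containment check at the end behaves like this; the owner and repository names below are made up and stand in for gh_config['repository_owner'] and gh_config['repository']:

owner, repo = "acme", "sample-platform"  # hypothetical values
main = '{user}/{repo}'.format(user=owner, repo=repo)
print(main in "https://github.com/acme/sample-platform")     # True
print(main in "https://github.com/someone-else/other-repo")  # False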
Python
def decorated(*args, **kwargs):
    """
    Invokes ``func``, catches expected errors, prints the error message and
    exits sceptre with a non-zero exit code.
    """
    try:
        return func(*args, **kwargs)
    except KeyboardInterrupt:
        click.echo(" bye bye")
    except:
        if len(str(sys.exc_info()[1])) > 0:
            logger.error(sys.exc_info()[1])
        sys.exit(1)
Python
def haversine(lon1, lat1, lon2, lat2):
    """
    Calculate the great circle distance between two points
    on the earth (specified in decimal degrees).
    """
    # convert decimal degrees to radians
    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])

    # haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
    c = 2 * asin(sqrt(a))
    r = 3956  # radius of earth in miles; use 6371 for kilometers
    return c * r
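A quick usage sketch, assuming the function above and its math imports are in scope; the coordinates are approximate values for New York and London:

from math import radians, cos, sin, asin, sqrt  # used inside haversine

nyc = (-74.0060, 40.7128)      # (lon, lat)
london = (-0.1278, 51.5074)    # (lon, lat)
print(round(haversine(nyc[0], nyc[1], london[0], london[1])))  # roughly 3,500 miles with r = 3956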
Python
def clean_up_sentence(sentence):
    '''Tokenize and lemmatize a sentence and return the resulting words.'''
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [lemmatizer.lemmatize(word) for word in sentence_words]
    return sentence_words
Python
def bag_of_words(sentence):
    '''Convert a sentence into a list of zeros and ones indicating whether each vocabulary word is present.'''
    sentence_words = clean_up_sentence(sentence)
    bag = [0] * len(words)  # as many zeros as words
    for w in sentence_words:
        for i, word in enumerate(words):
            if word == w:  # if word inside bag - assign 1
                bag[i] = 1
    return np.array(bag)
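A toy illustration of the same bag encoding, using a made-up vocabulary in place of the module-level `words` list and pre-tokenized input instead of clean_up_sentence:

import numpy as np

words = ["hello", "how", "are", "you", "today"]  # stand-in vocabulary

def toy_bag(tokens):
    # same idea as bag_of_words, but taking already-tokenized input
    bag = [0] * len(words)
    for w in tokens:
        for i, word in enumerate(words):
            if word == w:
                bag[i] = 1
    return np.array(bag)

print(toy_bag(["hello", "you"]))  # [1 0 0 1 0]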
Python
def hit_stay(card_value):
    """
    Human chooses to hit or stay.

    :param card_value:
    :return:
    """
    global hitter, game, com_game, human_card_value, winner
    hitter = 4
    while True:
        try:
            print("\r To hit type :y To stay type :n ", end=" ")
            response = input()
            if response.lower() == 'y':
                printing_card(non_repeat[hitter])
                if deck[non_repeat[hitter]][1] == 'ace':
                    print("\rYou have an ace, Enter \"y\" to make it 1 or \"n\" to let it be 11 :", end=" ")
                    ace_decider = input()
                    if ace_decider.lower() == 'y':
                        deck[non_repeat[hitter]][2] = 1
                    else:
                        pass
                card_value = card_value + deck[non_repeat[hitter]][2]
                human_card_value = card_value  # returning the human_card_value
                if card_value > 21:
                    print(f"Human card value : {card_value}")
                    print("Busted com won")
                    winner = 'com'
                    hitter += 1
                    game = False
                    com_game = False
                    break
                elif card_value == 21:
                    print(card_value)
                    print(f"Human card value : {card_value}")
                    hitter += 1
                    break
                elif card_value < 21:
                    print(f"Human card value : {card_value}")
                    hitter += 1
                    continue
            elif response.lower() == 'n':
                print("so you are staying and Now Computer's turn")
                break
        except:
            continue
Python
def main(**kwargs):
    """
    Adds a dimension to a variable in a netCDF file.

    Pulls VARIABLE out of SOURCE and writes it to TARGET indexed by VALUE
    along DIMENSION in addition to the dimensions originally present for it
    in SOURCE. Defaults to reading from STDIN and writing to STDOUT, which
    are specified by supplying '-' as SOURCE and TARGET respectively.
    """
    return add_dimension(**kwargs)
Python
def db_session(engine):
    """ Provide a session context to communicate with the database. """
    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        yield session
        session.commit()
    except:  # noqa
        session.rollback()
        raise
    finally:
        session.close()
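A usage sketch, assuming db_session is decorated with contextlib.contextmanager in the original module (the decorator is not visible in this snippet) and that SQLAlchemy is installed; the SQLite URL is just an example:

from sqlalchemy import create_engine, text

engine = create_engine("sqlite:///:memory:")  # example engine

# db_session is assumed to be a @contextmanager-decorated generator
with db_session(engine) as session:
    session.execute(text("SELECT 1"))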
Python
def mapped_classes(metadata):
    """ Returns classes mapped to the openFRED database via SQLAlchemy.

    The classes are dynamically created and stored in a dictionary keyed by
    class names. The dictionary also contains the special entry `__Base__`,
    which is an SQLAlchemy `declarative_base` instance used as the base class
    from which all mapped classes inherit.
    """
    Base = declarative_base(metadata=metadata)
    classes = {"__Base__": Base}

    def map(name, registry, namespace):
        namespace["__tablename__"] = "openfred_" + name.lower()
        namespace["__table_args__"] = namespace.get("__table_args__", ()) + (
            {"keep_existing": True},
        )
        if namespace["__tablename__"][-1] != "s":
            namespace["__tablename__"] += "s"
        registry[name] = type(name, (registry["__Base__"],), namespace)

    map(
        "Timespan",
        classes,
        {
            "id": C(BI, primary_key=True),
            "start": C(DT),
            "stop": C(DT),
            "resolution": C(Interval),
            "segments": C(ARRAY(DT, dimensions=2)),
            "__table_args__": (UC("start", "stop", "resolution"),),
        },
    )

    map(
        "Location",
        classes,
        {
            "id": C(BI, primary_key=True),
            "point": C(
                geotypes.Geometry(geometry_type="POINT", srid=4326),
                unique=True,
            ),
        },
    )

    # TODO: Handle units.
    class Variable(Base):
        __table_args__ = ({"keep_existing": True},)
        __tablename__ = "openfred_variables"

        id = C(BI, primary_key=True)
        name = C(Str(255), nullable=False, unique=True)
        # TODO: Figure out whether and where this is in the '.nc' files.
        type = C(Str(37))
        netcdf_attributes = C(JSON)
        description = C(Text)
        standard_name = C(Str(255))

        __mapper_args__ = {
            "polymorphic_identity": "variable",
            "polymorphic_on": type,
        }

    classes["Variable"] = Variable

    class Flags(Variable):
        __table_args__ = ({"keep_existing": True},)
        __tablename__ = "openfred_flags"

        id = C(BI, FK(Variable.id), primary_key=True)
        flag_ks = C(ARRAY(Int), nullable=False)
        flag_vs = C(ARRAY(Str(37)), nullable=False)

        __mapper_args__ = {"polymorphic_identity": "flags"}

        def flag(self, key):
            # Plain lookup method; a @property cannot take a key argument.
            flags = dict(zip(self.flag_ks, self.flag_vs))
            return flags[key]

    classes["Flags"] = Flags

    class Series(Base):
        __tablename__ = "openfred_series"
        __table_args__ = (
            UC("height", "location_id", "timespan_id", "variable_id"),
            {"keep_existing": True},
        )

        id = C(BI, primary_key=True)
        values = C(ARRAY(Float), nullable=False)
        height = C(Float)
        timespan_id = C(BI, FK(classes["Timespan"].id), nullable=False)
        location_id = C(BI, FK(classes["Location"].id), nullable=False)
        variable_id = C(BI, FK(classes["Variable"].id), nullable=False)
        timespan = relationship(classes["Timespan"], backref="series")
        location = relationship(classes["Location"], backref="series")
        variable = relationship(classes["Variable"], backref="series")

    classes["Series"] = Series

    return classes
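A usage sketch, assuming the module-level imports and aliases that mapped_classes relies on (C, BI, DT, Interval, geotypes, and so on) are available as in the original openFRED module; the schema name is made up:

from sqlalchemy import MetaData

metadata = MetaData(schema="example_schema")  # hypothetical schema
classes = mapped_classes(metadata)

Series = classes["Series"]        # dynamically mapped class
Base = classes["__Base__"]        # shared declarative base
print(Series.__tablename__)       # openfred_series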
Python
def openFRED(context):
    """ The openFRED command line toolbox.

    Contains useful commands to work with openFRED related/supplied data.
    """
    context.obj = {}
Python
def db(context, configuration_file, section):
    """ Commands to work with openFRED databases. """
    if configuration_file is not None:
        oemof.db.load_config(configuration_file)
    context.obj["db"] = {"cfg": configuration_file, "section": section}
Python
def import_(context, cleanup, jobs, paths, variables):
    """ Import an openFRED dataset.

    For each path found in PATHS, imports the NetCDF files found under path.
    If path is a directory, it is traversed (recursively) and each NetCDF
    file, i.e. each file with the extension '.nc', found is imported. These
    directories are also continuously monitored for new files, which are
    imported too. If path points to a file, it is imported as is.
    """
    filepaths = {
        os.path.abspath(path) for path in paths if os.path.isfile(path)
    }
    section = context.obj["db"]["section"]
    schema = oemof.db.config.get(section, "schema")
    url = oemof.db.url(section)

    seen = set()
    manager = mp.Manager()
    messages = manager.Queue()
    pool = mp.Pool(jobs, maxtasksperchild=1)
    results = {"done": {}, "pending": {}}
    while True:
        filepaths.update(monitor(paths).difference(seen))
        results["pending"].update(
            {
                job: pool.apply_async(
                    wrap_process,
                    kwds={
                        "job": job,
                        "messages": messages,
                        "function": import_variable,
                        "arguments": dict(
                            arguments, **{"schema": schema, "url": url}
                        ),
                    },
                )
                for filepath in filepaths
                # This `if` block will never filter out anything, because
                # `not messages.put` always evaluates to `True`. The block is
                # only here for the side effect of putting an appropriate
                # message on the message queue.
                if not messages.put(
                    message(
                        "Main Process ({})".format(os.getpid()),
                        "Collecting {}.".format(filepath),
                    )
                )
                for arguments in import_nc_file(filepath, variables)
                for job in [(filepath, arguments["name"])]
            }
        )
        seen.update(filepaths)
        filepaths.clear()
        if cleanup:
            for path in seen.difference(
                path for path, _ in results["pending"]
            ):
                if os.path.isfile(path):
                    os.remove(path)
                    messages.put(
                        message(
                            "Main Process ({})".format(os.getpid()),
                            "Deleting {}.".format(path),
                        )
                    )
        if not results["pending"] and messages.empty():
            break
        move = [
            (job, result)
            for job, result in results["pending"].items()
            if result.ready()
        ]
        for job, result in move:
            del results["pending"][job]
            results["done"][job] = result.get()
        while not messages.empty():
            click.echo(messages.get_nowait())
        sleep(1)
Python
def analyze_mac(mac):
    '''
    Check whether the MAC is a Cisco formatted MAC or a normal MAC.
    '''
    cisco_pattern = re.compile(r"([0-9a-fA-F]{4}(?:\.[0-9a-fA-F]{4}){2})")
    linux_pattern = re.compile(r"([0-9a-fA-F]{2}(?::[0-9a-fA-F]{2}){5})")
    windows_pattern = re.compile(r"([0-9a-fA-F]{2}(?:-[0-9a-fA-F]{2}){5})")
    Cisco_MAC = re.findall(cisco_pattern, mac)
    Linux_MAC = re.findall(linux_pattern, mac)
    Windows_MAC = re.findall(windows_pattern, mac)
    mac_format = ""
    print('\033[1m', end="")
    print('\033[92m', end="")
    if Cisco_MAC:
        MAC = Cisco_MAC[0]
        print(f"[+] Cisco formatted MAC detected")
        mac_format = "cisco"
    elif Linux_MAC:
        MAC = Linux_MAC[0]
        print(f"[+] Linux formatted MAC detected")
        mac_format = "linux"
    elif Windows_MAC:
        MAC = Windows_MAC[0]
        print(f"[+] Windows formatted MAC detected")
        mac_format = "windows"
    else:
        print('\033[91m', end="")
        print(f"[❌] Invalid MAC address: {mac}")
        exit()
    print('\033[0m', end="")
    return MAC, mac_format
Python
def convert_cisco_mac_to_linux(mac):
    '''
    Gets a MAC and converts it to Linux format
    '''
    char_count = 1
    total_chars = 0
    mac_addr = []
    for char in mac.replace(".", ""):
        if char_count == 2 and total_chars != 11:
            mac_addr.append(char + ":")
            char_count = 1
        else:
            mac_addr.append(char)
            char_count += 1
        total_chars += 1
    mac = "".join(x for x in mac_addr)
    return mac
Python
def convert_linux_mac_to_cisco(mac):
    '''
    Gets a MAC and converts it to Cisco format
    '''
    char_count = 1
    total_chars = 0
    mac_addr = []
    for char in mac.replace(":", ""):
        if char_count == 4 and total_chars != 11:
            mac_addr.append(char + ".")
            char_count = 1
        else:
            mac_addr.append(char)
            char_count += 1
        total_chars += 1
    mac = "".join(x for x in mac_addr)
    return mac
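A round-trip sketch using the two converters above with a made-up address:

cisco_mac = "0011.2233.4455"                       # made-up address
linux_mac = convert_cisco_mac_to_linux(cisco_mac)  # "00:11:22:33:44:55"
print(linux_mac, convert_linux_mac_to_cisco(linux_mac))  # 00:11:22:33:44:55 0011.2233.4455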
Python
def convert_windows_mac_to_cisco(mac):
    '''
    Gets a MAC and converts it to Cisco format
    '''
    char_count = 1
    total_chars = 0
    mac_addr = []
    for char in mac.replace("-", ""):
        if char_count == 4 and total_chars != 11:
            mac_addr.append(char + ".")
            char_count = 1
        else:
            mac_addr.append(char)
            char_count += 1
        total_chars += 1
    mac = "".join(x for x in mac_addr)
    return mac
Python
def open_yaml_file(yamlfile):
    '''
    Opens a YAML file and returns its content to lookup_mac()
    '''
    with open(yamlfile, "r") as swfile:
        switches = yaml.safe_load(swfile)
    return switches
Python
def lookup_mac(username, password, mac, yamlfile):
    '''
    Receives MAC tables from the switches and looks for the intended MAC
    '''
    counter = 0
    switches = open_yaml_file(yamlfile)
    sites = switches.keys()
    switch_list = []
    seen_in_site = False
    print('\033[1m', end="")
    print('\033[92m', end="")
    print(f"[+] Searching for: ", end="")
    print('\033[94m', end="")
    print(f"{mac}")
    print('\033[92m', end="")
    for site in sites:
        if seen_in_site:
            break
        site_items = len(switches[site])
        print(f"[+] Looking up {site} site on {site_items} devices.")
        print("-" * 50)
        for sw in switches[site]:
            seen_in_sw = False
            swname = sw['name']
            swip = sw['mgmt_ip']
            sshport = sw['port']
            swname, mac_table = SSH_to_SW(username, password, swip, sshport, swname)
            if not mac_table:
                print('\033[93m', end="")
                print("[-] MAC table fetch was not successful")
                print('\033[0m', end="")
                continue
            for line in mac_table.splitlines():
                if mac in line:
                    counter += 1
                    switch_list.append(swname)
                    vlan = line.split()[0]
                    # MAC = line.split()[1]
                    mactype = line.split()[2]
                    port = line.split()[3]
                    seen_in_site = True
                    seen_in_sw = True
                    print('\033[1m', end="")
                    print('\033[92m', end="")
                    print(f" |---> [+] Found [{mac}] in {swname}", end="")
                    print(f" on port {port} and VLAN number of {vlan}")
                    print(f" |---> [*] MAC is learned {mactype}ALLY\n")
                    print('\033[0m', end="")
            if not seen_in_sw:
                print('\033[93m', end="")
                print(f"[-] MAC not found on {swname}")
                print('\033[0m', end="")
    print('\033[1m', end="")
    print('\033[95m')
    if counter == 0:
        print(f"[-] {mac} MAC address was not found")
        exit()
    print(f"[+] MAC was seen on {counter} switch(es)")
    for sw in switch_list:
        print(f"\t{sw}", flush=True)
    print('\033[0m')
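A hedged invocation sketch; the credentials, MAC address and inventory file name are placeholders. The MAC is passed in Cisco dotted format because the substring match runs against raw "show mac address-table" output, which on Cisco IOS is typically dotted and lower-case:

# Placeholder credentials and inventory path
lookup_mac("admin", "secret",
           convert_linux_mac_to_cisco("aa:bb:cc:dd:ee:ff"),
           "switches.yml")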
Python
def SSH_to_SW(username, password, swip, sshport, swname):
    ''' SSH to a switch and fetch its MAC address table '''
    get_mac_command = "show mac address-table"
    sshport = str(sshport)
    device = {
        'device_type': 'cisco_ios',
        'ip': swip,
        'port': sshport,
        'username': username,
        'password': password,
    }
    print('\033[1;36m', end="")
    print(f"[*] Connecting to {swname} using {swip}")
    print('\033[0m', end="")
    try:
        net_connect = ConnectHandler(**device)
        mac_table = net_connect.send_command(get_mac_command, delay_factor=10)
        net_connect.send_command("\n", delay_factor=10)
        return swname, mac_table
    except Exception as e:
        print('\033[91m', end="")
        print(f"[-] Could not connect to {swname} using {swip}")
        print(e)
        print('\033[0m', end="")
        # Return an empty result so the caller's tuple unpacking does not fail
        return swname, None
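A standalone call sketch; the host, port, credentials and device name are placeholders. If the connection fails the second element of the returned tuple is falsy and should be checked before use:

name, table = SSH_to_SW("admin", "secret", "192.0.2.10", 22, "core-sw1")
if table:
    print(table.splitlines()[0])  # first line of the MAC table output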
Python
def decode_form(form_b64: bytes) -> Dict[str, Union[str, datetime]]:
    """Get form data from QRCode.

    This function:
     - decodes the qr-code bytes as utf8 as we expect base64 anyway,
     - decodes the utf8 string as base64,
     - loads the result as json,
     - validates the resulting object against the module's jsonschema SCHEMA,
     - and finally tries to convert the date field into a datetime object

    If anything goes wrong a FormDecodeError is raised, otherwise the
    form data is returned as a dict.
    """
    try:
        form = json.loads(base64.b64decode(form_b64).decode())
        validate(form, SCHEMA)
        form['date_parsed'] = datetime.strptime(form['date'], "%d.%m.%Y")
    except Exception as e:
        raise FormDecodeError(
            "Loading or validation of form failed: {}".format(str(e)))
    return form
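main() below relies on an _encode_form() helper and a SCHEMA constant that are not part of this excerpt. A plausible counterpart to decode_form() would simply reverse the JSON/base64 steps; this is an assumption, not the project's actual implementation:

def _encode_form(form: dict) -> bytes:
    # Hypothetical inverse of decode_form(): JSON-serialise, then base64-encode
    return base64.b64encode(json.dumps(form).encode())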
Python
def main():
    """Demo how to use this lib"""
    test_form = {
        "version": "alpha",
        "payment": "regular",
        "firstname": "Hannah",
        "lastname": "Acker",
        "email": "[email protected]",
        "pgp": "0x1111111111111111",
        "addr1": "Hauptstraße 1",
        "addr2": "12345 Entenhausen",
        "addr3": "c/o Frank Nord",
        "country": "DE",
        "date": "29.3.2018",
    }
    try:
        form = decode_form(_encode_form(test_form))
        print("Valid Form: {}".format(form))
        test_form['version'] = "foobar"
        form = decode_form(_encode_form(test_form))
    except FormDecodeError as e:
        print("Invalid Form: {}".format(str(e)))
Python
def write_to_tmp_dir(self, new_contents):
    """
    Write new contents to the temporary directory of the program

    :param new_contents: The new contents of the program.
      Refer to *apply* method of :py:class:`.patch.Patch`
    :type new_contents: dict(str, ?)
    :rtype: None
    """
    for target_file in new_contents:
        engine = self.engines[target_file]
        tmp_path = os.path.join(self.work_path, target_file)
        engine.write_to_tmp_dir(new_contents[target_file], tmp_path)
Python
def dump(self, contents, file_name):
    """
    Convert the contents of a file to its source code

    :param contents: The contents of the program, i.e. the parsed form of the
      source code, keyed by file name
    :type contents: dict(str, ?)
    :param file_name: The name of the file to dump
    :type file_name: str
    :return: The source code
    :rtype: str
    """
    return self.engines[file_name].dump(contents[file_name])
Python
def apply(self, patch):
    """
    This method applies the patch to the target program.
    It does not directly modify the source code of the original program,
    but modifies the copied program within the temporary directory.

    :return: The contents of the patch-applied program, see *Hint*.
    :rtype: dict(str, list(str))

    .. hint::
        - key: The target file name (path) relative to the program root path
        - value: The contents of the file
    """
    self.reset_tmp_variant()
    new_contents = self.get_modified_contents(patch)
    self.write_to_tmp_dir(new_contents)
    return new_contents
Python
def diff(self, patch) -> str:
    """
    Compare the source code of the original program and the patch-applied program
    using the *difflib* module (https://docs.python.org/3.6/library/difflib.html).

    :return: The file comparison result
    :rtype: str
    """
    diffs = ''
    new_contents = self.get_modified_contents(patch)
    for file_name in self.target_files:
        orig = self.dump(self.contents, file_name)
        modi = self.dump(new_contents, file_name)
        orig_list = list(map(lambda s: s + '\n', orig.splitlines()))
        modi_list = list(map(lambda s: s + '\n', modi.splitlines()))
        for diff in difflib.context_diff(orig_list, modi_list,
                                         fromfile="before: " + file_name,
                                         tofile="after: " + file_name):
            diffs += diff
    return diffs
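A hedged sketch of how apply() and diff() might be used together; "program" and "patch" stand in for an instance of the surrounding class and of :py:class:`.patch.Patch`, neither of which is shown in this excerpt:

# Hypothetical objects: a program wrapping the target source tree and a patch to try
new_contents = program.apply(patch)   # writes the modified files to the temp directory
print(program.diff(patch))            # context diff between original and patched sources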