Dataset schema (column summaries as reported by the dataset viewer):

| column | dtype | values / lengths |
|---|---|---|
| repo | string (classes) | 856 values |
| pull_number | int64 | 3 to 127k |
| instance_id | string | lengths 12 to 58 |
| issue_numbers | sequence | lengths 1 to 5 |
| base_commit | string | length 40 (fixed) |
| patch | string | lengths 67 to 1.54M |
| test_patch | string | lengths 0 to 107M |
| problem_statement | string | lengths 3 to 307k |
| hints_text | string | lengths 0 to 908k |
| created_at | timestamp[s] | |
repo: ourownstory/neural_prophet
pull_number: 929
instance_id: ourownstory__neural_prophet-929
issue_numbers: ["810"]
base_commit: cce32c0dc4fba7c0e79944342454d2def955e51c
diff --git a/neuralprophet/df_utils.py b/neuralprophet/df_utils.py --- a/neuralprophet/df_utils.py +++ b/neuralprophet/df_utils.py @@ -24,10 +24,9 @@ class ShiftScale: def prep_or_copy_df(df): """Copy df if it contains the ID column. Creates ID column with '__df__' if it is a df with a single time series. - Converts a dict to the right df format (it will be deprecated soon). Parameters ---------- - df : pd.DataFrame, dict (deprecated) + df : pd.DataFrame df or dict containing data Returns ------- @@ -37,12 +36,9 @@ def prep_or_copy_df(df): whether the ID col was present bool wheter it is a single time series - bool - wheter a dict was received """ received_ID_col = False received_single_time_series = True - received_dict = False if isinstance(df, pd.DataFrame): new_df = df.copy(deep=True) if "ID" in df.columns: @@ -56,27 +52,18 @@ def prep_or_copy_df(df): else: new_df["ID"] = "__df__" log.debug("Received df with single time series") - elif isinstance(df, dict): - if len(df) > 1: - received_single_time_series = False - received_dict = True - log.warning("dict as input are deprecated. Please, use dataframes with ‘ID’ column instead") - new_df = pd.DataFrame() - for df_name, df_i in df.items(): - df_i["ID"] = df_name - new_df = pd.concat((new_df, df_i.copy(deep=True)), ignore_index=True) elif df is None: raise ValueError("df is None") else: - raise ValueError("Please, insert valid df type (i.e. pd.DataFrame, dict)") + raise ValueError("Please, insert valid df type (pd.DataFrame)") # list of IDs id_list = list(new_df.ID.unique()) - return new_df, received_ID_col, received_single_time_series, received_dict, id_list + return new_df, received_ID_col, received_single_time_series, id_list -def return_df_in_original_format(df, received_ID_col=False, received_single_time_series=True, received_dict=False): +def return_df_in_original_format(df, received_ID_col=False, received_single_time_series=True): """Return dataframe in the original format. 
Parameters @@ -87,22 +74,16 @@ def return_df_in_original_format(df, received_ID_col=False, received_single_time whether the ID col was present received_single_time_series: bool wheter it is a single time series - received_dict: bool - wheter data originated from a dict Returns ------- - pd.Dataframe, dict (deprecated) + pd.Dataframe original input format """ - if received_dict: - new_df = {df_name: df_i.loc[:, df.columns != "ID"].copy(deep=True) for (df_name, df_i) in df.groupby("ID")} - log.info("Returning dict") - else: - new_df = df.copy(deep=True) - if not received_ID_col and received_single_time_series: - assert len(new_df["ID"].unique()) == 1 - new_df.drop("ID", axis=1, inplace=True) - log.info("Returning df with no ID column") + new_df = df.copy(deep=True) + if not received_ID_col and received_single_time_series: + assert len(new_df["ID"].unique()) == 1 + new_df.drop("ID", axis=1, inplace=True) + log.info("Returning df with no ID column") return new_df @@ -305,7 +286,7 @@ def init_data_params( ShiftScale entries containing ``shift`` and ``scale`` parameters for each column """ # Compute Global data params - df, _, _, _, _ = prep_or_copy_df(df) + df, _, _, _ = prep_or_copy_df(df) df_merged = df.copy(deep=True).drop("ID", axis=1) global_data_params = data_params_definition( df_merged, normalize, config_lagged_regressors, config_regressors, config_events @@ -512,7 +493,7 @@ def check_dataframe(df, check_y=True, covariates=None, regressors=None, events=N pd.DataFrame or dict checked dataframe """ - df, _, _, _, _ = prep_or_copy_df(df) + df, _, _, _ = prep_or_copy_df(df) checked_df = pd.DataFrame() regressors_to_remove = [] for df_name, df_i in df.groupby("ID"): @@ -666,7 +647,7 @@ def _crossvalidation_with_time_threshold(df, n_lags, n_forecasts, k, fold_pct, f min_train = total_samples - samples_fold - (k - 1) * (samples_fold - samples_overlap) assert min_train >= samples_fold folds = [] - df_fold, _, _, _, _ = prep_or_copy_df(df) + df_fold, _, _, _ = prep_or_copy_df(df) for i in range(k, 0, -1): threshold_time_stamp = find_time_threshold(df_fold, n_lags, n_forecasts, samples_fold, inputs_overbleed=True) df_train, df_val = split_considering_timestamp( @@ -725,7 +706,7 @@ def crossvalidation_split_df( validation data """ - df, _, _, _, _ = prep_or_copy_df(df) + df, _, _, _ = prep_or_copy_df(df) if len(df["ID"].unique()) == 1: for df_name, df_i in df.groupby("ID"): folds = _crossvalidation_split_df(df_i, n_lags, n_forecasts, k, fold_pct, fold_overlap_pct) @@ -783,7 +764,7 @@ def double_crossvalidation_split_df(df, n_lags, n_forecasts, k, valid_pct, test_ tuple of k tuples [(folds_val, folds_test), …] elements same as :meth:`crossvalidation_split_df` returns """ - df, _, _, _, _ = prep_or_copy_df(df) + df, _, _, _ = prep_or_copy_df(df) if len(df["ID"].unique()) > 1: raise NotImplementedError("double_crossvalidation_split_df not implemented for df with many time series") fold_pct_test = float(test_pct) / k @@ -943,7 +924,7 @@ def split_df(df, n_lags, n_forecasts, valid_p=0.2, inputs_overbleed=True, local_ pd.DataFrame, dict validation data """ - df, _, _, _, _ = prep_or_copy_df(df) + df, _, _, _ = prep_or_copy_df(df) df_train = pd.DataFrame() df_val = pd.DataFrame() if local_split: @@ -1330,7 +1311,7 @@ def infer_frequency(df, freq, n_lags, min_freq_percentage=0.7): Valid frequency tag according to major frequency. 
""" - df, _, _, _, _ = prep_or_copy_df(df) + df, _, _, _ = prep_or_copy_df(df) freq_df = list() for df_name, df_i in df.groupby("ID"): freq_df.append(_infer_frequency(df_i, freq, min_freq_percentage)) @@ -1374,7 +1355,6 @@ def create_dict_for_events_or_regressors(df, other_df, other_df_name): # Not su received_ID_col, _, _, - _, ) = prep_or_copy_df(other_df) # if other_df does not contain ID, create dictionary with original ID with the same other_df for each ID if not received_ID_col: diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -671,7 +671,7 @@ def fit(self, df, freq="auto", validation_df=None, progress="bar", minimal=False # Setup # List of different time series IDs, for global-local modelling (if enabled) - df, _, _, _, self.id_list = df_utils.prep_or_copy_df(df) + df, _, _, self.id_list = df_utils.prep_or_copy_df(df) # When only one time series is input, self.id_list = ['__df__'] self.nb_trends_modelled = len(self.id_list) if self.config_trend.trend_global_local == "local" else 1 @@ -689,7 +689,7 @@ def fit(self, df, freq="auto", validation_df=None, progress="bar", minimal=False ) # Pre-processing - df, _, _, _, _ = df_utils.prep_or_copy_df(df) + df, _, _, _ = df_utils.prep_or_copy_df(df) df = self._check_dataframe(df, check_y=True, exogenous=True) self.data_freq = df_utils.infer_frequency(df, n_lags=self.max_lags, freq=freq) df = self._handle_missing_data(df, freq=self.data_freq) @@ -701,7 +701,7 @@ def fit(self, df, freq="auto", validation_df=None, progress="bar", minimal=False if validation_df is None: metrics_df = self._train(df, minimal=minimal, continue_training=continue_training) else: - df_val, _, _, _, _ = df_utils.prep_or_copy_df(validation_df) + df_val, _, _, _ = df_utils.prep_or_copy_df(validation_df) df_val = self._check_dataframe(df_val, check_y=False, exogenous=False) df_val = self._handle_missing_data(df_val, freq=self.data_freq) metrics_df = self._train(df, df_val=df_val, minimal=minimal, continue_training=continue_training) @@ -760,7 +760,7 @@ def predict(self, df, decompose=True, raw=False): log.warning("Raw forecasts are incompatible with plotting utilities") if self.fitted is False: raise ValueError("Model has not been fitted. Predictions will be random.") - df, received_ID_col, received_single_time_series, received_dict, _ = df_utils.prep_or_copy_df(df) + df, received_ID_col, received_single_time_series, _ = df_utils.prep_or_copy_df(df) # to get all forecasteable values with df given, maybe extend into future: df, periods_added = self._maybe_extend_df(df) df = self._prepare_dataframe_to_predict(df) @@ -781,9 +781,7 @@ def predict(self, df, decompose=True, raw=False): if periods_added[df_name] > 0: fcst = fcst[: -periods_added[df_name]] forecast = pd.concat((forecast, fcst), ignore_index=True) - df = df_utils.return_df_in_original_format( - forecast, received_ID_col, received_single_time_series, received_dict - ) + df = df_utils.return_df_in_original_format(forecast, received_ID_col, received_single_time_series) self.predict_steps = self.n_forecasts return df @@ -799,7 +797,7 @@ def test(self, df): pd.DataFrame evaluation metrics """ - df, _, _, _, _ = df_utils.prep_or_copy_df(df) + df, _, _, _ = df_utils.prep_or_copy_df(df) if self.fitted is False: log.warning("Model has not been fitted. 
Test results will be random.") df = self._check_dataframe(df, check_y=True, exogenous=True) @@ -926,7 +924,7 @@ def split_df(self, df, freq="auto", valid_p=0.2, local_split=False): 1 2022-12-13 8.02 data2 2 2022-12-13 8.30 data3 """ - df, received_ID_col, received_single_time_series, received_dict, _ = df_utils.prep_or_copy_df(df) + df, received_ID_col, received_single_time_series, _ = df_utils.prep_or_copy_df(df) df = self._check_dataframe(df, check_y=False, exogenous=False) freq = df_utils.infer_frequency(df, n_lags=self.max_lags, freq=freq) df = self._handle_missing_data(df, freq=freq, predicting=False) @@ -938,12 +936,8 @@ def split_df(self, df, freq="auto", valid_p=0.2, local_split=False): inputs_overbleed=True, local_split=local_split, ) - df_train = df_utils.return_df_in_original_format( - df_train, received_ID_col, received_single_time_series, received_dict - ) - df_val = df_utils.return_df_in_original_format( - df_val, received_ID_col, received_single_time_series, received_dict - ) + df_train = df_utils.return_df_in_original_format(df_train, received_ID_col, received_single_time_series) + df_val = df_utils.return_df_in_original_format(df_val, received_ID_col, received_single_time_series) return df_train, df_val def crossvalidation_split_df( @@ -1092,7 +1086,7 @@ def crossvalidation_split_df( 1 2022-12-10 8.25 data2 2 2022-12-10 7.55 data3 """ - df, received_ID_col, received_single_time_series, _, _ = df_utils.prep_or_copy_df(df) + df, received_ID_col, received_single_time_series, _ = df_utils.prep_or_copy_df(df) df = self._check_dataframe(df, check_y=False, exogenous=False) freq = df_utils.infer_frequency(df, n_lags=self.max_lags, freq=freq) df = self._handle_missing_data(df, freq=freq, predicting=False) @@ -1137,7 +1131,7 @@ def double_crossvalidation_split_df(self, df, freq="auto", k=5, valid_pct=0.10, tuple of k tuples [(folds_val, folds_test), …] elements same as :meth:`crossvalidation_split_df` returns """ - df, _, _, _, _ = df_utils.prep_or_copy_df(df) + df, _, _, _ = df_utils.prep_or_copy_df(df) df = self._check_dataframe(df, check_y=False, exogenous=False) freq = df_utils.infer_frequency(df, n_lags=self.max_lags, freq=freq) df = self._handle_missing_data(df, freq=freq, predicting=False) @@ -1172,7 +1166,7 @@ def create_df_with_events(self, df, events_df): "The events configs should be added to the NeuralProphet object (add_events fn)" "before creating the data with events features" ) - df, received_ID_col, received_single_time_series, received_dict, _ = df_utils.prep_or_copy_df(df) + df, received_ID_col, received_single_time_series, _ = df_utils.prep_or_copy_df(df) df = self._check_dataframe(df, check_y=True, exogenous=False) df_dict_events = df_utils.create_dict_for_events_or_regressors(df, events_df, "events") df_created = pd.DataFrame() @@ -1186,9 +1180,7 @@ def create_df_with_events(self, df, events_df): ) df_aux["ID"] = df_name df_created = pd.concat((df_created, df_aux), ignore_index=True) - df = df_utils.return_df_in_original_format( - df_created, received_ID_col, received_single_time_series, received_dict - ) + df = df_utils.return_df_in_original_format(df_created, received_ID_col, received_single_time_series) return df def make_future_dataframe(self, df, events_df=None, regressors_df=None, periods=None, n_historic_predictions=False): @@ -1242,7 +1234,7 @@ def make_future_dataframe(self, df, events_df=None, regressors_df=None, periods= >>> forecast = m.predict(df=future) """ - df, received_ID_col, received_single_time_series, received_dict, _ = 
df_utils.prep_or_copy_df(df) + df, received_ID_col, received_single_time_series, _ = df_utils.prep_or_copy_df(df) events_dict = df_utils.create_dict_for_events_or_regressors(df, events_df, "events") regressors_dict = df_utils.create_dict_for_events_or_regressors(df, regressors_df, "regressors") @@ -1259,7 +1251,7 @@ def make_future_dataframe(self, df, events_df=None, regressors_df=None, periods= df_future_dataframe = pd.concat((df_future_dataframe, df_aux), ignore_index=True) df_future = df_utils.return_df_in_original_format( - df_future_dataframe, received_ID_col, received_single_time_series, received_dict + df_future_dataframe, received_ID_col, received_single_time_series ) return df_future @@ -1317,7 +1309,7 @@ def predict_trend(self, df, quantile=0.5): if quantile is not None and not (0 < quantile < 1): raise ValueError("The quantile specified need to be a float in-between (0,1)") - df, received_ID_col, received_single_time_series, received_dict, _ = df_utils.prep_or_copy_df(df) + df, received_ID_col, received_single_time_series, _ = df_utils.prep_or_copy_df(df) df = self._check_dataframe(df, check_y=False, exogenous=False) df = self._normalize(df) df_trend = pd.DataFrame() @@ -1340,9 +1332,7 @@ def predict_trend(self, df, quantile=0.5): trend = trend * data_params["y"].scale + data_params["y"].shift df_aux = pd.DataFrame({"ds": df_i["ds"], "trend": trend, "ID": df_name}) df_trend = pd.concat((df_trend, df_aux), ignore_index=True) - df = df_utils.return_df_in_original_format( - df_trend, received_ID_col, received_single_time_series, received_dict - ) + df = df_utils.return_df_in_original_format(df_trend, received_ID_col, received_single_time_series) return df def predict_seasonal_components(self, df, quantile=0.5): @@ -1363,7 +1353,7 @@ def predict_seasonal_components(self, df, quantile=0.5): if quantile is not None and not (0 < quantile < 1): raise ValueError("The quantile specified need to be a float in-between (0,1)") - df, received_ID_col, received_single_time_series, received_dict, _ = df_utils.prep_or_copy_df(df) + df, received_ID_col, received_single_time_series, _ = df_utils.prep_or_copy_df(df) df = self._check_dataframe(df, check_y=False, exogenous=False) df = self._normalize(df) df_seasonal = pd.DataFrame() @@ -1410,9 +1400,7 @@ def predict_seasonal_components(self, df, quantile=0.5): predicted[name] = predicted[name] * data_params["y"].scale df_aux = pd.DataFrame({"ds": df_i["ds"], "ID": df_i["ID"], **predicted}) df_seasonal = pd.concat((df_seasonal, df_aux), ignore_index=True) - df = df_utils.return_df_in_original_format( - df_seasonal, received_ID_col, received_single_time_series, received_dict - ) + df = df_utils.return_df_in_original_format(df_seasonal, received_ID_col, received_single_time_series) return df def set_true_ar_for_eval(self, true_ar_weights): @@ -1488,7 +1476,7 @@ def plot(self, fcst, df_name=None, ax=None, xlabel="ds", ylabel="y", figsize=(10 * ``matplotlib``: use matplotlib for plotting * (default) ``default``: use the global default for plotting """ - fcst, received_ID_col, received_single_time_series, received_dict, _ = df_utils.prep_or_copy_df(fcst) + fcst, received_ID_col, received_single_time_series, _ = df_utils.prep_or_copy_df(fcst) if not received_single_time_series: if df_name not in fcst["ID"].unique(): assert len(fcst["ID"].unique()) > 1 @@ -1595,7 +1583,7 @@ def get_latest_forecast( """ if self.max_lags == 0: raise ValueError("Use the standard plot function for models without lags.") - fcst, received_ID_col, received_single_time_series, 
received_dict, _ = df_utils.prep_or_copy_df(fcst) + fcst, received_ID_col, received_single_time_series, _ = df_utils.prep_or_copy_df(fcst) if not received_single_time_series: if df_name not in fcst["ID"].unique(): assert len(fcst["ID"].unique()) > 1 @@ -1663,7 +1651,7 @@ def plot_latest_forecast( """ if self.max_lags == 0: raise ValueError("Use the standard plot function for models without lags.") - fcst, received_ID_col, received_single_time_series, received_dict, _ = df_utils.prep_or_copy_df(fcst) + fcst, received_ID_col, received_single_time_series, _ = df_utils.prep_or_copy_df(fcst) if not received_single_time_series: if df_name not in fcst["ID"].unique(): assert len(fcst["ID"].unique()) > 1 @@ -1746,7 +1734,7 @@ def plot_components( matplotlib.axes.Axes plot of NeuralProphet components """ - fcst, received_ID_col, received_single_time_series, received_dict, _ = df_utils.prep_or_copy_df(fcst) + fcst, received_ID_col, received_single_time_series, _ = df_utils.prep_or_copy_df(fcst) if not received_single_time_series: if df_name not in fcst["ID"].unique(): assert len(fcst["ID"].unique()) > 1 @@ -1976,7 +1964,7 @@ def _create_dataset(self, df, predict_mode): ------- TimeDataset """ - df, _, _, _, _ = df_utils.prep_or_copy_df(df) + df, _, _, _ = df_utils.prep_or_copy_df(df) return time_dataset.GlobalTimeDataset( df, predict_mode=predict_mode, @@ -2139,7 +2127,7 @@ def _handle_missing_data(self, df, freq, predicting=False): ------- pre-processed df """ - df, _, _, _, _ = df_utils.prep_or_copy_df(df) + df, _, _, _ = df_utils.prep_or_copy_df(df) df_handled_missing = pd.DataFrame() for df_name, df_i in df.groupby("ID"): df_handled_missing_aux = self.__handle_missing_data(df_i, freq, predicting).copy(deep=True) @@ -2170,7 +2158,7 @@ def _check_dataframe(self, df, check_y=True, exogenous=True): pd.DataFrame checked dataframe """ - df, _, _, _, _ = df_utils.prep_or_copy_df(df) + df, _, _, _ = df_utils.prep_or_copy_df(df) df, regressors_to_remove = df_utils.check_dataframe( df=df, check_y=check_y, @@ -2249,7 +2237,7 @@ def _normalize(self, df): ------- df: pd.DataFrame, normalized """ - df, _, _, _, _ = df_utils.prep_or_copy_df(df) + df, _, _, _ = df_utils.prep_or_copy_df(df) df_norm = pd.DataFrame() for df_name, df_i in df.groupby("ID"): data_params = self.config_normalization.get_data_params(df_name) @@ -2271,7 +2259,7 @@ def _init_train_loader(self, df): ------- torch DataLoader """ - df, _, _, _, _ = df_utils.prep_or_copy_df(df) + df, _, _, _ = df_utils.prep_or_copy_df(df) # if not self.fitted: self.config_normalization.init_data_params( df=df, @@ -2316,7 +2304,7 @@ def _init_val_loader(self, df): ------- torch DataLoader """ - df, _, _, _, _ = df_utils.prep_or_copy_df(df) + df, _, _, _ = df_utils.prep_or_copy_df(df) df = self._normalize(df) dataset = self._create_dataset(df, predict_mode=False) loader = DataLoader(dataset, batch_size=min(1024, len(dataset)), shuffle=False, drop_last=False) @@ -2343,12 +2331,12 @@ def _train(self, df, df_val=None, minimal=False, continue_training=False): metrics """ # Set up data the training dataloader - df, _, _, _, _ = df_utils.prep_or_copy_df(df) + df, _, _, _ = df_utils.prep_or_copy_df(df) train_loader = self._init_train_loader(df) # Set up data the validation dataloader if df_val is not None: - df_val, _, _, _, _ = df_utils.prep_or_copy_df(df_val) + df_val, _, _, _ = df_utils.prep_or_copy_df(df_val) val_loader = self._init_val_loader(df_val) # TODO: check how to handle this with Lightning (the rest moved to utils.configure_denormalization)
diff --git a/tests/test_integration.py b/tests/test_integration.py --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -63,7 +63,7 @@ def test_df_utils_func(): df, _ = df_utils.check_dataframe(df, check_y=False) # test find_time_threshold - df, _, _, _, _ = df_utils.prep_or_copy_df(df) + df, _, _, _ = df_utils.prep_or_copy_df(df) time_threshold = df_utils.find_time_threshold(df, n_lags=2, n_forecasts=2, valid_p=0.2, inputs_overbleed=True) df_train, df_val = df_utils.split_considering_timestamp( df, n_lags=2, n_forecasts=2, inputs_overbleed=True, threshold_time_stamp=time_threshold @@ -1563,82 +1563,6 @@ def test_drop_missing_values_after_imputation(): forecast = m2.predict(df=future) -def test_dict_input(): - ### Deprecated - dict as input - log.info("Global Modeling - Dict as input") - df = pd.read_csv(PEYTON_FILE, nrows=512) - df1_0 = df.iloc[:128, :].copy(deep=True) - df2_0 = df.iloc[128:256, :].copy(deep=True) - df3_0 = df.iloc[256:384, :].copy(deep=True) - df4_0 = df.iloc[384:, :].copy(deep=True) - train_input = {0: df1_0, 1: {"df1": df1_0, "df2": df2_0}, 2: {"df1": df1_0, "df2": df2_0}} - test_input = {0: df3_0, 1: {"df1": df3_0}, 2: {"df1": df3_0, "df2": df4_0}} - info_input = { - 0: "Testing df train / df test - no events, no regressors", - 1: "Testing dict df train / df test - no events, no regressors", - 2: "Testing dict df train / dict df test - no events, no regressors", - } - for i in range(0, 3): - log.info(info_input[i]) - m = NeuralProphet( - n_forecasts=2, - n_lags=10, - epochs=EPOCHS, - batch_size=BATCH_SIZE, - learning_rate=LR, - trend_global_local="global", - season_global_local="global", - ) - metrics = m.fit(train_input[i], freq="D") - forecast = m.predict(df=test_input[i]) - forecast_trend = m.predict_trend(df=test_input[i]) - forecast_seasonal_componets = m.predict_seasonal_components(df=test_input[i]) - if PLOT: - forecast = forecast if isinstance(forecast, dict) else {"df": forecast} - for key in forecast: - fig1 = m.plot(forecast[key]) - if key != "df": - fig2 = m.plot_parameters(df_name=key) - else: - fig2 = m.plot_parameters() - with pytest.raises(ValueError): - forecast = m.predict({"df4": df4_0}) - log.info("Error - dict with names not provided in the train dict (not in the data params dict)") - with pytest.raises(ValueError): - metrics = m.test({"df4": df4_0}) - log.info("Error - dict with names not provided in the train dict (not in the data params dict)") - m = NeuralProphet( - n_forecasts=2, - n_lags=10, - epochs=EPOCHS, - batch_size=BATCH_SIZE, - learning_rate=LR, - trend_global_local="global", - season_global_local="global", - ) - m.fit({"df1": df1_0, "df2": df2_0}, freq="D") - with pytest.raises(ValueError): - forecast = m.predict({"df4": df4_0}) - # log.info("unknown_data_normalization was not set to True") - with pytest.raises(ValueError): - metrics = m.test({"df4": df4_0}) - # log.info("unknown_data_normalization was not set to True") - with pytest.raises(ValueError): - forecast_trend = m.predict_trend({"df4": df4_0}) - # log.info("unknown_data_normalization was not set to True") - with pytest.raises(ValueError): - forecast_seasonal_componets = m.predict_seasonal_components({"df4": df4_0}) - # log.info("unknown_data_normalization was not set to True") - # Set unknown_data_normalization to True - now there should be no errors - m.config_normalization.unknown_data_normalization = True - forecast = m.predict({"df4": df4_0}) - metrics = m.test({"df4": df4_0}) - forecast_trend = m.predict_trend({"df4": df4_0}) - forecast_seasonal_componets 
= m.predict_seasonal_components({"df4": df4_0}) - m.plot_parameters(df_name="df1") - m.plot_parameters() - - def test_predict_raw(): df = pd.read_csv(PEYTON_FILE, nrows=NROWS) diff --git a/tests/test_unit.py b/tests/test_unit.py --- a/tests/test_unit.py +++ b/tests/test_unit.py @@ -104,7 +104,7 @@ def test_normalize(): learning_rate=LR, normalize="soft", ) - df, _, _, _, _ = df_utils.prep_or_copy_df(df) + df, _, _, _ = df_utils.prep_or_copy_df(df) # with config m.config_normalization.init_data_params(df, m.config_lagged_regressors, m.config_regressors, m.config_events) df_norm = m._normalize(df)
[enhancement] Remove support for dicts of df as input

We have been warning that dicts are deprecated as inputs. Now we remove support for them, simplifying the codebase in our next major release.
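For reference, a minimal sketch of the call-site change implied by the diff above (the dataframe is hypothetical; function and flag names are taken from the patch):

```python
import pandas as pd
from neuralprophet import df_utils

df = pd.DataFrame({"ds": pd.date_range("2022-01-01", periods=5, freq="D"), "y": range(5)})

# Before: df, received_ID_col, received_single_time_series, received_dict, id_list = ...
# After: the received_dict flag is gone and the tuple has four elements.
df, received_ID_col, received_single_time_series, id_list = df_utils.prep_or_copy_df(df)

# Dicts are no longer converted; passing one now raises:
# ValueError: Please, insert valid df type (pd.DataFrame)
```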
created_at: 2022-11-02T21:54:24
repo: ourownstory/neural_prophet
pull_number: 942
instance_id: ourownstory__neural_prophet-942
issue_numbers: ["941"]
base_commit: cce32c0dc4fba7c0e79944342454d2def955e51c
diff --git a/neuralprophet/plot_forecast.py b/neuralprophet/plot_forecast.py --- a/neuralprophet/plot_forecast.py +++ b/neuralprophet/plot_forecast.py @@ -89,14 +89,14 @@ def plot( ] if highlight_forecast is None or line_per_origin: - for i, name in enumerate(reversed(yhat_col_names_no_qts)): + for i, name in enumerate(yhat_col_names_no_qts): ax.plot( ds, - fcst[name], + fcst[f"{colname}{i if line_per_origin else i + 1}"], ls="-", c="#0072B2", alpha=0.2 + 2.0 / (i + 2.5), - label=f"{colname}{i if line_per_origin else i + 1}", + label=name, ) if len(quantiles) > 1: diff --git a/neuralprophet/plot_forecast_plotly.py b/neuralprophet/plot_forecast_plotly.py --- a/neuralprophet/plot_forecast_plotly.py +++ b/neuralprophet/plot_forecast_plotly.py @@ -86,7 +86,7 @@ def plot(fcst, quantiles, xlabel="ds", ylabel="y", highlight_forecast=None, line data = [] if highlight_forecast is None or line_per_origin: - for i, yhat_col_name in enumerate(reversed(yhat_col_names_no_qts)): + for i, yhat_col_name in enumerate(yhat_col_names_no_qts): data.append( go.Scatter( name=yhat_col_name,
Plot labels of yhatx/origin-x are reversed for plotly

* If yhats are plotted: yhat1 is the fcst predicted 1 step ago, being the most accurate --> should be darkest blue in plot
* If origin-x are plotted (plot_latest_forecast): origin-0 is the very last forecast --> should be darkest blue in plot
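In the matplotlib hunk above, the alpha is largest for the first loop iteration, so iterating in natural order (no `reversed()`) draws yhat1 darkest and keeps each label attached to the series actually plotted. A standalone sketch with the alpha formula copied from the diff and hypothetical data:

```python
import matplotlib.pyplot as plt
import numpy as np

ds = np.arange(30)
yhat_col_names = ["yhat1", "yhat2", "yhat3"]  # yhat1 = predicted 1 step ago

fig, ax = plt.subplots()
for i, name in enumerate(yhat_col_names):  # natural order, no reversed()
    # alpha = 0.2 + 2.0 / (i + 2.5) is highest at i = 0, so yhat1 is the
    # darkest line and its label matches the column being drawn
    ax.plot(ds, np.random.rand(30) + i, ls="-", c="#0072B2",
            alpha=0.2 + 2.0 / (i + 2.5), label=name)
ax.legend()
plt.show()
```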
created_at: 2022-11-09T02:19:01
repo: ourownstory/neural_prophet
pull_number: 961
instance_id: ourownstory__neural_prophet-961
issue_numbers: ["938"]
base_commit: 135e8b147633f5124c83a78693cc94f1d3d8ad91
diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -295,6 +295,9 @@ class NeuralProphet: Options * ``True``: test data is normalized with global data params even if trained with local data params (global modeling with local normalization) * (default) ``False``: no global modeling with local normalization + accelerator: str + Name of accelerator from pytorch_lightning.accelerators to use for training. Use "auto" to automatically select an available accelerator. + Provide `None` to deactivate the use of accelerators. trainer_config: dict Dictionary of additional trainer configuration parameters. """ @@ -337,6 +340,7 @@ def __init__( global_normalization=False, global_time_normalization=True, unknown_data_normalization=False, + accelerator=None, trainer_config={}, ): kwargs = locals() @@ -401,6 +405,7 @@ def __init__( # Pytorch Lightning Trainer self.metrics_logger = MetricsLogger(save_dir=os.getcwd()) + self.accelerator = accelerator self.trainer_config = trainer_config self.trainer = None @@ -2377,6 +2382,7 @@ def _train(self, df, df_val=None, minimal=False, continue_training=False): config=self.trainer_config, metrics_logger=self.metrics_logger, early_stopping_target="Loss_val" if df_val is not None else "Loss", + accelerator=self.accelerator, minimal=minimal, num_batches_per_epoch=len(train_loader), ) @@ -2439,6 +2445,7 @@ def restore_trainer(self): config_train=self.config_train, config=self.trainer_config, metrics_logger=self.metrics_logger, + accelerator=self.accelerator, ) self.metrics = metrics.get_metrics(self.collect_metrics) diff --git a/neuralprophet/utils.py b/neuralprophet/utils.py --- a/neuralprophet/utils.py +++ b/neuralprophet/utils.py @@ -748,6 +748,7 @@ def configure_trainer( config: dict, metrics_logger, early_stopping_target: str = "Loss", + accelerator: str = None, minimal=False, num_batches_per_epoch=100, ): @@ -764,6 +765,8 @@ def configure_trainer( MetricsLogger object to log metrics to. early_stopping_target : str Target metric to use for early stopping. + accelerator : str + Accelerator to use for training. minimal : bool If True, no metrics are logged and no progress bar is displayed. num_batches_per_epoch : int @@ -789,6 +792,24 @@ def configure_trainer( if "default_root_dir" not in config.keys(): config["default_root_dir"] = os.getcwd() + # Accelerator + if isinstance(accelerator, str): + if (accelerator == "auto" and torch.cuda.is_available()) or accelerator == "gpu": + config["accelerator"] = "gpu" + config["devices"] = -1 + elif (accelerator == "auto" and hasattr(torch.backends, "mps")) or accelerator == "mps": + if torch.backends.mps.is_available(): + config["accelerator"] = "mps" + config["devices"] = 1 + elif accelerator != "auto": + config["accelerator"] = accelerator + config["devices"] = 1 + + if hasattr(config, "accelerator"): + log.info(f"Using accelerator {config['accelerator']} with {config['devices']} device(s).") + else: + log.info("No accelerator available. Using CPU for training.") + # Configure callbacks callbacks = []
[lightning] GPU support

> Depends on the Lightning migration PR #837

Adds functionality to train the TimeNet model using an accelerator. GPU and MPS (M1) are supported automatically; all other available PyTorch Lightning accelerators (e.g. TPU) can be selected via manual declaration. Closes #420
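A minimal usage sketch of the new constructor argument (the dataframe is synthetic; `accelerator` is the only parameter this PR adds):

```python
import numpy as np
import pandas as pd
from neuralprophet import NeuralProphet

# "auto" picks CUDA or MPS when available; None (the default) trains on CPU,
# and any other pytorch_lightning accelerator name can be passed explicitly.
m = NeuralProphet(accelerator="auto")

df = pd.DataFrame({
    "ds": pd.date_range("2022-01-01", periods=100, freq="D"),
    "y": np.random.rand(100),
})
metrics = m.fit(df, freq="D")
```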
48a0e3d

## Model Benchmark

| Benchmark | Metric | main | current | diff | |
|---------------|-------------|-----------|-----------|---------|--------------------|
| AirPassengers | MAE_val | 85.1099 | 15.2698 | -82.06% | :white_check_mark: |
| AirPassengers | RMSE_val | 108.276 | 19.4209 | -82.06% | :white_check_mark: |
| AirPassengers | Loss_val | nan | 0.00195 | 0.0% | :white_check_mark: |
| AirPassengers | RegLoss_val | nan | 0 | 0.0% | :white_check_mark: |
| AirPassengers | epoch | nan | 89 | 0.0% | :white_check_mark: |
| AirPassengers | MAE | 6.35364 | 9.82902 | 54.7% | :warning: |
| AirPassengers | RMSE | 7.68085 | 11.7005 | 52.33% | :warning: |
| AirPassengers | Loss | 0.00023 | 0.00056 | 140.91% | :warning: |
| AirPassengers | RegLoss | 0 | 0 | 0.0% | :white_check_mark: |
| PeytonManning | MAE_val | 0.92518 | 0.64636 | -30.14% | :white_check_mark: |
| PeytonManning | RMSE_val | 1.13074 | 0.79276 | -29.89% | :white_check_mark: |
| PeytonManning | Loss_val | nan | 0.01494 | 0.0% | :white_check_mark: |
| PeytonManning | RegLoss_val | nan | 0 | 0.0% | :white_check_mark: |
| PeytonManning | epoch | nan | 37 | 0.0% | :white_check_mark: |
| PeytonManning | MAE | 0.34839 | 0.42701 | 22.57% | :warning: |
| PeytonManning | RMSE | 0.48617 | 0.57032 | 17.31% | :warning: |
| PeytonManning | Loss | 0.00464 | 0.00635 | 36.95% | :warning: |
| PeytonManning | RegLoss | 0 | 0 | 0.0% | :white_check_mark: |
| YosemiteTemps | MAE_val | 1.71173 | 1.72949 | 1.04% | :white_check_mark: |
| YosemiteTemps | RMSE_val | 2.2758 | 2.27386 | -0.08% | :white_check_mark: |
| YosemiteTemps | Loss_val | nan | 0.00096 | 0.0% | :white_check_mark: |
| YosemiteTemps | RegLoss_val | nan | 0 | 0.0% | :white_check_mark: |
| YosemiteTemps | epoch | nan | 84 | 0.0% | :white_check_mark: |
| YosemiteTemps | MAE | 1.43672 | 1.45189 | 1.06% | :white_check_mark: |
| YosemiteTemps | RMSE | 2.14749 | 2.16631 | 0.88% | :white_check_mark: |
| YosemiteTemps | Loss | 0.00064 | 0.00066 | 1.81% | :white_check_mark: |
| YosemiteTemps | RegLoss | 0 | 0 | 0.0% | :white_check_mark: |

## Model Training

### PeytonManning
![](https://asset.cml.dev/1bcb0a2b86f9ab304053923a9e75153f71fc2c71?cml=svg%2Bxml&cache-bypass=66953d6f-abfc-47be-b9fd-266bc66e66cf)

### YosemiteTemps
![](https://asset.cml.dev/6f26923583523211d42f07709e43d90ac5e26bfe?cml=svg%2Bxml&cache-bypass=4430b11a-4456-4eb1-8d9c-6da1b42d08cb)

### AirPassengers
![](https://asset.cml.dev/6d445875bdcfb2cfde02f76d89cdf6fcbf0eafb7?cml=svg%2Bxml&cache-bypass=459303af-fcc9-446a-9ea3-d31b5012a4d3)

# [Codecov](https://codecov.io/gh/ourownstory/neural_prophet/pull/938?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Oskar+Triebe) Report

> :exclamation: No coverage uploaded for pull request base (`lightning@a1db9aa`). [Click here to learn what that means](https://docs.codecov.io/docs/error-reference?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Oskar+Triebe#section-missing-base-commit).
> The diff coverage is `n/a`.

```diff
@@           Coverage Diff            @@
##           lightning    #938   +/-  ##
========================================
  Coverage           ?  87.12%
========================================
  Files              ?      17
  Lines              ?    4442
  Branches           ?       0
========================================
  Hits               ?    3870
  Misses             ?     572
  Partials           ?       0
```

@karl-richter One more question: Why did the performance metrics change so heavily? Anything wrong there?
created_at: 2022-11-15T20:13:16
repo: ourownstory/neural_prophet
pull_number: 978
instance_id: ourownstory__neural_prophet-978
issue_numbers: ["818"]
base_commit: cce32c0dc4fba7c0e79944342454d2def955e51c
diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -2808,7 +2808,7 @@ def _reshape_raw_predictions_to_forecst_df(self, df, predicted, components): forecast = predicted[:, forecast_lag - 1, j] pad_before = self.max_lags + forecast_lag - 1 pad_after = self.n_forecasts - forecast_lag - yhat = np.concatenate(([None] * pad_before, forecast, [None] * pad_after)) + yhat = np.concatenate(([np.NaN] * pad_before, forecast, [np.NaN] * pad_after)) # 0 is the median quantile index if j == 0: name = f"yhat{forecast_lag}" @@ -2834,7 +2834,7 @@ def _reshape_raw_predictions_to_forecst_df(self, df, predicted, components): forecast = components[comp][:, forecast_lag - 1, j] # 0 is the median quantile pad_before = self.max_lags + forecast_lag - 1 pad_after = self.n_forecasts - forecast_lag - yhat = np.concatenate(([None] * pad_before, forecast, [None] * pad_after)) + yhat = np.concatenate(([np.NaN] * pad_before, forecast, [np.NaN] * pad_after)) if j == 0: # temporary condition to add only the median component name = f"{comp}{forecast_lag}" df_forecast[name] = yhat @@ -2845,7 +2845,7 @@ def _reshape_raw_predictions_to_forecst_df(self, df, predicted, components): for j in range(len(self.config_train.quantiles)): forecast_0 = components[comp][0, :, j] forecast_rest = components[comp][1:, self.n_forecasts - 1, j] - yhat = np.concatenate(([None] * self.max_lags, forecast_0, forecast_rest)) + yhat = np.concatenate(([np.NaN] * self.max_lags, forecast_0, forecast_rest)) if j == 0: # temporary condition to add only the median component # add yhat into dataframe, using df_forecast indexing yhat_df = pd.Series(yhat, name=comp).set_axis(df_forecast.index)
[bug] After m.predict(), forecast output contains object dtypes instead of float64 dtypes

`Forecast` dataframe output contains `y`, `residual1`, `yhat1`, `ar1`, and so on. I expect all of them to have _float64_ dtype. However, all of them except `y` have _object_ dtype instead. This makes it more difficult to further process this data, for example when aggregating.

**Code:**
```
m = NeuralProphet(...)
metrics = m.fit(df_train)
forecast = m.predict(df_test)
print(forecast.dtypes)
```

**Result:**
```
ds              datetime64[ns]
y                      float64
residual1               object
yhat1                   object
ar1                     object
trend                   object
season_daily            object
dtype: object
```

**Desired Result:**
```
ds              datetime64[ns]
y                      float64
residual1              float64
yhat1                  float64
ar1                    float64
trend                  float64
season_daily           float64
dtype: float64
```

Also, replace any `None` with `NaN`.
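The root cause is visible in the patch: padding with Python `None` forces NumPy to an object array. A self-contained sketch of the behavior (array values hypothetical):

```python
import numpy as np
import pandas as pd

forecast = np.array([1.0, 2.0, 3.0])

# Padding with None promotes the whole array to object dtype;
# padding with np.nan keeps it float64.
yhat_none = np.concatenate(([None] * 2, forecast, [None] * 1))
yhat_nan = np.concatenate(([np.nan] * 2, forecast, [np.nan] * 1))

print(pd.Series(yhat_none).dtype)  # object  -> hard to aggregate
print(pd.Series(yhat_nan).dtype)   # float64 -> NaN-padded floats
```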
created_at: 2022-11-19T06:46:02
repo: ourownstory/neural_prophet
pull_number: 1010
instance_id: ourownstory__neural_prophet-1010
issue_numbers: ["1002"]
base_commit: 499ae1adf66df5c96240946b56bc59e626a49e08
diff --git a/neuralprophet/custom_loss_metrics.py b/neuralprophet/custom_loss_metrics.py --- a/neuralprophet/custom_loss_metrics.py +++ b/neuralprophet/custom_loss_metrics.py @@ -34,10 +34,14 @@ def forward(self, outputs, target): target = target.repeat(1, 1, len(self.quantiles)) # increase the quantile dimension of the targets differences = target - outputs base_losses = self.loss_func(outputs, target) # dimensions - [n_batch, n_forecasts, no. of quantiles] - positive_losses = torch.tensor(self.quantiles).unsqueeze(dim=0).unsqueeze(dim=0) * base_losses - negative_losses = (1 - torch.tensor(self.quantiles).unsqueeze(dim=0).unsqueeze(dim=0)) * base_losses + positive_losses = ( + torch.tensor(self.quantiles, device=target.device).unsqueeze(dim=0).unsqueeze(dim=0) * base_losses + ) + negative_losses = ( + 1 - torch.tensor(self.quantiles, device=target.device).unsqueeze(dim=0).unsqueeze(dim=0) + ) * base_losses pinball_losses = torch.where(differences >= 0, positive_losses, negative_losses) - multiplier = torch.ones(size=(1, 1, len(self.quantiles))) + multiplier = torch.ones(size=(1, 1, len(self.quantiles)), device=target.device) multiplier[:, :, 0] = 2 pinball_losses = multiplier * pinball_losses # double the loss for the median quantile return pinball_losses diff --git a/neuralprophet/time_net.py b/neuralprophet/time_net.py --- a/neuralprophet/time_net.py +++ b/neuralprophet/time_net.py @@ -254,8 +254,11 @@ def __init__( self.config_trend.changepoints = self.config_trend.changepoints_range * linear_t else: self.config_trend.changepoints = np.insert(self.config_trend.changepoints, 0, 0.0) - self.trend_changepoints_t = torch.tensor( - self.config_trend.changepoints, requires_grad=False, dtype=torch.float + # Register in buffer so the tensor is moved to the correct device once initialized, + # https://pytorch-lightning.readthedocs.io/en/stable/starter/converting.html#remove-any-cuda-or-to-device-calls + self.register_buffer( + "trend_changepoints_t", + torch.tensor(self.config_trend.changepoints, requires_grad=False, dtype=torch.float), ) # Trend Deltas parameters @@ -513,7 +516,9 @@ def _compute_quantile_forecasts_from_diffs(self, diffs, predict_mode=False): if n_upper_quantiles > 0: # check if upper quantiles exist upper_quantile_diffs = diffs[:, :, quantiles_divider_index:] if predict_mode: # check for quantile crossing and correct them in predict mode - upper_quantile_diffs[:, :, 0] = torch.max(torch.tensor(0), upper_quantile_diffs[:, :, 0]) + upper_quantile_diffs[:, :, 0] = torch.max( + torch.tensor(0, device=self.device), upper_quantile_diffs[:, :, 0] + ) for i in range(n_upper_quantiles - 1): next_diff = upper_quantile_diffs[:, :, i + 1] diff = upper_quantile_diffs[:, :, i] @@ -525,7 +530,9 @@ def _compute_quantile_forecasts_from_diffs(self, diffs, predict_mode=False): if n_lower_quantiles > 0: # check if lower quantiles exist lower_quantile_diffs = diffs[:, :, 1:quantiles_divider_index] if predict_mode: # check for quantile crossing and correct them in predict mode - lower_quantile_diffs[:, :, -1] = torch.max(torch.tensor(0), lower_quantile_diffs[:, :, -1]) + lower_quantile_diffs[:, :, -1] = torch.max( + torch.tensor(0, device=self.device), lower_quantile_diffs[:, :, -1] + ) for i in range(n_lower_quantiles - 1, 0, -1): next_diff = lower_quantile_diffs[:, :, i - 1] diff = lower_quantile_diffs[:, :, i] @@ -769,7 +776,7 @@ def all_seasonalities(self, s, meta): torch.Tensor Forecast component of dims (batch, n_forecasts) """ - x = torch.zeros(size=(s[list(s.keys())[0]].shape[0], 
self.n_forecasts, len(self.quantiles))) + x = torch.zeros(size=(s[list(s.keys())[0]].shape[0], self.n_forecasts, len(self.quantiles)), device=self.device) for name, features in s.items(): x = x + self.seasonality(features, name, meta) return x @@ -924,17 +931,21 @@ def forward(self, inputs, meta=None): name_id_dummy = self.id_list[0] meta = OrderedDict() meta["df_name"] = [name_id_dummy for _ in range(inputs["time"].shape[0])] - meta = torch.tensor([self.id_dict[i] for i in meta["df_name"]]) + meta = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) elif self.config_season is None: pass elif meta is None and self.config_season.global_local == "local": name_id_dummy = self.id_list[0] meta = OrderedDict() meta["df_name"] = [name_id_dummy for _ in range(inputs["time"].shape[0])] - meta = torch.tensor([self.id_dict[i] for i in meta["df_name"]]) + meta = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) - additive_components = torch.zeros(size=(inputs["time"].shape[0], self.n_forecasts, len(self.quantiles))) - multiplicative_components = torch.zeros(size=(inputs["time"].shape[0], self.n_forecasts, len(self.quantiles))) + additive_components = torch.zeros( + size=(inputs["time"].shape[0], self.n_forecasts, len(self.quantiles)), device=self.device + ) + multiplicative_components = torch.zeros( + size=(inputs["time"].shape[0], self.n_forecasts, len(self.quantiles)), device=self.device + ) if "lags" in inputs: additive_components += self.auto_regression(lags=inputs["lags"]) @@ -1090,7 +1101,7 @@ def loss_func(self, inputs, predicted, targets): progress_in_epoch = 1 - ((steps_per_epoch * (self.current_epoch + 1) - self.global_step) / steps_per_epoch) loss, reg_loss = self._add_batch_regularizations(loss, self.current_epoch, progress_in_epoch) else: - reg_loss = torch.tensor(0.0) + reg_loss = torch.tensor(0.0, device=self.device) return loss, reg_loss def training_step(self, batch, batch_idx): diff --git a/neuralprophet/utils.py b/neuralprophet/utils.py --- a/neuralprophet/utils.py +++ b/neuralprophet/utils.py @@ -783,7 +783,7 @@ def configure_trainer( config["accelerator"] = accelerator config["devices"] = 1 - if hasattr(config, "accelerator"): + if "accelerator" in config: log.info(f"Using accelerator {config['accelerator']} with {config['devices']} device(s).") else: log.info("No accelerator available. Using CPU for training.")
diff --git a/tests/test_integration.py b/tests/test_integration.py --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -1477,3 +1477,26 @@ def test_predict_raw(): metrics = m.fit(df, freq="D") future = m.make_future_dataframe(df, periods=30, n_historic_predictions=100) forecast = m.predict(df=future, raw=True) + + +def test_accelerator(): + log.info("testing: accelerator in Lightning (if available)") + df = pd.read_csv(PEYTON_FILE, nrows=NROWS) + m = NeuralProphet( + n_forecasts=2, + n_lags=14, + num_hidden_layers=2, + d_hidden=32, + epochs=EPOCHS, + batch_size=BATCH_SIZE, + learning_rate=LR, + trend_reg=0.1, + quantiles=[0.1, 0.9], + accelerator="auto", + ) + df["A"] = df["y"].rolling(7, min_periods=1).mean() + cols = [col for col in df.columns if col not in ["ds", "y"]] + m = m.add_lagged_regressor(names=cols) + m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts) + metrics_df = m.fit(df, freq="D") + forecast = m.predict(df)
GPU support tensors

NeuralProphet raises an error when training on GPUs; running with CUDA does not work:

```
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0!
```

_Originally posted by @geneman in https://github.com/ourownstory/neural_prophet/issues/961#issuecomment-1331144742_
@geneman thanks for raising this issue, I will look into this.
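The patch above applies the two standard Lightning remedies: register constant tensors as buffers so they move with the module, and pass `device=` when creating tensors on the fly. A minimal sketch outside NeuralProphet (module and shapes hypothetical):

```python
import torch
import pytorch_lightning as pl

class TinyModule(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(3, 1)
        # Buffers follow the module across .to(device) calls, unlike plain
        # tensor attributes, which silently stay on CPU.
        self.register_buffer("changepoints_t", torch.tensor([0.0, 0.5, 1.0]))

    def forward(self, x):
        # Tensors created on the fly must name the module's current device,
        # otherwise they end up on CPU while x lives on cuda:0 / mps.
        zeros = torch.zeros(x.shape[0], 1, device=self.device)
        return self.linear(x) + zeros
```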
created_at: 2022-11-30T22:48:34
repo: ourownstory/neural_prophet
pull_number: 1029
instance_id: ourownstory__neural_prophet-1029
issue_numbers: ["1027"]
base_commit: a8275179c5c9c9e27ddb36bb10a8002eb86ff4dc
diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -2763,6 +2763,7 @@ def _maybe_extend_df(self, df): last_date=last_date, periods=periods_add[df_name], freq=self.data_freq, + config_events=self.config_events, ) future_df["ID"] = df_name df_i = pd.concat([df_i, future_df])
events_df is filled with NaNs at the end when predicting into known future

Suppose we would like to add events to the model df. If we are predicting into the known future (not using `make_future_dataframe()`), the df column y **and** the events column(s) are filled with NaN, according to `n_forecasts`. However, in the case of predictions into the unknown future, `make_future_dataframe()` prepares the df so that events columns are filled with zeros instead of NaNs.

Code Sample:
```
# import packages
import numpy as np
import datetime
import plotly
import seaborn as sns
import pandas as pd
from neuralprophet import NeuralProphet
from neuralprophet import set_random_seed
set_random_seed(0)
import warnings
warnings.filterwarnings("ignore")

# create dummy data set for illustration
df = pd.DataFrame({'ds': pd.date_range('2018-01-01', '2022-11-15')})
df['y'] = np.random.randint(1, 1000, df.shape[0])

# create training and validating set. Validating on the last 3 months (92 days)
df_train = df.iloc[:-92]
df_test = df.iloc[-92:]

params = {
    'n_changepoints': 15,
    'changepoints_range': 0.95,
    'yearly_seasonality': True,
    'weekly_seasonality': True,
    'n_lags': 20,
    'n_forecasts': 10,
    'learning_rate': 0.01,
    'batch_size': 8,
}

m = NeuralProphet(**params)

# Set up an event DataFrame
def events():
    promo = pd.DataFrame({
        'event': 'Free_Ship',
        'ds': pd.date_range(start='2021-11-25', end='2021-12-22'),
    })
    lcd = pd.DataFrame({
        'event': 'LCD',
        'ds': pd.date_range(start='2021-12-20', end='2021-12-30'),
    })
    covid_wave1 = pd.DataFrame({
        'event': 'covid_wave1',
        'ds': pd.date_range('2020-04-01', periods=29, freq='D'),
    })
    covid_wave2 = pd.DataFrame({
        'event': 'covid_wave2',
        'ds': pd.date_range('2020-07-24', periods=40, freq='D'),
    })
    covid_wave3 = pd.DataFrame({
        'event': 'covid_wave3',
        'ds': pd.date_range('2021-01-28', periods=34, freq='D'),
    })
    covid_wave4 = pd.DataFrame({
        'event': 'covid_wave4',
        'ds': pd.date_range('2021-07-24', periods=59, freq='D'),
    })
    events_df = pd.concat(
        (
            promo,
            lcd,
            covid_wave1,
            covid_wave2,
            covid_wave3,
            covid_wave4,
        )
    )
    events_df["ds"] = events_df["ds"].apply(lambda x: x.date())
    events_df['ds'] = pd.to_datetime(events_df.ds)
    events_df["ds"] = events_df['ds'].dt.strftime('%Y-%m-%d')

    return events_df

events_df = events()

# Add in events and customise events window
m = m.add_events(
    [
        'Free_Ship',
        'LCD',
        'covid_wave1',
        'covid_wave2',
        'covid_wave3',
        'covid_wave4'
    ],
)

df_train = m.create_df_with_events(df_train, events_df)
df_test = m.create_df_with_events(df_test, events_df)

metrics = m.fit(df=df_train, freq='D', validation_df=df_test, progress="bar")

predict = m.predict(df_test)
```
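Since the one-line patch above only threads `config_events` through to the frame-extension helper, here is a hedged pandas sketch of the symptom and the intended behavior (column names and values hypothetical):

```python
import numpy as np
import pandas as pd

# A df prepared with one event column, as create_df_with_events() would return it
df = pd.DataFrame({
    "ds": pd.date_range("2022-11-01", periods=5, freq="D"),
    "y": np.arange(5.0),
    "LCD": [0.0, 0.0, 1.0, 1.0, 0.0],
})

# Extending n_forecasts rows into the known future: y is genuinely unknown
# (NaN is correct), but event occurrences are known and should be 0/1.
future = pd.DataFrame({"ds": pd.date_range("2022-11-06", periods=3, freq="D")})
extended = pd.concat([df, future], ignore_index=True)
print(extended.tail(3))  # before the fix: the LCD column is NaN in the new rows

# In spirit, the fix fills known event columns instead of leaving NaNs:
extended["LCD"] = extended["LCD"].fillna(0.0)
```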
created_at: 2022-12-03T02:01:32
repo: ourownstory/neural_prophet
pull_number: 1059
instance_id: ourownstory__neural_prophet-1059
issue_numbers: ["1054"]
base_commit: cbca03249381fc925c643f4cfa3afdf48ac6d997
diff --git a/neuralprophet/utils.py b/neuralprophet/utils.py --- a/neuralprophet/utils.py +++ b/neuralprophet/utils.py @@ -827,6 +827,8 @@ def configure_trainer( if progress_bar_enabled: prog_bar_callback = ProgressBar(refresh_rate=num_batches_per_epoch, epochs=config_train.epochs) callbacks.append(prog_bar_callback) + else: + config["enable_progress_bar"] = False # Early stopping monitor if early_stopping:
[bug] Progress bar is given out for every batch if minimal=True, causing a significant slowdown

Setting minimal=True currently causes a significant slowdown of the training process, because the default progress bar is printed for every batch.
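The patch closes a gap in `configure_trainer`: when the custom `ProgressBar` callback is not attached, Lightning's built-in bar must be disabled explicitly, or it refreshes on every batch. A minimal sketch of the same logic (the config dict shape is an assumption mirroring the diff):

```python
import pytorch_lightning as pl

config = {"max_epochs": 10}
progress_bar_enabled = False  # e.g. minimal=True

if progress_bar_enabled:
    pass  # attach the custom ProgressBar callback here
else:
    # Without this flag, Trainer falls back to its default
    # per-batch progress bar even though no callback was added.
    config["enable_progress_bar"] = False

trainer = pl.Trainer(**config)
```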
created_at: 2022-12-09T18:18:34
repo: ourownstory/neural_prophet
pull_number: 1083
instance_id: ourownstory__neural_prophet-1083
issue_numbers: ["1076"]
base_commit: 2f2907f17f138fe5671de6ef8237185257043d3e
diff --git a/neuralprophet/plot_forecast_plotly.py b/neuralprophet/plot_forecast_plotly.py --- a/neuralprophet/plot_forecast_plotly.py +++ b/neuralprophet/plot_forecast_plotly.py @@ -12,6 +12,7 @@ import plotly.express as px import plotly.graph_objs as go from plotly.subplots import make_subplots + from plotly_resampler import register_plotly_resampler except ImportError: log.error("Importing plotly failed. Interactive plots will not work.") @@ -39,6 +40,7 @@ "title": dict(font=dict(size=12)), "hovermode": "x unified", } +register_plotly_resampler(mode="auto") def plot(fcst, quantiles, xlabel="ds", ylabel="y", highlight_forecast=None, line_per_origin=False, figsize=(700, 210)): @@ -209,7 +211,6 @@ def plot(fcst, quantiles, xlabel="ds", ylabel="y", highlight_forecast=None, line **layout_args, ) fig = go.Figure(data=data, layout=layout) - return fig @@ -301,7 +302,7 @@ def plot_components(m, fcst, plot_configuration, df_name="__df__", one_period_pe yaxis.update(trace_object["yaxis"]) yaxis.update(**yaxis_args) for trace in trace_object["traces"]: - fig.add_trace(trace, j + 1, 1) + fig.add_trace(trace, row=j + 1, col=1) # adapt var name to plotly-resampler fig.update_layout(legend={"y": 0.1, "traceorder": "reversed"}) # Reset multiplicative axes labels after tight_layout adjustment diff --git a/neuralprophet/plot_model_parameters_plotly.py b/neuralprophet/plot_model_parameters_plotly.py --- a/neuralprophet/plot_model_parameters_plotly.py +++ b/neuralprophet/plot_model_parameters_plotly.py @@ -11,6 +11,7 @@ try: import plotly.graph_objs as go from plotly.subplots import make_subplots + from plotly_resampler import register_plotly_resampler except ImportError: log.error("Importing plotly failed. Interactive plots will not work.") @@ -34,6 +35,7 @@ "title": dict(font=dict(size=12)), "hovermode": "x unified", } +register_plotly_resampler(mode="auto") def get_dynamic_axis_range(df_range, type, pad=0.05, inverse=False): @@ -41,8 +43,8 @@ def get_dynamic_axis_range(df_range, type, pad=0.05, inverse=False): Parameters ---------- - df_range: list - List of axis values to pad + df_range: np.array + Array of axis values to pad type : str Type of values in the list to pad pad : float @@ -99,9 +101,9 @@ def plot_trend_change(m, quantile, plot_name="Trend Change", df_name="__df__"): start = data_params["ds"].shift scale = data_params["ds"].scale time_span_seconds = scale.total_seconds() - cp_t = [] + cp_t = np.array([]) for cp in m.model.config_trend.changepoints: - cp_t.append(start + datetime.timedelta(seconds=cp * time_span_seconds)) + cp_t = np.append(cp_t, start + datetime.timedelta(seconds=cp * time_span_seconds)) # Global/Local Mode if m.model.config_trend.trend_global_local == "local": quantile_index = m.model.quantiles.index(quantile) @@ -110,7 +112,7 @@ def plot_trend_change(m, quantile, plot_name="Trend Change", df_name="__df__"): quantile_index = m.model.quantiles.index(quantile) weights = m.model.get_trend_deltas.detach()[quantile_index, 0, :].numpy() # add end-point to force scale to match trend plot - cp_t.append(start + scale) + cp_t = np.append(cp_t, start + scale) weights = np.append(weights, [0.0]) traces = [] @@ -120,7 +122,7 @@ def plot_trend_change(m, quantile, plot_name="Trend Change", df_name="__df__"): x=cp_t, y=weights, marker_color=color, - ) + ), ) padded_range = get_dynamic_axis_range(cp_t, type="dt") @@ -314,7 +316,7 @@ def plot_scalar_weights(weights, plot_name, focus=None, multiplicative=False): traces.append( go.Bar( name=plot_name, - x=names, + x=np.array(names), y=values, 
marker_color=color, width=0.8, @@ -365,7 +367,7 @@ def plot_lagged_weights(weights, comp_name, focus=None): traces = [] n_lags = weights.shape[1] - lags_range = list(range(1, 1 + n_lags))[::-1] + lags_range = np.array(range(1, 1 + n_lags))[::-1] if focus is None: weights = np.sum(np.abs(weights), axis=0) weights = weights / np.sum(weights) @@ -584,7 +586,7 @@ def plot_weekly(m, quantile, comp_name="weekly", weekly_start=0, quick=True, mul traces.append( go.Scatter( name=comp_name + " Mean" if mean_std else comp_name, - x=list(range(len(days_i))), + x=np.array(range(len(days_i))), # x=df_w['ds'].dt.to_pydatetime(), y=predicted[comp_name], mode="lines", @@ -597,7 +599,7 @@ def plot_weekly(m, quantile, comp_name="weekly", weekly_start=0, quick=True, mul traces.append( go.Scatter( name="Quant 10%", - x=list(range(len(days_i))), + x=np.array(range(len(days_i))), y=predicted_q10[comp_name], mode="lines", line=dict(color="rgba(45, 146, 255, 0.2)", width=1), @@ -608,7 +610,7 @@ def plot_weekly(m, quantile, comp_name="weekly", weekly_start=0, quick=True, mul traces.append( go.Scatter( name="Quant 90%", - x=list(range(len(days_i))), + x=np.array(range(len(days_i))), y=predicted_q90[comp_name], fill=filling, mode="lines", @@ -790,7 +792,7 @@ def plot_custom_season(m, comp_name, quantile, multiplicative=False, df_name="__ fill="none", ) ) - padded_range = get_dynamic_axis_range(list(range(len(t_i))), type="numeric") + padded_range = get_dynamic_axis_range(t_i, type="numeric") xaxis = go.layout.XAxis( title=f"One period: {comp_name}", range=padded_range, @@ -946,6 +948,6 @@ def plot_parameters( xaxis.update(**xaxis_args) yaxis.update(**yaxis_args) for trace in trace_object["traces"]: - fig.add_trace(trace, i + 1, 1) + fig.add_trace(trace, row=i + 1, col=1) # adapt var name to plotly-resampler return fig
diff --git a/tests/test_plotting.py b/tests/test_plotting.py --- a/tests/test_plotting.py +++ b/tests/test_plotting.py @@ -26,7 +26,7 @@ PLOT = False # plot tests cover both plotting backends -decorator_input = ["plotting_backend", [("plotly"), ("matplotlib")]] +decorator_input = ["plotting_backend", [("matplotlib"), ("plotly")]] @pytest.mark.parametrize(*decorator_input) @@ -98,13 +98,13 @@ def test_plot_components(plotting_backend): # select components manually fig3 = m.plot_components(forecast, components=["autoregression"], plotting_backend="matplotlib") # select plotting components per period - fig4 = m.plot_components(forecast, one_period_per_season=True, plotting_backend="plotly") + fig4 = m.plot_components(forecast, one_period_per_season=True, plotting_backend=plotting_backend) log.info("Plot components with wrong component selection - Raise ValueError") with pytest.raises(ValueError): - m.plot_components(forecast, components=["quantiles"], plotting_backend="plotly") + m.plot_components(forecast, components=["quantiles"], plotting_backend=plotting_backend) with pytest.raises(ValueError): - m.plot_components(forecast, components=["trend123"], plotting_backend="plotly") + m.plot_components(forecast, components=["trend123"], plotting_backend=plotting_backend) if PLOT: fig1.show() @@ -139,13 +139,13 @@ def test_plot_parameters(plotting_backend): fig2 = m.plot_parameters(plotting_backend=plotting_backend) # select components manually - fig3 = m.plot_parameters(components="trend", plotting_backend="plotly") + fig3 = m.plot_parameters(components="trend", plotting_backend=plotting_backend) log.info("Plot parameters with wrong component selection - Raise ValueError") with pytest.raises(ValueError): - m.plot_parameters(components=["events"], plotting_backend="plotly") + m.plot_parameters(components=["events"], plotting_backend=plotting_backend) with pytest.raises(ValueError): - m.plot_parameters(components=["trend123"], plotting_backend="plotly") + m.plot_parameters(components=["trend123"], plotting_backend=plotting_backend) if PLOT: fig1.show() @@ -374,8 +374,8 @@ def test_plot_seasonality(plotting_backend): df = df[df["ds"].isin(bdays)] metrics_df = m.fit(df, freq="B") forecast = m.predict(df) - fig5 = m.plot_components(forecast, plotting_backend="plotly") - fig6 = m.plot_parameters(plotting_backend="plotly") + fig5 = m.plot_components(forecast, plotting_backend=plotting_backend) + fig6 = m.plot_parameters(plotting_backend=plotting_backend) if PLOT: fig1.show() @@ -524,10 +524,10 @@ def test_plot_uncertainty(plotting_backend): forecast = m.predict(future) log.info("Plot multi-steps ahead forecast without autoregression - Raise ValueError") with pytest.raises(ValueError): - m.plot(forecast, plotting_backend="plotly", forecast_in_focus=4) - m.plot_components(forecast, plotting_backend="plotly", forecast_in_focus=4) - m.plot_components(forecast, plotting_backend="plotly", forecast_in_focus=None) - m.plot_parameters(quantile=0.75, plotting_backend="plotly", forecast_in_focus=4) + m.plot(forecast, plotting_backend=plotting_backend, forecast_in_focus=4) + m.plot_components(forecast, plotting_backend=plotting_backend, forecast_in_focus=4) + m.plot_components(forecast, plotting_backend=plotting_backend, forecast_in_focus=None) + m.plot_parameters(quantile=0.75, plotting_backend=plotting_backend, forecast_in_focus=4) if PLOT: fig1.show()
Improve plotly interactive rendering performance

Credits @LeonieFreisinger

## :microscope: Background
For **large datasets**, the **plotly** plotting_backend entails **large computation time** (in comparison to matplotlib) for plotting a figure. Additionally, when using an interactive renderer, the plotted interactive figure in Jupyter notebooks causes a **screen freeze** or **lagged interaction**.

**Goal:** Ideally, we will enable interactive rendering without lagged or freezing interaction.

## :crystal_ball: Key changes
- Introduce [plotly-resampler](https://github.com/predict-idlab/plotly-resampler) to **reduce the granularity** of figures. Plotly-resampler downsamples the data respective to a view.
- Downsampling is ensured by using the `'auto'` mode in all plotly-related `.py` files --> `register_plotly_resampler(mode='auto')` (see the sketch after this description)
- Important note: input data needs to have a `dtype` --> `list` is not accepted as input data --> change `list`s to `np.array`s

## :clipboard: Review Checklist
- [ ] I have performed a self-review of my own code.
- [ ] I have commented my code, added docstrings and data types to function definitions.
- [ ] I have added pytests to check whether my feature / fix works.

Please make sure to follow our best practices in the [Contributing guidelines](https://github.com/ourownstory/neural_prophet/blob/main/CONTRIBUTING.md).
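A minimal sketch of the resampler registration pattern used in the diff (figure data hypothetical):

```python
import numpy as np
import plotly.graph_objs as go
from plotly_resampler import register_plotly_resampler

# Once registered in "auto" mode, every new go.Figure is wrapped so that
# large traces are dynamically downsampled to the current zoom level.
register_plotly_resampler(mode="auto")

x = np.arange(1_000_000)      # np.array, not list: the resampler
y = np.sin(x / 1_000.0)       # needs dtype-carrying inputs
fig = go.Figure(go.Scatter(x=x, y=y, mode="lines"))
fig.show()
```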
2022-12-22T16:30:15
ourownstory/neural_prophet
1096
ourownstory__neural_prophet-1096
[ "1093" ]
b91c3fe8ffa902204e989779dd7bcc6cc75cc3fa
diff --git a/neuralprophet/utils.py b/neuralprophet/utils.py --- a/neuralprophet/utils.py +++ b/neuralprophet/utils.py @@ -586,7 +586,9 @@ def fcst_df_to_latest_forecast(fcst, quantiles, n_last=1): df = pd.concat((fcst[cols],), axis=1) df.reset_index(drop=True, inplace=True) - yhat_col_names = [col_name for col_name in fcst.columns if "yhat" in col_name and "%" not in col_name] + yhat_col_names = [ + col_name for col_name in fcst.columns if "yhat" in col_name and "%" not in col_name and "qhat" not in col_name + ] yhat_col_names_quants = [col_name for col_name in fcst.columns if "yhat" in col_name and "%" in col_name] n_forecast_steps = len(yhat_col_names) yhats = pd.concat((fcst[yhat_col_names],), axis=1)
[bug] test_plot_conformal_prediction fails with auto-regression enabled on plot_latest_forecast Here is the code snippet from `test_plot_conformal_prediction` in the test_plotting.py file: ``` # With auto-regression enabled # TO-DO: Fix Assertion error n_train >= 1 m = NeuralProphet( n_forecasts=7, n_lags=14, quantiles=[0.05, 0.95], epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR, ) train_df, test_df = m.split_df(df, freq="D", valid_p=0.2) train_df, cal_df = m.split_df(train_df, freq="D", valid_p=0.15) metrics_df = m.fit(train_df, freq="D") alpha = 0.1 for method in ["naive", "cqr"]: # Naive and CQR SCP methods future = m.make_future_dataframe(df, periods=m.n_forecasts, n_historic_predictions=10) forecast = m.conformal_predict(future, calibration_df=cal_df, alpha=alpha, method=method) m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts) fig0 = m.plot(forecast) fig1 = m.plot_latest_forecast(forecast, include_previous_forecasts=10, plotting_backend="matplotlib") fig2 = m.plot_latest_forecast(forecast, include_previous_forecasts=10, plot_history_data=True, plotting_backend="matplotlib") fig3 = m.plot_latest_forecast(forecast, include_previous_forecasts=10, plot_history_data=False, plotting_backend="matplotlib") fig4 = m.plot_components(forecast, plotting_backend="matplotlib") fig5 = m.plot_parameters(plotting_backend="matplotlib") if PLOT: fig0.show() fig1.show() fig2.show() fig3.show() fig4.show() fig5.show() ``` Running the test produces this error: ``` FAILED tests/test_plotting.py::test_plot_conformal_prediction[matplotlib] - IndexError: index 5 is out of bounds for axis 1 with size 5 FAILED tests/test_plotting.py::test_plot_conformal_prediction[plotly] - IndexError: index 5 is out of bounds for axis 1 with size 5 ``` This is due to the `m.plot_latest_forecast()` method: somewhere in the utils.py or df_utils.py file, the conformal prediction column (e.g., `yhat1 5.0% + qhat1`) is not correctly mapped.
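A minimal sketch of the patched column filter in `fcst_df_to_latest_forecast` (column names here are illustrative; the key point is that conformal-prediction columns contain both "yhat" and "qhat"):

```python
fcst_columns = ["ds", "y", "yhat1", "yhat1 5.0%", "yhat1 95.0%", "yhat1 - qhat1", "yhat1 + qhat1"]

# old filter: qhat columns slip through because they also contain "yhat"
old = [c for c in fcst_columns if "yhat" in c and "%" not in c]
# -> ["yhat1", "yhat1 - qhat1", "yhat1 + qhat1"]  (too many forecast steps inferred)

# patched filter additionally excludes the conformal-prediction columns
new = [c for c in fcst_columns if "yhat" in c and "%" not in c and "qhat" not in c]
# -> ["yhat1"]
```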
2023-01-05T03:17:43
ourownstory/neural_prophet
1100
ourownstory__neural_prophet-1100
[ "1099" ]
88c47113e3d490224a8e65531fc3ad0c974e44e4
diff --git a/neuralprophet/time_dataset.py b/neuralprophet/time_dataset.py --- a/neuralprophet/time_dataset.py +++ b/neuralprophet/time_dataset.py @@ -508,7 +508,7 @@ def make_events_features(df, config_events: Optional[configure.ConfigEvents] = N np.array All multiplicative event features (both user specified and country specific) """ - + df = df.reset_index(drop=True) additive_events = pd.DataFrame() multiplicative_events = pd.DataFrame()
[bug] Adding country holidays triggers Error when using global/glocal model Adding country holidays with `m.add_country_holidays()` triggers an index error when the input df contains multiple time series (i.e. when glocal/global modeling is used). Resetting the index of the incoming time series in `TimeDataset.make_events_features()` resolves the problem.
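A minimal sketch, with a toy frame, of why the per-ID slice needs its index reset before label lookups that assume a 0-based index (the frame and lookup are hypothetical, the one-line fix is from the patch):

```python
import pandas as pd

df = pd.DataFrame({"ds": pd.date_range("2023-01-01", periods=4), "y": range(4)})
df_i = df.iloc[2:]                  # one ID's slice of a multi-series frame; index is [2, 3]

# df_i.loc[0] would raise a KeyError here, since the original row labels survive the slice
df_i = df_i.reset_index(drop=True)  # the fix applied in make_events_features
print(df_i.loc[0, "y"])             # 2
```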
2023-01-07T07:22:32
ourownstory/neural_prophet
1109
ourownstory__neural_prophet-1109
[ "1053" ]
ebbe06a0e57932f54bdbe4dd3ce214bd737370fa
diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -759,6 +759,9 @@ def fit( log.warning("Progress plot requires metrics to be enabled. Enabling the default metrics.") metrics = metrics.get_metrics(True) + if not self.config_normalization.global_normalization: + log.warning("When Global modeling with local normalization, metrics are displayed in normalized scale.") + if minimal: checkpointing = False self.metrics = False diff --git a/neuralprophet/time_net.py b/neuralprophet/time_net.py --- a/neuralprophet/time_net.py +++ b/neuralprophet/time_net.py @@ -1290,9 +1290,7 @@ def denormalize(self, ts): ------- denormalized timeseries """ - if not self.config_normalization.global_normalization: - log.warning("When Global modeling with local normalization, metrics are displayed in normalized scale.") - else: + if self.config_normalization.global_normalization: shift_y = ( self.config_normalization.global_data_params["y"].shift if self.config_normalization.global_normalization and not self.config_normalization.normalize == "off"
Global-local warning logged every training batch The warning in denormalize() in time_net.py is emitted on every batch. When training on a large dataset, this results in thousands of log messages. Could we simply remove it, @alfonsogarciadecorral? Line 1293 in `time_net.py` ```python if not self.config_normalization.global_normalization: log.warning("When Global modeling with local normalization, metrics are displayed in normalized scale.") else: ... ```
Hey Karl! I didn't create that bit of code, so maybe not the best person to answer :(
2023-01-11T22:01:29
ourownstory/neural_prophet
1146
ourownstory__neural_prophet-1146
[ "1130" ]
75f7486cf1d43018abfe6d3a1b15edeed8922bfe
diff --git a/neuralprophet/df_utils.py b/neuralprophet/df_utils.py --- a/neuralprophet/df_utils.py +++ b/neuralprophet/df_utils.py @@ -142,6 +142,7 @@ def data_params_definition( config_regressors=None, config_events: Optional[ConfigEvents] = None, config_seasonality: Optional[ConfigSeasonality] = None, + local_run_despite_global: Optional[bool] = None, ): """ Initialize data scaling values. @@ -215,9 +216,13 @@ def data_params_definition( for reg in config_regressors.keys(): if reg not in df.columns: raise ValueError(f"Regressor {reg} not found in DataFrame.") + norm_type = config_regressors[reg].normalize + if local_run_despite_global: + if len(df[reg].unique()) < 2: + norm_type = "soft" data_params[reg] = get_normalization_params( array=df[reg].values, - norm_type=config_regressors[reg].normalize, + norm_type=norm_type, ) if config_events is not None: for event in config_events.keys(): @@ -310,10 +315,17 @@ def init_data_params( ) # Compute individual data params local_data_params = OrderedDict() + local_run_despite_global = True if global_normalization else None for df_name, df_i in df.groupby("ID"): df_i.drop("ID", axis=1, inplace=True) local_data_params[df_name] = data_params_definition( - df_i, normalize, config_lagged_regressors, config_regressors, config_events, config_seasonality + df_i, + normalize, + config_lagged_regressors, + config_regressors, + config_events, + config_seasonality, + local_run_despite_global, ) if global_time_normalization: # Overwrite local time normalization data_params with global values (pointer) @@ -439,14 +451,13 @@ def check_single_dataframe(df, check_y, covariates, regressors, events, seasonal raise ValueError("Column ds has timezone specified, which is not supported. Remove timezone.") if len(df.ds.unique()) != len(df.ds): raise ValueError("Column ds has duplicate values. Please remove duplicates.") - regressors_to_remove = [] if regressors is not None: for reg in regressors: if len(df[reg].unique()) < 2: log.warning( - "Encountered future regressor with only unique values in training set. Automatically removed variable." + "Encountered future regressor with only unique values in training set. " + "Variable will be removed for global modeling if this is true for all time series." ) - regressors_to_remove.append(reg) columns = [] if check_y: @@ -489,7 +500,7 @@ def check_single_dataframe(df, check_y, covariates, regressors, events, seasonal df.index.name = None df = df.sort_values("ds") df = df.reset_index(drop=True) - return df, regressors_to_remove + return df def check_dataframe( @@ -521,14 +532,20 @@ def check_dataframe( """ df, _, _, _ = prep_or_copy_df(df) checked_df = pd.DataFrame() - regressors_to_remove = [] for df_name, df_i in df.groupby("ID"): - df_aux, reg = check_single_dataframe(df_i, check_y, covariates, regressors, events, seasonalities) + df_aux = check_single_dataframe(df_i, check_y, covariates, regressors, events, seasonalities) df_aux = df_aux.copy(deep=True) - if len(reg) > 0: - regressors_to_remove.append(*reg) df_aux["ID"] = df_name checked_df = pd.concat((checked_df, df_aux), ignore_index=True) + regressors_to_remove = [] + if regressors is not None: + for reg in regressors: + if len(df[reg].unique()) < 2: + log.warning( + "Encountered future regressor with only unique values in training set across all IDs." + "Automatically removed variable." 
+ ) + regressors_to_remove.append(reg) if len(regressors_to_remove) > 0: regressors_to_remove = list(set(regressors_to_remove)) checked_df = checked_df.drop(*regressors_to_remove, axis=1)
Automatic Future Regressor Removal with Global Model **Describe the bug** When training a global model (multiple series identified by the ID column) with future regressors, NeuralProphet automatically removes the specified future regressor if it's constant on one of those series. However, while it might be constant / not useful for one particular series (ID group), it might carry relevant information for other series. For example, when forecasting electrical consumption, we can include estimated PV generation - which will be 0 for houses that don't have a PV installation, and different from that for houses that do have it. While browsing through the code, I have narrowed the issue to this line of code (that removes a regressor if it's constant only across one of the series): https://github.com/ourownstory/neural_prophet/blob/main/neuralprophet/df_utils.py#L449 For some reason, this regressor removal is only activated if a `validation_df` is supplied during the model fitting (and not otherwise). **To Reproduce** Steps to reproduce the behavior: ``` from neuralprophet import NeuralProphet import pandas as pd import datetime n_samples = 100 start = datetime.datetime(2023, 1, 1) one_day = datetime.timedelta(days=1) df = pd.DataFrame({ 'y': [float(x) for x in range(n_samples)]*2, 'fut_reg_1': [float(x)**2 for x in range(n_samples)] * 2, 'fut_reg_2': [float(x)**0.5 for x in range(n_samples)] * 2, 'ID': ['a']*n_samples + ['b']*n_samples, 'ds': [start + x*one_day for x in range(n_samples)] * 2 }) df.loc[df.ID == 'b', 'fut_reg_2'] = 0 model = NeuralProphet( growth="off", daily_seasonality=False, yearly_seasonality=False, weekly_seasonality=5, n_lags=4, ar_reg=1, learning_rate=0.01, ) df_train, df_val = model.split_df(df, valid_p=0.2) model.add_future_regressor(name='fut_reg_1') model.add_future_regressor(name='fut_reg_2') print(model.config_regressors) metrics = model.fit( df=df_train, validation_df=df_val, freq='1D', ) ``` **Expected behavior** When training a global model, NeuralProphet should only remove regressors if they are constant across all series, and not if they are only constant across some of the series. **What actually happens** The future regressor gets automatically removed, which, in turn, raises an exception (`ValueError: Unexpected column fut_reg_2 in data`). **Screenshots** ``` INFO - (NP.df_utils._infer_frequency) - Major frequency D corresponds to 99.0% of the data. INFO - (NP.df_utils._infer_frequency) - Dataframe freq automatically defined as D INFO - (NP.df_utils._infer_frequency) - Major frequency D corresponds to 99.0% of the data. INFO - (NP.df_utils._infer_frequency) - Dataframe freq automatically defined as D WARNING - (NP.forecaster.fit) - When Global modeling with local normalization, metrics are displayed in normalized scale. WARNING - (NP.df_utils.check_single_dataframe) - Encountered future regressor with only unique values in training set. Automatically removed variable. WARNING - (NP.forecaster._check_dataframe) - Removing regressor fut_reg_2 because it is not present in the data. INFO - (NP.df_utils._infer_frequency) - Major frequency D corresponds to 98.765% of the data. INFO - (NP.df_utils._infer_frequency) - Defined frequency is equal to major frequency - 1D INFO - (NP.df_utils._infer_frequency) - Major frequency D corresponds to 98.765% of the data.
INFO - (NP.df_utils._infer_frequency) - Defined frequency is equal to major frequency - 1D INFO - (NP.config.set_auto_batch_epoch) - Auto-set batch_size to 16 INFO - (NP.config.set_auto_batch_epoch) - Auto-set epochs to 419 --------------------------------------------------------------------------- ValueError Traceback (most recent call last) Input In [30], in <cell line: 38>() 34 model.add_future_regressor(name='fut_reg_2') 36 print(model.config_regressors) ---> 38 metrics = model.fit( 39 df=df_train, 40 validation_df=df_val, 41 freq='1D', 42 ) File C:\Anaconda3\envs\dev_3_10\lib\site-packages\neuralprophet\forecaster.py:821, in NeuralProphet.fit(self, df, freq, validation_df, epochs, batch_size, learning_rate, early_stopping, minimal, metrics, progress, checkpointing, continue_training, num_workers) 819 df_val = self._check_dataframe(df_val, check_y=False, exogenous=False) 820 df_val = self._handle_missing_data(df_val, freq=self.data_freq) --> 821 metrics_df = self._train( 822 df, 823 df_val=df_val, 824 progress_bar_enabled=bool(progress), 825 metrics_enabled=bool(self.metrics), 826 checkpointing_enabled=checkpointing, 827 continue_training=continue_training, 828 num_workers=num_workers, 829 ) 831 # Show training plot 832 if progress == "plot": File C:\Anaconda3\envs\dev_3_10\lib\site-packages\neuralprophet\forecaster.py:2609, in NeuralProphet._train(self, df, df_val, progress_bar_enabled, metrics_enabled, checkpointing_enabled, continue_training, num_workers) 2607 if df_val is not None: 2608 df_val, _, _, _ = df_utils.prep_or_copy_df(df_val) -> 2609 val_loader = self._init_val_loader(df_val) 2611 # Init the model, if not continue from checkpoint 2612 if continue_training: File C:\Anaconda3\envs\dev_3_10\lib\site-packages\neuralprophet\forecaster.py:2562, in NeuralProphet._init_val_loader(self, df) 2550 """Executes data preparation steps and initiates evaluation procedure. 2551 2552 Parameters (...) 2559 torch DataLoader 2560 """ 2561 df, _, _, _ = df_utils.prep_or_copy_df(df) -> 2562 df = self._normalize(df) 2563 dataset = self._create_dataset(df, predict_mode=False) 2564 loader = DataLoader(dataset, batch_size=min(1024, len(dataset)), shuffle=False, drop_last=False) File C:\Anaconda3\envs\dev_3_10\lib\site-packages\neuralprophet\forecaster.py:2496, in NeuralProphet._normalize(self, df) 2494 data_params = self.config_normalization.get_data_params(df_name) 2495 df_i.drop("ID", axis=1, inplace=True) -> 2496 df_aux = df_utils.normalize(df_i, data_params).copy(deep=True) 2497 df_aux["ID"] = df_name 2498 df_norm = pd.concat((df_norm, df_aux), ignore_index=True) File C:\Anaconda3\envs\dev_3_10\lib\site-packages\neuralprophet\df_utils.py:393, in normalize(df, data_params) 391 for name in df.columns: 392 if name not in data_params.keys(): --> 393 raise ValueError(f"Unexpected column {name} in data") 394 new_name = name 395 if name == "ds": ValueError: Unexpected column fut_reg_2 in data ``` **Environement (please complete the following information):** - Python environment [Python 3.10, in standalone anaconda env with no other packages] - NeuralProphet version and install method [0.5.1, installed from PYPI with `pip install neuralprophet==0.5.1`]
@milannnnn This is a valid point. Thank you for raising it! @judussoari We should indeed only remove a variable if it is single-valued across all series in a panel dataset (unless the component were set to be local). We may also want to de-escalate the Error to a Warning if unused columns are passed.
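A toy illustration of the patched check, mirroring the PV example above (frame and values are hypothetical):

```python
import pandas as pd

df = pd.DataFrame({
    "ID": ["a"] * 3 + ["b"] * 3,
    "pv_generation": [0.0, 0.0, 0.0, 1.2, 0.7, 0.9],  # constant only for house "a"
})

# per-ID check (old behavior): flags the regressor for removal
any(df.groupby("ID")["pv_generation"].nunique() < 2)  # True

# panel-wide check (patched behavior): keeps the regressor
df["pv_generation"].nunique() < 2                     # False
```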
2023-02-04T22:39:36
ourownstory/neural_prophet
1154
ourownstory__neural_prophet-1154
[ "1147" ]
05198bae7e0ada2644115c75999cc8a0fdbe4408
diff --git a/neuralprophet/configure.py b/neuralprophet/configure.py --- a/neuralprophet/configure.py +++ b/neuralprophet/configure.py @@ -383,6 +383,8 @@ class LaggedRegressor: as_scalar: bool normalize: Union[bool, str] n_lags: int + num_hidden_layers: Optional[int] + d_hidden: Optional[int] def __post_init__(self): if self.reg_lambda is not None: diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -448,6 +448,8 @@ def add_lagged_regressor( self, names: Union[str, List[str]], n_lags: Union[int, np_types.Literal["auto", "scalar"]] = "auto", + num_hidden_layers: Optional[int] = None, + d_hidden: Optional[int] = None, regularization: Optional[float] = None, normalize: Union[bool, str] = "auto", ): @@ -463,12 +465,20 @@ def add_lagged_regressor( previous regressors time steps to use as input in the predictor (covar order) if ``auto``, time steps will be equivalent to the AR order (default) if ``scalar``, all the regressors will only use last known value as input + num_hidden_layers : int + number of hidden layers to include in Lagged-Regressor-Net (defaults to same configuration as AR-Net) + d_hidden : int + dimension of hidden layers of the Lagged-Regressor-Net. Ignored if ``num_hidden_layers`` == 0. regularization : float optional scale for regularization strength normalize : bool optional, specify whether this regressor will benormalized prior to fitting. if ``auto``, binary regressors will not be normalized. """ + if num_hidden_layers is None: + num_hidden_layers = self.config_model.num_hidden_layers + if d_hidden is None: + d_hidden = self.config_model.d_hidden if n_lags == 0 or n_lags is None: n_lags = 0 log.warning( @@ -502,6 +512,8 @@ def add_lagged_regressor( normalize=normalize, as_scalar=only_last_value, n_lags=n_lags, + num_hidden_layers=num_hidden_layers, + d_hidden=d_hidden, ) return self diff --git a/neuralprophet/time_net.py b/neuralprophet/time_net.py --- a/neuralprophet/time_net.py +++ b/neuralprophet/time_net.py @@ -303,17 +303,17 @@ def __init__( for covar in self.config_lagged_regressors.keys(): covar_net = nn.ModuleList() d_inputs = self.config_lagged_regressors[covar].n_lags - for i in range(self.num_hidden_layers): + for i in range(self.config_lagged_regressors[covar].num_hidden_layers): d_hidden = ( max( 4, round( (self.config_lagged_regressors[covar].n_lags + n_forecasts) - / (2.0 * (num_hidden_layers + 1)) + / (2.0 * (self.config_lagged_regressors[covar].num_hidden_layers + 1)) ), ) - if d_hidden is None - else d_hidden + if self.config_lagged_regressors[covar].d_hidden is None + else self.config_lagged_regressors[covar].d_hidden ) covar_net.append(nn.Linear(d_inputs, d_hidden, bias=True)) d_inputs = d_hidden @@ -499,7 +499,7 @@ def covariate(self, lags, name): Forecast component of dims (batch, n_forecasts) """ x = lags - for i in range(self.num_hidden_layers + 1): + for i in range(self.config_lagged_regressors[name].num_hidden_layers + 1): if i > 0: x = nn.functional.relu(x) x = self.covar_nets[name][i](x)
diff --git a/tests/test_integration.py b/tests/test_integration.py --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -378,7 +378,7 @@ def test_lag_reg(): ) df["A"] = df["y"].rolling(7, min_periods=1).mean() df["B"] = df["y"].rolling(30, min_periods=1).mean() - m = m.add_lagged_regressor(names="A") + m = m.add_lagged_regressor(names="A", n_lags=12, num_hidden_layers=4, d_hidden=16) m = m.add_lagged_regressor(names="B") metrics_df = m.fit(df, freq="D") future = m.make_future_dataframe(df, n_historic_predictions=10)
Allow custom neural network configuration for lagged regressors Currently, when adding lagged regressors to the model, they follow the same configuration for `num_hidden_layers` and `d_hidden` as the AR-net specified for autoregression. We would like to change that by adding an individual configuration to `m.add_lagged_regressor()`, as sketched below.
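A usage sketch based on the updated test above (epochs and data handling are omitted for brevity):

```python
from neuralprophet import NeuralProphet

m = NeuralProphet(n_lags=14, n_forecasts=7)
# per-regressor network size, independent of the AR-net settings
m = m.add_lagged_regressor(names="A", n_lags=12, num_hidden_layers=4, d_hidden=16)
# omitting the arguments falls back to the model-wide configuration
m = m.add_lagged_regressor(names="B")
```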
2023-02-09T04:22:43
ourownstory/neural_prophet
1175
ourownstory__neural_prophet-1175
[ "252" ]
8598fe5821d605221b655b189e4acd22434d25a9
diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -340,6 +340,9 @@ def __init__( accelerator: Optional[str] = None, trainer_config: dict = {}, ): + self.config = locals() + self.config.pop("self") + # General self.name = "NeuralProphet" self.n_forecasts = n_forecasts @@ -517,6 +520,23 @@ def add_lagged_regressor( ) return self + def parameters(self): + return self.config + + def state_dict(self): + return { + "data_freq": self.data_freq, + "fitted": self.fitted, + "data_params": self.data_params, + "optimizer": self.config_train.optimizer, + "scheduler": self.config_train.scheduler, + "model": self.model, + "future_periods": self.future_periods, + "predict_steps": self.predict_steps, + "highlight_forecast_step_n": self.highlight_forecast_step_n, + "true_ar_weights": self.true_ar_weights, + } + def add_future_regressor(self, name, regularization=None, normalize="auto", mode="additive"): """Add a regressor as lagged covariate with order 1 (scalar) or as known in advance (also scalar).
add model.get_params and set_params, similar to scikit-learn's model.get_params, for scenario testing or for parameter-based save/load functions
see discussion #238 Hello @ourownstory, Can you assign this issue to me? Hi @ourownstory, @anastasiia-tovstolis, with our ongoing migration to Pytorch Lightning (and I think also in plain pytorch) you can access the model parameters using `m.model.state_dict()`. This yields the trained parameters such as bias, trend etc. Further you can use `load_state_dict()` to set the parameters of the model. Not sure if this is exactly what you are looking for, but it seems very related. The output looks roughly as follows ```python { 'bias': tensor([0.7926]), 'trend_k0': tensor([-0.3043]), 'trend_deltas': tensor([[1.6712, 0.7629, 1.5933, 0.4826, 1.2354, 1.0626, 1.3238, 1.1458, 1.6141, 0.1156, 1.1982]]), 'ar_net.0.weight': tensor([[1.8130, 1.6902, 0.5619], [0.3293, 0.8004, 0.9695]]), 'covar_nets.A.0.weight': tensor([[0.7283, 1.5743, 0.9238]]) } ``` Hi @ourownstory, I would like to clarify if this issue is still relevant in connection with the comment from @karl-richter?
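A minimal sketch of the accessors added by the patch above (keys follow the implementation; calling `state_dict()` assumes a fitted model):

```python
from neuralprophet import NeuralProphet

m = NeuralProphet(n_lags=3, n_forecasts=2)
hyperparams = m.parameters()  # constructor arguments, captured via locals() in __init__

# after m.fit(df, freq="D"):
# state = m.state_dict()      # e.g. "optimizer", "scheduler", "model", "fitted", ...
```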
2023-02-27T23:45:59
ourownstory/neural_prophet
1254
ourownstory__neural_prophet-1254
[ "1168" ]
347f783b81575b1a5ac6dd977650e9441b17e493
diff --git a/neuralprophet/components/trend/static.py b/neuralprophet/components/trend/static.py --- a/neuralprophet/components/trend/static.py +++ b/neuralprophet/components/trend/static.py @@ -28,7 +28,7 @@ def forward(self, t, meta): torch.Tensor Trend component, same dimensions as input t """ - return self.bias.unsqueeze(dim=0).repeat(t.shape[0], 1, 1) + return self.bias.unsqueeze(dim=0).repeat(t.shape[0], self.n_forecasts, 1) @property def get_trend_deltas(self):
Static Trend does not follow required forecasting pattern **Source Code:** ``` import pandas as pd from neuralprophet import NeuralProphet, set_log_level, set_random_seed set_log_level("ERROR") data_location = "https://raw.githubusercontent.com/ourownstory/neuralprophet-data/main/datasets/" file = "energy/SF_hospital_load.csv" data_df = pd.read_csv(data_location + file) m = NeuralProphet() train_df, test_df = m.split_df(data_df, freq="H", valid_p=1.0 / 5) quantile_lo, quantile_hi = 0.05, 0.95 quantiles = [quantile_lo, quantile_hi] n_lags = 3 * 24 n_forecasts = 24 m = NeuralProphet( growth="off", yearly_seasonality=False, weekly_seasonality=False, daily_seasonality=False, n_forecasts = n_forecasts, n_lags=n_lags, ar_reg=1, learning_rate=0.01, quantiles=quantiles, ) random_seed = 0 set_random_seed(random_seed) metrics = m.fit(train_df, freq="H") forecast = m.predict(test_df) ``` **Error:** ``` --------------------------------------------------------------------------- IndexError Traceback (most recent call last) Input In [1], in <cell line: 34>() 31 metrics = m.fit(train_df, freq="H") 32 metrics.tail(1) ---> 34 forecast = m.predict(test_df) File ~.../neuralprophet/forecaster.py:940, in NeuralProphet.predict(self, df, decompose, raw) 938 fcst = fcst[:-1] 939 else: --> 940 fcst = self._reshape_raw_predictions_to_forecst_df(df_i, predicted, components) 941 if periods_added[df_name] > 0: 942 fcst = fcst[: -periods_added[df_name]] File ~.../neuralprophet/forecaster.py:3169, in NeuralProphet._reshape_raw_predictions_to_forecst_df(self, df, predicted, components) 3167 for j in range(len(self.config_train.quantiles)): 3168 forecast_0 = components[comp][0, :, j] -> 3169 forecast_rest = components[comp][1:, self.n_forecasts - 1, j] 3170 yhat = np.concatenate(([np.NaN] * self.max_lags, forecast_0, forecast_rest)) 3171 if j == 0: # temporary condition to add only the median component 3172 # add yhat into dataframe, using df_forecast indexing IndexError: index 23 is out of bounds for axis 1 with size 1 ``` Is it because I used the empty `m = NeuralProphet()` to split to data to train/test?
This is definitely a bug. By testing some things out I found out that whenever we run NP with Autoregression `n_lags>0` and multi-step-ahead forecasts `n_forecasts>1`, for some reason NP always tries to fit a trend. If the user, however, opts to not fit a trend `growth='off'`, we get this error. We get this error also if we do not add any quantiles! Does this have anything to do with trend modularization, @karl-richter? Update on this: The `StaticTrend()` class (called when `growth='off'`) in `static.py` does not follow the forecasting pattern of all the other components. Example: PiecewiseLinear() (when growth is not off) forecasts for every sample, and ends up with a shape of `(batch_size, n_forecasts, quantiles)`. Static trend is in shape `(batch_size, 1, quantiles)`. Although the static trend is constant, we need it to be in the same shape as the other components. Can we change this @karl-richter ? :) I got the same issue while using the global local model with n_forecasts > 1 and n_lags > 1 as @judussoari mentioned. Have added a workaround https://github.com/Terabyte17/neural_prophet/blob/main/neuralprophet/forecaster.py#L3265 here for my own use case, so this might be helpful till this gets resolved. > Update on this: The `StaticTrend()` class (called when `growth='off'`) in `static.py` does not follow the forecasting pattern of all the other components. Example: PiecewiseLinear() (when growth is not off) forecasts for every sample, and ends up with a shape of `(batch_size, n_forecasts, quantiles)`. Static trend is in shape `(batch_size, 1, quantiles)`. Although the static trend is constant, we need it to be in the same shape as the other components. Can we change this @karl-richter ? :) Has this Issue been addressed yet? @karl-richter @judussoari
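A shape sketch of the one-line fix in `static.py`, assuming a per-quantile bias parameter:

```python
import torch

batch_size, n_forecasts, n_quantiles = 8, 24, 3
bias = torch.zeros(n_quantiles)  # static trend parameter

# before: .repeat(batch_size, 1, 1)           -> torch.Size([8, 1, 3])
# after:  .repeat(batch_size, n_forecasts, 1) -> torch.Size([8, 24, 3])
trend = bias.unsqueeze(dim=0).repeat(batch_size, n_forecasts, 1)
assert trend.shape == (batch_size, n_forecasts, n_quantiles)
```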
2023-04-11T04:22:15
ourownstory/neural_prophet
1256
ourownstory__neural_prophet-1256
[ "1153" ]
658347318cd7106268e4128b3b6ed260c9fbf391
diff --git a/neuralprophet/df_utils.py b/neuralprophet/df_utils.py --- a/neuralprophet/df_utils.py +++ b/neuralprophet/df_utils.py @@ -208,9 +208,13 @@ def data_params_definition( for covar in config_lagged_regressors.keys(): if covar not in df.columns: raise ValueError(f"Lagged regressor {covar} not found in DataFrame.") + norm_type_lag = config_lagged_regressors[covar].normalize + if local_run_despite_global: + if len(df[covar].unique()) < 2: + norm_type_lag = "soft" data_params[covar] = get_normalization_params( array=df[covar].values, - norm_type=config_lagged_regressors[covar].normalize, + norm_type=norm_type_lag, ) if config_regressors is not None: @@ -457,6 +461,13 @@ def check_single_dataframe(df, check_y, covariates, regressors, events, seasonal "Encountered future regressor with only unique values in training set. " "Variable will be removed for global modeling if this is true for all time series." ) + if covariates is not None: + for covar in covariates: + if len(df[covar].unique()) < 2: + log.warning( + "Encountered lagged regressor with only unique values in training set. " + "Variable will be removed for global modeling if this is true for all time series." + ) columns = [] if check_y: @@ -503,7 +514,13 @@ def check_single_dataframe(df, check_y, covariates, regressors, events, seasonal def check_dataframe( - df: pd.DataFrame, check_y: bool = True, covariates=None, regressors=None, events=None, seasonalities=None + df: pd.DataFrame, + check_y: bool = True, + covariates=None, + regressors=None, + events=None, + seasonalities=None, + future: Optional[bool] = None, ) -> Tuple[pd.DataFrame, List]: """Performs basic data sanity checks and ordering, as well as prepare dataframe for fitting or predicting. @@ -523,6 +540,8 @@ def check_dataframe( event column names seasonalities : list or dict seasonalities column names + future : bool + if df is a future dataframe Returns ------- @@ -545,6 +564,16 @@ def check_dataframe( "Automatically removed variable." ) regressors_to_remove.append(reg) + if future: + return checked_df, regressors_to_remove + if covariates is not None: + for covar in covariates: + if len(df[covar].unique()) < 2: + log.warning( + "Encountered lagged regressor with only unique values in training set across all IDs." + "Automatically removed variable." + ) + regressors_to_remove.append(covar) if len(regressors_to_remove) > 0: regressors_to_remove = list(set(regressors_to_remove)) checked_df = checked_df.drop(*regressors_to_remove, axis=1)
diff --git a/tests/test_integration.py b/tests/test_integration.py --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -960,27 +960,35 @@ def test_global_modeling_with_lagged_regressors(): df2["A"] = df2["y"].rolling(10, min_periods=1).mean() df3["A"] = df3["y"].rolling(40, min_periods=1).mean() df4["A"] = df4["y"].rolling(20, min_periods=1).mean() + df5 = df2.copy(deep=True) + df5["A"] = 1 + df6 = df4.copy(deep=True) + df6["A"] = 1 df1["ID"] = "df1" df2["ID"] = "df2" df3["ID"] = "df1" df4["ID"] = "df2" + df5["ID"] = "df2" + df6["ID"] = "df2" future_regressors_df3 = pd.DataFrame(data={"A": df3["A"].iloc[:30]}) future_regressors_df4 = pd.DataFrame(data={"A": df4["A"].iloc[:40]}) future_regressors_df3["ID"] = "df1" future_regressors_df4["ID"] = "df2" - train_input = {0: df1, 1: pd.concat((df1, df2)), 2: pd.concat((df1, df2))} - test_input = {0: df3, 1: df3, 2: pd.concat((df3, df4))} + train_input = {0: df1, 1: pd.concat((df1, df2)), 2: pd.concat((df1, df2)), 3: pd.concat((df1, df5))} + test_input = {0: df3, 1: df3, 2: pd.concat((df3, df4)), 3: pd.concat((df3, df6))} regressors_input = { 0: future_regressors_df3, 1: future_regressors_df3, 2: pd.concat((future_regressors_df3, future_regressors_df4)), + 3: pd.concat((future_regressors_df3, future_regressors_df4)), } info_input = { 0: "Testing single ts df train / single ts df test - single df regressors, no events", 1: "Testing many ts df train / many ts df test - single df regressors, no events", 2: "Testing many ts df train / many ts df test - many df regressors, no events", + 3: "Testing lagged regressor with only unique values", } - for i in range(0, 3): + for i in range(0, 4): log.info(info_input[i]) m = NeuralProphet( n_lags=5, @@ -990,6 +998,7 @@ def test_global_modeling_with_lagged_regressors(): learning_rate=LR, trend_global_local="global", season_global_local="global", + global_normalization=True if i == 3 else False, ) m = m.add_lagged_regressor(names="A") metrics = m.fit(train_input[i], freq="D")
When calling make_future_dataframe do not check for unique values of regressor ### Discussed in https://github.com/ourownstory/neural_prophet/discussions/1112 <div type='discussions-op-text'> <sup>Originally posted by **sivikt** January 12, 2023</sup> Hi All! Great library - loving it so far. I observe a strange thing. Starting from 0.4.2, the function model.make_future_dataframe(...) silently changes the model state by removing predefined regressors. Consider the code below. It creates a model with 1 future regressor **r_lagged** and then tries to make a future using the **make_future_dataframe** function. The result contains the column **r_lagged**, but with NaNs for the last 5 past data points, which is strange since the whole past is provided. ```python ## pip install neuralprophet==0.4.2 import numpy as np import pandas as pd from neuralprophet import NeuralProphet from neuralprophet import set_random_seed set_random_seed(0) data_location = "https://raw.githubusercontent.com/ourownstory/neuralprophet-data/main/datasets/" df = pd.read_csv(data_location + 'wp_log_peyton_manning.csv') df['r_lagged'] = np.random.choice(5, len(df)) df.loc[df[-20:].index, 'r_lagged'] = 5 m = NeuralProphet(n_forecasts=10, n_lags=5, epochs=2) m = m.add_future_regressor(name='r_lagged') metrics = m.fit(df, freq='D') last_date = pd.to_datetime(df["ds"].copy(deep=True).dropna()).sort_values().max() future_dates = pd.date_range(start=last_date, periods=12, freq='D') future_dates = future_dates[future_dates > last_date] future_df = pd.DataFrame({"ds": future_dates}) future_df['r_lagged'] = np.random.choice(5, len(future_df)) future = m.make_future_dataframe(df=df, periods=10, regressors_df=future_df, n_historic_predictions=False) print(future_df) print(future) ``` | idx | ds | y | r_lagged | | - | - | - | - | | 0 | 2016-01-16 | 7.8172 | NaN | | 1 | 2016-01-17 | 9.2739 | NaN | | 2 | 2016-01-18 | 10.3338 | NaN | | 3 | 2016-01-19 | 9.1259 | NaN | | 4 | 2016-01-20 | 8.8914 | NaN | | 5 | 2016-01-21 | NaN | 0.0 | | 6 | 2016-01-22 | NaN | 0.0 | | 7 | 2016-01-23 | NaN | 1.0 | | 8 | 2016-01-24 | NaN | 0.0 | | 9 | 2016-01-25 | NaN | 4.0 | | 10 | 2016-01-26 | NaN | 3.0 | | 11 | 2016-01-27 | NaN | 3.0 | | 12 | 2016-01-28 | NaN | 4.0 | | 13 | 2016-01-29 | NaN | 2.0 | | 14 | 2016-01-30 | NaN | 3.0 | This is because **check_single_dataframe** gets only the last historic data points, where **r_lagged** has the single value 5. ``` check_single_dataframe, df_utils.py:455 check_dataframe, df_utils.py:519 _check_dataframe, forecaster.py:2155 _make_future_dataframe, forecaster.py:2765 make_future_dataframe, forecaster.py:1246 ``` I do not quite understand why this is so, having trained the model with a past regressor that takes multiple unique values. Why does a simple utility function change the model state? It was not the case in the previous versions. One can see that **check_single_dataframe** marks a regressor as "to be removed" if the regressor has only 1 unique value, but it considers only the last N past points (N lags), putting aside the **future_df**. What do you think? Should we consider opening an issue/PR to adjust the behavior, or are there some hidden algorithm intrinsics which require such model state manipulation? </div>
2023-04-11T05:08:31
ourownstory/neural_prophet
1272
ourownstory__neural_prophet-1272
[ "1227", "1250" ]
e12f23e8925b037b9d9d9afdab92c06cfe5ae3a2
diff --git a/neuralprophet/data/process.py b/neuralprophet/data/process.py --- a/neuralprophet/data/process.py +++ b/neuralprophet/data/process.py @@ -299,7 +299,7 @@ def _check_dataframe( checked dataframe """ df, _, _, _ = df_utils.prep_or_copy_df(df) - df, regressors_to_remove = df_utils.check_dataframe( + df, regressors_to_remove, lag_regressors_to_remove = df_utils.check_dataframe( df=df, check_y=check_y, covariates=model.config_lagged_regressors if exogenous else None, @@ -312,6 +312,14 @@ def _check_dataframe( for reg in regressors_to_remove: log.warning(f"Removing regressor {reg} because it is not present in the data.") model.config_regressors.pop(reg) + if len(model.config_regressors) == 0: + model.config_regressors = None + if model.config_lagged_regressors is not None: + for reg in lag_regressors_to_remove: + log.warning(f"Removing lagged regressor {reg} because it is not present in the data.") + model.config_lagged_regressors.pop(reg) + if len(model.config_lagged_regressors) == 0: + model.config_lagged_regressors = None return df diff --git a/neuralprophet/df_utils.py b/neuralprophet/df_utils.py --- a/neuralprophet/df_utils.py +++ b/neuralprophet/df_utils.py @@ -521,7 +521,7 @@ def check_dataframe( events=None, seasonalities=None, future: Optional[bool] = None, -) -> Tuple[pd.DataFrame, List]: +) -> Tuple[pd.DataFrame, List, List]: """Performs basic data sanity checks and ordering, as well as prepare dataframe for fitting or predicting. @@ -556,6 +556,7 @@ def check_dataframe( df_aux["ID"] = df_name checked_df = pd.concat((checked_df, df_aux), ignore_index=True) regressors_to_remove = [] + lag_regressors_to_remove = [] if regressors is not None: for reg in regressors: if len(df[reg].unique()) < 2: @@ -564,8 +565,6 @@ def check_dataframe( "Automatically removed variable." ) regressors_to_remove.append(reg) - if future: - return checked_df, regressors_to_remove if covariates is not None: for covar in covariates: if len(df[covar].unique()) < 2: @@ -573,12 +572,18 @@ def check_dataframe( "Encountered lagged regressor with only unique values in training set across all IDs." "Automatically removed variable." ) - regressors_to_remove.append(covar) + lag_regressors_to_remove.append(covar) + if future: + return checked_df, regressors_to_remove, lag_regressors_to_remove if len(regressors_to_remove) > 0: regressors_to_remove = list(set(regressors_to_remove)) - checked_df = checked_df.drop(*regressors_to_remove, axis=1) + checked_df = checked_df.drop(regressors_to_remove, axis=1) + assert checked_df is not None + if len(lag_regressors_to_remove) > 0: + lag_regressors_to_remove = list(set(lag_regressors_to_remove)) + checked_df = checked_df.drop(lag_regressors_to_remove, axis=1) assert checked_df is not None - return checked_df, regressors_to_remove + return checked_df, regressors_to_remove, lag_regressors_to_remove def _crossvalidation_split_df(df, n_lags, n_forecasts, k, fold_pct, fold_overlap_pct=0.0):
diff --git a/tests/test_integration.py b/tests/test_integration.py --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -49,7 +49,7 @@ def test_train_eval_test(): learning_rate=LR, ) df = pd.read_csv(PEYTON_FILE, nrows=95) - df, _ = df_utils.check_dataframe(df, check_y=False) + df, _, _ = df_utils.check_dataframe(df, check_y=False) df = _handle_missing_data(m, df, freq="D", predicting=False) df_train, df_test = m.split_df(df, freq="D", valid_p=0.1) metrics = m.fit(df_train, freq="D", validation_df=df_test) @@ -61,7 +61,7 @@ def test_train_eval_test(): def test_df_utils_func(): log.info("testing: df_utils Test") df = pd.read_csv(PEYTON_FILE, nrows=95) - df, _ = df_utils.check_dataframe(df, check_y=False) + df, _, _ = df_utils.check_dataframe(df, check_y=False) # test find_time_threshold df, _, _, _ = df_utils.prep_or_copy_df(df) @@ -1631,3 +1631,18 @@ def test_selective_forecasting(): ) metrics_df = m.fit(df, freq="H") forecast = m.predict(df) + + +def test_unused_future_regressors(): + df = pd.DataFrame( + { + "ds": {0: "2022-10-16 00:00:00", 1: "2022-10-17 00:00:00", 2: "2022-10-18 00:00:00"}, + "y": {0: 17, 1: 18, 2: 10}, + "price": {0: 3.5, 1: 3.5, 2: 3.5}, + "cost": {0: 2.5, 1: 2.5, 2: 2.5}, + } + ) + m = NeuralProphet(epochs=1, learning_rate=0.01) + m.add_future_regressor("price") + m.add_lagged_regressor("cost") + m.fit(df, freq="D") diff --git a/tests/test_regularization.py b/tests/test_regularization.py --- a/tests/test_regularization.py +++ b/tests/test_regularization.py @@ -57,7 +57,7 @@ def test_reg_func_abs(): def test_regularization_holidays(): log.info("testing: regularization of holidays") df = generate_holiday_dataset(y_holidays_override=Y_HOLIDAYS_OVERRIDE) - df, _ = df_utils.check_dataframe(df, check_y=False) + df, _, _ = df_utils.check_dataframe(df, check_y=False) m = NeuralProphet( epochs=20, @@ -89,7 +89,7 @@ def test_regularization_holidays(): def test_regularization_events(): log.info("testing: regularization of events") df, events = generate_event_dataset(y_events_override=Y_EVENTS_OVERRIDE) - df, _ = df_utils.check_dataframe(df, check_y=False) + df, _, _ = df_utils.check_dataframe(df, check_y=False) m = NeuralProphet( epochs=50, @@ -141,7 +141,7 @@ def test_regularization_lagged_regressor(): """ log.info("testing: regularization lagged regressors") df, lagged_regressors = generate_lagged_regressor_dataset(periods=100) - df, _ = df_utils.check_dataframe(df, check_y=False) + df, _, _ = df_utils.check_dataframe(df, check_y=False) m = NeuralProphet( epochs=30, diff --git a/tests/test_unit.py b/tests/test_unit.py --- a/tests/test_unit.py +++ b/tests/test_unit.py @@ -78,7 +78,7 @@ def test_time_dataset(): config_missing = configure.MissingDataHandling() df_train, df_val = df_utils.split_df(df_in, n_lags, n_forecasts, valid_p) # create a tabularized dataset from time series - df, _ = df_utils.check_dataframe(df_train) + df, _, _ = df_utils.check_dataframe(df_train) local_data_params, global_data_params = df_utils.init_data_params(df=df, normalize="minmax") df = df.drop("ID", axis=1) df = df_utils.normalize(df, global_data_params) @@ -218,7 +218,7 @@ def check_split(df_in, df_len_expected, n_lags, n_forecasts, freq, p=0.1): n_lags=n_lags, n_forecasts=n_forecasts, ) - df_in, _ = df_utils.check_dataframe(df_in, check_y=False) + df_in, _, _ = df_utils.check_dataframe(df_in, check_y=False) df_in = _handle_missing_data(m, df_in, freq=freq, predicting=False) assert df_len_expected == len(df_in) total_samples = len(df_in) - n_lags - 2 * n_forecasts + 2 @@ -816,7 
+816,7 @@ def test_too_many_NaN(): limit_linear=config_missing.impute_linear, rolling=config_missing.impute_rolling, ) - df, _ = df_utils.check_dataframe(df) + df, _, _ = df_utils.check_dataframe(df) local_data_params, global_data_params = df_utils.init_data_params(df=df, normalize="minmax") df = df.drop("ID", axis=1) df = df_utils.normalize(df, global_data_params)
drop() got multiple values for argument 'axis' ### Discussed in https://github.com/ourownstory/neural_prophet/discussions/1220 <div type='discussions-op-text'> <sup>Originally posted by **kzaho** March 16, 2023</sup> The function check_dataframe at (https://github.com/ourownstory/neural_prophet/blob/main/neuralprophet/df_utils.py) fails. Is there an extra star symbol that should be deleted? ``` regressors_to_remove = list(set(regressors_to_remove)) checked_df = checked_df.drop(*regressors_to_remove, axis=1) ``` </div> [fix] Resolve "drop() got multiple values for argument 'axis'" error #1227
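A minimal reproduction of the error with a toy frame (behavior as in pandas 1.x, where `axis` could still be passed positionally):

```python
import pandas as pd

df = pd.DataFrame({"a": [1], "b": [2], "c": [3]})
regressors_to_remove = ["b", "c"]

# star-unpacking sends "c" into the positional axis slot:
# df.drop("b", "c", axis=1) -> TypeError: drop() got multiple values for argument 'axis'
# df.drop(*regressors_to_remove, axis=1)

df = df.drop(regressors_to_remove, axis=1)  # pass the list itself, as in the patch
```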
@kzaho thanks for reporting this. Do you have a sample for us to reproduce the error? Let's pretend that we work with such data: `{'ds': {0: Timestamp('2022-10-16 00:00:00'), 1: Timestamp('2022-10-17 00:00:00'), 2: Timestamp('2022-10-18 00:00:00'), 3: Timestamp('2022-10-19 00:00:00'), 4: Timestamp('2022-10-20 00:00:00'), 5: Timestamp('2022-10-21 00:00:00'), 6: Timestamp('2022-10-22 00:00:00'), 7: Timestamp('2022-10-23 00:00:00'), 8: Timestamp('2022-10-24 00:00:00'), 9: Timestamp('2022-10-25 00:00:00')}, 'y': {0: 17, 1: 18, 2: 10, 3: 11, 4: 5, 5: 12, 6: 7, 7: 20, 8: 25, 9: 10}, 'price': {0: 3.5, 1: 3.5, 2: 3.5, 3: 3.5, 4: 3.5, 5: 3.5, 6: 3.5, 7: 3.5, 8: 3.5, 9: 3.5}, 'cost': {0: 2.5, 1: 2.5, 2: 2.5, 3: 2.5, 4: 2.5, 5: 2.5, 6: 2.5, 7: 2.5, 8: 2.5, 9: 2.5}}` I came up with a temporary solution; to explain how it works, we have to review a couple of things: 1st, 'neuralprophet/forecaster.py' - line 2979, "df = df[-(self.max_lags + n_historic_predictions) :]" - so, in case we have a trained model with max_lags = 3 and n_historic_predictions = 0, we will have a df with 3 obs. `{'ds': {0: Timestamp('2022-10-23 00:00:00'), 1: Timestamp('2022-10-24 00:00:00'), 2: Timestamp('2022-10-25 00:00:00')}, 'y': {0: 20, 1: 25, 2: 10}, 'price': {0: 3.5, 1: 3.5, 2: 3.5}, 'cost': {0: 2.5, 1: 2.5, 2: 2.5}}` 2nd, this df with 3 obs would be analyzed at line 542 of 'neuralprophet/df_utils.py'. In case our columns ('price' and 'cost') consist of 1 unique value, these columns would be appended to the 'regressors_to_remove' list, which leads to the error. Solution (temporary): slightly modify the last value for both columns ('price' and 'cost'). `df.iloc[-1:, 2:] = df.iloc[-1:, 2:]*1.0001` As we have a fixed data structure with column names ['ds', 'y', 'price', 'cost'], we can put index '2:'. @noxan are you working on this? @leoniewgnr @kzaho I've submitted a pull request as proposed by you @kzaho, many thanks for the hint. I still receive a "list index out of range" error with the adjusted code, could you please double check? I've also attached a jupyter notebook with my test code so we can discuss the problem with the same example code. It is all based on your example @kzaho, many thanks! [issue-1227.ipynb.zip](https://github.com/ourownstory/neural_prophet/files/11181668/issue-1227.ipynb.zip) @Pushkaran-P It does not make any difference for fitting the model, you're all good - only the logging message is misleading. Hi @kzaho @noxan @leoniewgnr, NeuralProphet generally does not allow a column that is used as a future regressor and filled with unique values (only exception: the df consists of multiple time series and one time series uses future regressors with only unique values). We call a column of unique values that is used as model input `Static Covariates`, which is a component we are currently adding to the framework. With that being said, the error message definitely is misleading. I will add a PR that removes the future regressors in a clean way, so that the code runs through without an error (and without using regressors).
## Model Benchmark | Benchmark | Metric | main | current | diff | | |---------------|----------|-----------|-----------|--------|--------------------| | AirPassengers | MAE_val | 15.2698 | 15.2698 | 0.0% | :white_check_mark: | | AirPassengers | RMSE_val | 19.4209 | 19.4209 | 0.0% | :white_check_mark: | | AirPassengers | Loss_val | 0.00195 | 0.00195 | 0.0% | :white_check_mark: | | AirPassengers | MAE | 9.82902 | 9.82902 | 0.0% | :white_check_mark: | | AirPassengers | RMSE | 11.7005 | 11.7005 | 0.0% | :white_check_mark: | | AirPassengers | Loss | 0.00056 | 0.00056 | 0.0% | :white_check_mark: | | AirPassengers | time | 4.66743 | 6.36 | 36.26% | :x: | | YosemiteTemps | MAE_val | 1.71298 | 1.71298 | -0.0% | :white_check_mark: | | YosemiteTemps | RMSE_val | 2.2607 | 2.26069 | -0.0% | :white_check_mark: | | YosemiteTemps | Loss_val | 0.00095 | 0.00095 | -0.0% | :white_check_mark: | | YosemiteTemps | MAE | 1.45187 | 1.45187 | 0.0% | :white_check_mark: | | YosemiteTemps | RMSE | 2.16721 | 2.16721 | -0.0% | :white_check_mark: | | YosemiteTemps | Loss | 0.00066 | 0.00066 | -0.0% | :white_check_mark: | | YosemiteTemps | time | 115.812 | 146.81 | 26.77% | :x: | | PeytonManning | MAE_val | 0.58159 | 0.58159 | 0.0% | :white_check_mark: | | PeytonManning | RMSE_val | 0.72216 | 0.72216 | 0.0% | :white_check_mark: | | PeytonManning | Loss_val | 0.01239 | 0.01239 | 0.0% | :white_check_mark: | | PeytonManning | MAE | 0.41671 | 0.41671 | 0.0% | :white_check_mark: | | PeytonManning | RMSE | 0.55961 | 0.55961 | -0.0% | :white_check_mark: | | PeytonManning | Loss | 0.00612 | 0.00612 | -0.0% | :white_check_mark: | | PeytonManning | time | 13.6127 | 17.26 | 26.79% | :x: | <details> <summary>Model training plots</summary> ## Model Training ### PeytonManning ![](https://asset.cml.dev/aa0e6f931321d0a7d0dc62d2feb9fdfda54f3638?cml=svg%2Bxml&cache-bypass=d2698af3-b26e-4b91-95bf-0b7e86a84277) ### YosemiteTemps ![](https://asset.cml.dev/438a04b76bc0e38eb146a649d9cf525bc31f45a6?cml=svg%2Bxml&cache-bypass=b8c609bd-8bc3-47ff-b29a-2d0a4582c138) ### AirPassengers ![](https://asset.cml.dev/d699bb9bce78079ad9f4207d4784e90e1cc29719?cml=svg%2Bxml&cache-bypass=dcd2e267-4a2e-416f-974c-9a78bcaa3ed9) </details> ![](https://cml.dev/watermark.png#618d908bcc054bad3543de3219b10226f849350d "CML watermark") ## [Codecov](https://codecov.io/gh/ourownstory/neural_prophet/pull/1250?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Oskar+Triebe) Report > Merging [#1250](https://codecov.io/gh/ourownstory/neural_prophet/pull/1250?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Oskar+Triebe) (4dadeeb) into [main](https://codecov.io/gh/ourownstory/neural_prophet/commit/f61527a454fb94f672f9c89cc5f3fb419224cda9?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Oskar+Triebe) (f61527a) will **not change** coverage. > The diff coverage is `100.00%`. :mega: This organization is not using Codecov’s [GitHub App Integration](https://github.com/apps/codecov). We recommend you install it so Codecov can continue to function properly for your repositories. 
[Learn more](https://about.codecov.io/blog/codecov-is-updating-its-github-integration/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Oskar+Triebe) ```diff @@ Coverage Diff @@ ## main #1250 +/- ## ======================================= Coverage 89.56% 89.56% ======================================= Files 35 35 Lines 4830 4830 ======================================= Hits 4326 4326 Misses 504 504 ``` | [Impacted Files](https://codecov.io/gh/ourownstory/neural_prophet/pull/1250?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Oskar+Triebe) | Coverage Δ | | |---|---|---| | [neuralprophet/df\_utils.py](https://codecov.io/gh/ourownstory/neural_prophet/pull/1250?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Oskar+Triebe#diff-bmV1cmFscHJvcGhldC9kZl91dGlscy5weQ==) | `94.80% <100.00%> (ø)` | | Help us with your feedback. Take ten seconds to tell us [how you rate us](https://about.codecov.io/nps?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Oskar+Triebe). Have a feature suggestion? [Share it here.](https://app.codecov.io/gh/feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Oskar+Triebe) Thank you @noxan for tackling this! Running the notebook on branch 'issue-1227' I got the following error in your final cell ``` WARNING - (NP.forecaster.fit) - When Global modeling with local normalization, metrics are displayed in normalized scale. WARNING - (NP.df_utils.check_single_dataframe) - Encountered future regressor with only unique values in training set. Variable will be removed for global modeling if this is true for all time series. WARNING - (NP.df_utils.check_single_dataframe) - Encountered future regressor with only unique values in training set. Variable will be removed for global modeling if this is true for all time series. WARNING - (NP.df_utils.check_dataframe) - Encountered future regressor with only unique values in training set across all IDs.Automatically removed variable. WARNING - (NP.df_utils.check_dataframe) - Encountered future regressor with only unique values in training set across all IDs.Automatically removed variable. WARNING - (NP.forecaster._check_dataframe) - Removing regressor price because it is not present in the data. WARNING - (NP.forecaster._check_dataframe) - Removing regressor cost because it is not present in the data. INFO - (NP.df_utils._infer_frequency) - Major frequency D corresponds to 90.0% of the data. INFO - (NP.df_utils._infer_frequency) - Dataframe freq automatically defined as D INFO - (NP.config.init_data_params) - Setting normalization to global as only one dataframe provided for training. INFO - (NP.utils.set_auto_seasonalities) - Disabling yearly seasonality. Run NeuralProphet with yearly_seasonality=True to override this. INFO - (NP.utils.set_auto_seasonalities) - Disabling weekly seasonality. Run NeuralProphet with weekly_seasonality=True to override this. INFO - (NP.utils.set_auto_seasonalities) - Disabling daily seasonality. Run NeuralProphet with daily_seasonality=True to override this. 
--------------------------------------------------------------------------- IndexError Traceback (most recent call last) Input In [4], in <cell line: 6>() 4 m.add_future_regressor('price') 5 m.add_future_regressor('cost') ----> 6 m.fit(df) File ~/github/neural_prophet/neuralprophet/forecaster.py:892, in NeuralProphet.fit(self, df, freq, validation_df, epochs, batch_size, learning_rate, early_stopping, minimal, metrics, progress, checkpointing, continue_training, num_workers) 890 # Training 891 if validation_df is None: --> 892 metrics_df = self._train( 893 df, 894 progress_bar_enabled=bool(progress), 895 metrics_enabled=bool(self.metrics), 896 checkpointing_enabled=checkpointing, 897 continue_training=continue_training, 898 num_workers=num_workers, 899 ) 900 else: 901 df_val, _, _, _ = df_utils.prep_or_copy_df(validation_df) File ~/github/neural_prophet/neuralprophet/forecaster.py:2754, in NeuralProphet._train(self, df, df_val, progress_bar_enabled, metrics_enabled, checkpointing_enabled, continue_training, num_workers) 2752 # Set up data the training dataloader 2753 df, _, _, _ = df_utils.prep_or_copy_df(df) -> 2754 train_loader = self._init_train_loader(df, num_workers) 2755 dataset_size = len(df) # train_loader.dataset 2757 # Internal flag to check if validation is enabled File ~/github/neural_prophet/neuralprophet/forecaster.py:2688, in NeuralProphet._init_train_loader(self, df, num_workers) 2685 if self.config_country_holidays is not None: 2686 self.config_country_holidays.init_holidays(df_merged) -> 2688 dataset = self._create_dataset( 2689 df, predict_mode=False, prediction_frequency=self.prediction_frequency 2690 ) # needs to be called after set_auto_seasonalities 2692 # Determine the max_number of epochs 2693 self.config_train.set_auto_batch_epoch(n_data=len(dataset)) File ~/github/neural_prophet/neuralprophet/forecaster.py:2350, in NeuralProphet._create_dataset(self, df, predict_mode, prediction_frequency) 2328 """Construct dataset from dataframe. 2329 2330 (Configured Hyperparameters can be overridden by explicitly supplying them. (...) 
2347 TimeDataset 2348 """ 2349 df, _, _, _ = df_utils.prep_or_copy_df(df) -> 2350 return time_dataset.GlobalTimeDataset( 2351 df, 2352 predict_mode=predict_mode, 2353 n_lags=self.n_lags, 2354 n_forecasts=self.n_forecasts, 2355 predict_steps=self.predict_steps, 2356 config_seasonality=self.config_seasonality, 2357 config_events=self.config_events, 2358 config_country_holidays=self.config_country_holidays, 2359 config_lagged_regressors=self.config_lagged_regressors, 2360 config_regressors=self.config_regressors, 2361 config_missing=self.config_missing, 2362 prediction_frequency=prediction_frequency, 2363 ) File ~/github/neural_prophet/neuralprophet/time_dataset.py:34, in GlobalTimeDataset.__init__(self, df, **kwargs) 32 self.length = 0 33 for df_name, df_i in df.groupby("ID"): ---> 34 timedataset = TimeDataset(df_i, df_name, **kwargs) 35 self.length += timedataset.length 36 for i in range(0, len(timedataset)): File ~/github/neural_prophet/neuralprophet/time_dataset.py:70, in TimeDataset.__init__(self, df, name, **kwargs) 68 self.init_after_tabularized(inputs, targets) 69 self.filter_samples_after_init(kwargs["prediction_frequency"]) ---> 70 self.drop_nan_after_init(df, kwargs["predict_steps"], drop_missing) File ~/github/neural_prophet/neuralprophet/time_dataset.py:86, in TimeDataset.drop_nan_after_init(self, df, predict_steps, drop_missing) 83 if key in self.two_level_inputs or key == "events" or key == "regressors": 84 # Extract tensor out of OrderedDict to see if it contains NaNs 85 tuple_list = list(data.items()) ---> 86 tensor = tuple_list[0][1] 87 if np.isnan(np.array(tensor)).any() and (i not in nan_idx): 88 nan_idx.append(i) IndexError: list index out of range ``` @ourownstory yeah, thanks for confirming. that's exactly the one I wrote about in #1227 ... 🤔
2023-04-15T22:45:17
ourownstory/neural_prophet
1282
ourownstory__neural_prophet-1282
[ "1084" ]
f8516666268011f931f99a0ba7a49ca15166343c
diff --git a/neuralprophet/utils.py b/neuralprophet/utils.py --- a/neuralprophet/utils.py +++ b/neuralprophet/utils.py @@ -730,6 +730,7 @@ def smooth_loss_and_suggest(lr_finder_results, window=10): log.error( f"The number of loss values ({len(loss)}) is too small to estimate a learning rate. Increase the number of samples or manually set the learning rate." ) + raise return (loss, lr, suggestion)
Exception handling for too small dataset to detect learning rate is misleading In case the dataset is too small to detect a learning rate, the following warning is shown: `ERROR - (NP.utils.smooth_loss_and_suggest) - The number of loss values (1) is too small to estimate a learning rate. Increase the number of samples or manually set the learning rate.` Yet the Python exception thrown is very misleading: `UnboundLocalError: local variable 'suggestion' referenced before assignment` Solution: We should raise a `ValueError` or some other more suitable error directly in `neuralprophet/utils.py:713` (inside `smooth_loss_and_suggest`) and make sure not to trigger an `UnboundLocalError`.
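A self-contained sketch of the patched control flow: the error is logged and then re-raised, instead of falling through to an unbound `suggestion` (the function body and its failure condition are illustrative, not the real implementation):

```python
import logging

log = logging.getLogger(__name__)

def smooth_loss_and_suggest_sketch(losses):
    """Illustrative only: mirrors the patched error handling, not the real signature."""
    try:
        # assumption: estimating a learning rate fails when too few loss values exist
        if len(losses) < 2:
            raise ValueError("too few loss values")
        suggestion = sum(losses) / len(losses)
    except Exception:
        log.error(
            "The number of loss values is too small to estimate a learning rate. "
            "Increase the number of samples or manually set the learning rate."
        )
        raise  # re-raise so callers see the real error, not a later UnboundLocalError
    return suggestion
```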
2023-04-18T07:01:39
ourownstory/neural_prophet
1286
ourownstory__neural_prophet-1286
[ "1235" ]
9f08ff69d89030efb6f921d28c7a8380b3f74842
diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py
--- a/neuralprophet/forecaster.py
+++ b/neuralprophet/forecaster.py
@@ -1797,14 +1797,15 @@ def set_plotting_backend(self, plotting_backend: str):
                 plotly-resampler package to accelerate visualizing large data by resampling it. Only supported for
                 jupyterlab notebooks and vscode notebooks.
             * ``plotly``: Use the plotly backend for plotting
+            * ``plotly-static``: Use the plotly backend to generate static svg
             * ``matplotlib``: use matplotlib for plotting
         """
-        if plotting_backend in ["plotly", "matplotlib", "plotly-resampler"]:
+        if plotting_backend in ["plotly", "matplotlib", "plotly-resampler", "plotly-static"]:
             self.plotting_backend = plotting_backend
             log_warning_deprecation_plotly(self.plotting_backend)
         else:
             raise ValueError(
-                "The parameter `plotting_backend` must be either 'plotly', 'plotly-resampler' or 'matplotlib'."
+                "The parameter `plotting_backend` must be either 'plotly', 'plotly-static', 'plotly-resampler' or 'matplotlib'."
             )
 
     def highlight_nth_step_ahead_of_each_forecast(self, step_number: Optional[int] = None):
@@ -1860,6 +1861,7 @@ def plot(
                 environments (colab, pycharm interpreter) plotly-resampler might not properly visualize the figures.
                 In this case, consider switching to 'plotly-auto'.
             * ``plotly``: Use the plotly backend for plotting
+            * ``plotly-static``: Use the plotly backend to generate static svg
             * ``matplotlib``: use matplotlib for plotting
             * (default) None: Plotting backend is set automatically. Use plotly with resampling for jupyterlab
                 notebooks and vscode notebooks. Automatically switch to plotly without resampling for all other
@@ -1929,6 +1931,7 @@ def plot(
                 figsize=tuple(x * 70 for x in figsize),
                 highlight_forecast=forecast_in_focus,
                 resampler_active=plotting_backend == "plotly-resampler",
+                plotly_static=plotting_backend == "plotly-static",
             )
         else:
             return plot(
@@ -2047,10 +2050,12 @@ def plot_latest_forecast(
                 environments (colab, pycharm interpreter) plotly-resampler might not properly visualize the figures.
                 In this case, consider switching to 'plotly-auto'.
             * ``plotly``: Use the plotly backend for plotting
+            * ``plotly-static``: Use the plotly backend to generate static svg
             * ``matplotlib``: use matplotlib for plotting
             ** (default) None: Plotting backend is set automatically. Use plotly with resampling for jupyterlab
                 notebooks and vscode notebooks. Automatically switch to plotly without resampling for all other
                 environments.
+            * (default) None
         Returns
         -------
             matplotlib.axes.Axes
@@ -2097,6 +2102,7 @@ def plot_latest_forecast(
                 highlight_forecast=self.highlight_forecast_step_n,
                 line_per_origin=True,
                 resampler_active=plotting_backend == "plotly-resampler",
+                plotly_static=plotting_backend == "plotly-static",
             )
         else:
             return plot(
@@ -2169,6 +2175,7 @@ def plot_components(
                 environments (colab, pycharm interpreter) plotly-resampler might not properly visualize the figures.
                 In this case, consider switching to 'plotly-auto'.
             * ``plotly``: Use the plotly backend for plotting
+            * ``plotly-static``: Use the plotly backend to generate static svg
             * ``matplotlib``: use matplotlib for plotting
             * (default) None: Plotting backend is set automatically. Use plotly with resampling for jupyterlab
                 notebooks and vscode notebooks. Automatically switch to plotly without resampling for all other
@@ -2260,6 +2267,7 @@ def plot_components(
                 df_name=df_name,
                 one_period_per_season=one_period_per_season,
                 resampler_active=plotting_backend == "plotly-resampler",
+                plotly_static=plotting_backend == "plotly-static",
             )
         else:
             return plot_components(
@@ -2323,6 +2331,7 @@ def plot_parameters(
                 environments (colab, pycharm interpreter) plotly-resampler might not properly visualize the figures.
                 In this case, consider switching to 'plotly-auto'.
             * ``plotly``: Use the plotly backend for plotting
+            * ``plotly-static``: Use the plotly backend to generate static svg
             * ``matplotlib``: use matplotlib for plotting
             * (default) None: Plotting backend is set automatically. Use plotly with resampling for jupyterlab
                 notebooks and vscode notebooks. Automatically switch to plotly without resampling for all other
@@ -2331,7 +2340,6 @@ def plot_parameters(
         Note
         ----
         For multiple time series and local modeling of at least one component, the df_name parameter is required.
-
         quantile : float
             The quantile for which the model parameters are to be plotted
@@ -2405,17 +2413,33 @@ def plot_parameters(
         log_warning_deprecation_plotly(plotting_backend)
 
         if plotting_backend.startswith("plotly"):
-            return plot_parameters_plotly(
-                m=self,
-                quantile=quantile,
-                weekly_start=weekly_start,
-                yearly_start=yearly_start,
-                figsize=tuple(x * 70 for x in figsize) if figsize else (700, 210),
-                df_name=valid_plot_configuration["df_name"],
-                plot_configuration=valid_plot_configuration,
-                forecast_in_focus=forecast_in_focus,
-                resampler_active=plotting_backend == "plotly-resampler",
-            )
+            if plotting_backend == "plotly-static":
+                fig = plot_parameters_plotly(
+                    m=self,
+                    quantile=quantile,
+                    weekly_start=weekly_start,
+                    yearly_start=yearly_start,
+                    figsize=tuple(x * 70 for x in figsize) if figsize else (700, 210),
+                    df_name=valid_plot_configuration["df_name"],
+                    plot_configuration=valid_plot_configuration,
+                    forecast_in_focus=forecast_in_focus,
+                    resampler_active=plotting_backend == "plotly-resampler",
+                    plotly_static=plotting_backend == "plotly-static",
+                )
+                fig.show("svg")
+            else:
+                return plot_parameters_plotly(
+                    m=self,
+                    quantile=quantile,
+                    weekly_start=weekly_start,
+                    yearly_start=yearly_start,
+                    figsize=tuple(x * 70 for x in figsize) if figsize else (700, 210),
+                    df_name=valid_plot_configuration["df_name"],
+                    plot_configuration=valid_plot_configuration,
+                    forecast_in_focus=forecast_in_focus,
+                    resampler_active=plotting_backend == "plotly-resampler",
+                    plotly_static=plotting_backend == "plotly-static",
+                )
         else:
             return plot_parameters(
                 m=self,
diff --git a/neuralprophet/plot_forecast_plotly.py b/neuralprophet/plot_forecast_plotly.py
--- a/neuralprophet/plot_forecast_plotly.py
+++ b/neuralprophet/plot_forecast_plotly.py
@@ -51,6 +51,7 @@ def plot(
     line_per_origin=False,
     figsize=(700, 210),
     resampler_active=False,
+    plotly_static=False,
 ):
     """
     Plot the NeuralProphet forecast
@@ -73,6 +74,8 @@
         Width, height in inches.
     resampler_active : bool
         Flag whether to activate the plotly-resampler
+    plotly_static: bool
+        Flag whether to generate a static svg image
 
     Returns
     -------
@@ -227,6 +230,8 @@ def plot(
     )
     fig = go.Figure(data=data, layout=layout)
     unregister_plotly_resampler()
+    if plotly_static:
+        fig = fig.show("svg")
     return fig
 
@@ -238,6 +243,7 @@ def plot_components(
     one_period_per_season=False,
     figsize=(700, 210),
     resampler_active=False,
+    plotly_static=False,
 ):
     """
     Plot the NeuralProphet forecast components.
@@ -258,6 +264,8 @@ def plot_components(
         Width, height in inches.
     resampler_active : bool
         Flag whether to activate the plotly-resampler
+    plotly_static: bool
+        Flag whether to generate a static svg image
 
     Returns
     -------
@@ -339,6 +347,8 @@
     for ax in multiplicative_axes:
         ax = set_y_as_percent(ax)
     unregister_plotly_resampler()
+    if plotly_static:
+        fig = fig.show("svg")
     return fig
diff --git a/neuralprophet/plot_model_parameters_plotly.py b/neuralprophet/plot_model_parameters_plotly.py
--- a/neuralprophet/plot_model_parameters_plotly.py
+++ b/neuralprophet/plot_model_parameters_plotly.py
@@ -817,6 +817,7 @@ def plot_parameters(
     df_name=None,
     forecast_in_focus=None,
     resampler_active=False,
+    plotly_static=False,
 ):
     """Plot the parameters that the model is composed of, visually.
@@ -860,6 +861,8 @@
         None (default): plot self.highlight_forecast_step_n by default
     resampler_active : bool
         Flag whether to activate the plotly-resampler
+    plotly_static: bool
+        Flag whether to generate a static svg image
 
     Returns:
         Plotly figure
diff --git a/tests/test_plotting.py b/tests/test_plotting.py
--- a/tests/test_plotting.py
+++ b/tests/test_plotting.py
@@ -27,9 +27,10 @@
 # plot tests cover both plotting backends
 decorator_input = ["plotting_backend", [("matplotlib"), ("plotly")]]
+decorator_input_extended = ["plotting_backend", [("matplotlib"), ("plotly"), ("plotly-static"), ("plotly-resampler")]]
 
[email protected](*decorator_input)
[email protected](*decorator_input_extended)
 def test_plot(plotting_backend):
     log.info(f"testing: Basic plotting with forecast in focus with {plotting_backend}")
     df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
Add plotly-static plotting backend

**Is your feature request related to a problem? Please describe.**
- The plots in the tutorial Jupyter notebooks cannot be saved and restored due to the interactive nature of plotly plots.

**Describe the solution you'd like**
- Add a `plotly-static` option for our plotting backend which renders static (non-interactive) plotly plots, as sketched below.

**Alternatives considered**
- It would be possible to set the plotting backend to `plotly` (without the resampler), capture the returned figure and override the renderer, e.g. `fig.show(renderer="svg")`. But this is rather impractical if users want to opt for the interactive version when they work through the tutorials and have to adjust this each and every time.
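A usage sketch of the requested backend, based on the API in the patch above (`set_plotting_backend` accepts the new `"plotly-static"` literal; `df` is assumed to be a prepared input frame with `ds` and `y` columns):

```python
from neuralprophet import NeuralProphet

m = NeuralProphet()
# Render non-interactive SVG output that survives saving and reopening a notebook.
m.set_plotting_backend("plotly-static")
# metrics = m.fit(df)
# forecast = m.predict(df)
# m.plot(forecast)  # now emitted via fig.show("svg") instead of an interactive widget
```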
I can work on this
2023-04-19T17:46:40
ourownstory/neural_prophet
1293
ourownstory__neural_prophet-1293
[ "1133" ]
5e8fa94c0379ef1cd45ceb2dadacacdaab37843a
diff --git a/neuralprophet/data/process.py b/neuralprophet/data/process.py
--- a/neuralprophet/data/process.py
+++ b/neuralprophet/data/process.py
@@ -1,5 +1,5 @@
 import logging
-from typing import Optional
+from typing import List, Optional
 
 import numpy as np
 import pandas as pd
@@ -12,12 +12,25 @@
     ConfigLaggedRegressors,
     ConfigSeasonality,
 )
+from neuralprophet.np_types import Components
 
 log = logging.getLogger("NP.data.processing")
 
 
-def _reshape_raw_predictions_to_forecst_df(model, df, predicted, components, prediction_frequency, dates):
-    """Turns forecast-origin-wise predictions into forecast-target-wise predictions.
+def _reshape_raw_predictions_to_forecst_df(
+    df: pd.DataFrame,
+    predicted: np.ndarray,
+    components: Optional[Components],
+    prediction_frequency: Optional[dict],
+    dates: pd.Series,
+    n_forecasts: int,
+    max_lags: int,
+    freq: Optional[str],
+    quantiles: List[float],
+    config_lagged_regressors: Optional[ConfigLaggedRegressors],
+) -> pd.DataFrame:
+    """
+    Turns forecast-origin-wise predictions into forecast-target-wise predictions.
 
     Parameters
     ----------
@@ -31,6 +44,16 @@
         Frequency of the predictions
     dates : pd.Series
         timestamps referring to the start of the predictions
+    n_forecasts : int
+        Number of steps ahead of prediction time step to forecast.
+    max_lags : int
+        Maximum number of lags to use
+    freq : str
+        Data step sizes. Frequency of data recording.
+    quantiles : list[float]
+        List of quantiles to include in the forecast
+    config_lagged_regressors : ConfigLaggedRegressors
+        Configuration for lagged regressors
 
     Returns
     -------
@@ -48,11 +71,11 @@
     df_forecast = pd.concat((df[cols],), axis=1)
     # create a line for each forecast_lag
     # 'yhat<i>' is the forecast for 'y' at 'ds' from i steps ago.
-    for j in range(len(model.config_train.quantiles)):
-        for forecast_lag in range(1, model.n_forecasts + 1):
+    for j in range(len(quantiles)):
+        for forecast_lag in range(1, n_forecasts + 1):
             forecast = predicted[:, forecast_lag - 1, j]
-            pad_before = model.max_lags + forecast_lag - 1
-            pad_after = model.n_forecasts - forecast_lag
+            pad_before = max_lags + forecast_lag - 1
+            pad_after = n_forecasts - forecast_lag
             yhat = np.concatenate(([np.NaN] * pad_before, forecast, [np.NaN] * pad_after))
             if prediction_frequency is not None:
                 ds = df_forecast["ds"].iloc[pad_before : -pad_after if pad_after > 0 else None]
@@ -68,7 +91,7 @@
             if j == 0:
                 name = f"yhat{forecast_lag}"
             else:
-                name = f"yhat{forecast_lag} {round(model.config_train.quantiles[j] * 100, 1)}%"
+                name = f"yhat{forecast_lag} {round(quantiles[j] * 100, 1)}%"
             df_forecast[name] = yhat
 
     if components is None:
@@ -78,16 +101,16 @@
     lagged_components = [
         "ar",
     ]
-    if model.config_lagged_regressors is not None:
-        for name in model.config_lagged_regressors.keys():
+    if config_lagged_regressors is not None:
+        for name in config_lagged_regressors.keys():
             lagged_components.append(f"lagged_regressor_{name}")
     for comp in lagged_components:
         if comp in components:
-            for j in range(len(model.config_train.quantiles)):
-                for forecast_lag in range(1, model.n_forecasts + 1):
+            for j in range(len(quantiles)):
+                for forecast_lag in range(1, n_forecasts + 1):
                     forecast = components[comp][:, forecast_lag - 1, j]  # 0 is the median quantile
-                    pad_before = model.max_lags + forecast_lag - 1
-                    pad_after = model.n_forecasts - forecast_lag
+                    pad_before = max_lags + forecast_lag - 1
+                    pad_after = n_forecasts - forecast_lag
                     yhat = np.concatenate(([np.NaN] * pad_before, forecast, [np.NaN] * pad_after))
                     if prediction_frequency is not None:
                         ds = df_forecast["ds"].iloc[pad_before : -pad_after if pad_after > 0 else None]
@@ -106,10 +129,10 @@
     # only for non-lagged components
     for comp in components:
         if comp not in lagged_components:
-            for j in range(len(model.config_train.quantiles)):
+            for j in range(len(quantiles)):
                 forecast_0 = components[comp][0, :, j]
-                forecast_rest = components[comp][1:, model.n_forecasts - 1, j]
-                yhat = np.concatenate(([np.NaN] * model.max_lags, forecast_0, forecast_rest))
+                forecast_rest = components[comp][1:, n_forecasts - 1, j]
+                yhat = np.concatenate(([np.NaN] * max_lags, forecast_0, forecast_rest))
                 if prediction_frequency is not None:
                     date_list = []
                     for key, value in prediction_frequency.items():
@@ -132,10 +155,10 @@
                     dates_comp = dates_comp[dates_comp.isin(date_list[i])]
                 ser = pd.Series(dtype="datetime64[ns]")
                 for date in dates_comp:
-                    d = pd.date_range(date, periods=model.n_forecasts + 1, freq=model.data_freq)
+                    d = pd.date_range(date, periods=n_forecasts + 1, freq=freq)
                     ser = pd.concat((ser, pd.Series(d).iloc[1:]))
                 df_comp = pd.DataFrame({"ds": ser, "yhat": components[comp].flatten()}).drop_duplicates(subset="ds")
-                df_comp, _ = df_utils.add_missing_dates_nan(df_comp, freq=model.data_freq)
+                df_comp, _ = df_utils.add_missing_dates_nan(df=df_comp, freq=freq)
                 yhat = pd.merge(df_forecast.filter(["ds", "ID"]), df_comp, on="ds", how="left")["yhat"].values
             if j == 0:  # temporary condition to add only the median component
                 # add yhat into dataframe, using df_forecast indexing
diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py
--- a/neuralprophet/forecaster.py
+++ b/neuralprophet/forecaster.py
@@ -1072,7 +1072,16 @@ def predict(self, df: pd.DataFrame, decompose: bool = True, raw: bool = False):
                     fcst = fcst[:-1]
             else:
                 fcst = _reshape_raw_predictions_to_forecst_df(
-                    self, df_i, predicted, components, self.prediction_frequency, dates
+                    df=df_i,
+                    predicted=predicted,
+                    components=components,
+                    prediction_frequency=self.prediction_frequency,
+                    dates=dates,
+                    n_forecasts=self.n_forecasts,
+                    max_lags=self.max_lags,
+                    freq=self.data_freq,
+                    quantiles=self.config_train.quantiles,
+                    config_lagged_regressors=self.config_lagged_regressors,
                 )
             if periods_added[df_name] > 0:
                 fcst = fcst[: -periods_added[df_name]]
diff --git a/neuralprophet/np_types.py b/neuralprophet/np_types.py
--- a/neuralprophet/np_types.py
+++ b/neuralprophet/np_types.py
@@ -1,6 +1,7 @@
 import sys
 from typing import Dict, List, Union
 
+import torch
 import torchmetrics
 
 # Ensure compatibility with python 3.7
@@ -21,3 +22,5 @@
 CollectMetricsMode = Union[List[str], bool, Dict[str, torchmetrics.Metric]]
 
 SeasonGlobalLocalMode = Literal["global", "local"]
+
+Components = Dict[str, torch.Tensor]
Refactor forecaster.py

- [x] Create an overview of all functions to be moved (incl. origin, destination, and their places of use)
- [x] Simplify the file, move functions to util functions
- [x] No private functions in forecaster.py -> move to other appropriate files
- [ ] Restructure utils files (coherence, move more into appropriate separate files, maybe rename utils to something without 'util')
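For reference, the alignment arithmetic performed by the relocated helper in the patch above, shown with toy numbers (the values are hypothetical; the padding rule is the one in `_reshape_raw_predictions_to_forecst_df`):

```python
import numpy as np

# A forecast made 'forecast_lag' steps ahead is padded with NaNs so that
# the yhat<forecast_lag> column lines up with its target timestamps.
max_lags, n_forecasts, forecast_lag = 3, 2, 1
forecast = np.array([10.0, 11.0, 12.0])   # hypothetical raw predictions
pad_before = max_lags + forecast_lag - 1  # 3 leading NaNs
pad_after = n_forecasts - forecast_lag    # 1 trailing NaN
yhat = np.concatenate(([np.nan] * pad_before, forecast, [np.nan] * pad_after))
print(yhat)  # [nan nan nan 10. 11. 12. nan]
```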
2023-04-20T22:41:33
ourownstory/neural_prophet
1298
ourownstory__neural_prophet-1298
[ "1234" ]
edf8bf5d58921e1f7094618c2a290fb0befe14cb
diff --git a/neuralprophet/time_net.py b/neuralprophet/time_net.py
--- a/neuralprophet/time_net.py
+++ b/neuralprophet/time_net.py
@@ -793,6 +793,9 @@ def test_step(self, batch, batch_idx):
         loss, reg_loss = self.loss_func(inputs, predicted, targets)
         # Metrics
         if self.metrics_enabled:
+            predicted_denorm = self.denormalize(predicted[:, :, 0])
+            target_denorm = self.denormalize(targets.squeeze(dim=2))
+            self.log_dict(self.metrics_val(predicted_denorm, target_denorm), **self.log_args)
             self.log("Loss_test", loss, **self.log_args)
             self.log("RegLoss_test", reg_loss, **self.log_args)
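A small numeric illustration of why the added lines denormalize before computing test metrics (toy values, not project code): an error measured on normalized data is not in the units of the original series.

```python
import numpy as np

y = np.array([100.0, 110.0, 120.0])
mean, std = y.mean(), y.std()
y_norm = (y - mean) / std
pred_norm = y_norm + 0.1                      # hypothetical constant model error
mae_norm = np.abs(pred_norm - y_norm).mean()  # 0.1, in normalized units
pred = pred_norm * std + mean                 # denormalize, as test_step now does
mae = np.abs(pred - y).mean()                 # ~0.82, in the data's own units
print(round(mae_norm, 3), round(mae, 3))
```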
diff --git a/tutorials/feature-use/test_and_crossvalidate.ipynb b/tutorials/feature-use/test_and_crossvalidate.ipynb --- a/tutorials/feature-use/test_and_crossvalidate.ipynb +++ b/tutorials/feature-use/test_and_crossvalidate.ipynb @@ -18,14 +18,14 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "id": "80400b6d-ca57-47ba-9dc5-0da3885ab6b1", "metadata": {}, "outputs": [], "source": [ "if \"google.colab\" in str(get_ipython()):\n", - " !pip install git+https://github.com/ourownstory/neural_prophet.git # may take a while\n", - " #!pip install neuralprophet # much faster, but may not have the latest upgrades/bugfixes\n", + " #!pip install git+https://github.com/ourownstory/neural_prophet.git # may take a while\n", + " !pip install neuralprophet # much faster, but may not have the latest upgrades/bugfixes\n", "\n", "import pandas as pd\n", "from neuralprophet import NeuralProphet, set_log_level\n", @@ -43,7 +43,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "id": "90e2ee7e-6798-4a0c-a37f-c3b7325d9c70", "metadata": {}, "outputs": [], @@ -73,17 +73,10 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 5, "id": "170af980-1830-4909-800a-b3e4f0ccc4f9", "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - " \r" - ] - }, { "data": { "text/html": [ @@ -105,28 +98,30 @@ " <thead>\n", " <tr style=\"text-align: right;\">\n", " <th></th>\n", - " <th>SmoothL1Loss</th>\n", - " <th>MAE</th>\n", - " <th>RMSE</th>\n", + " <th>MAE_val</th>\n", + " <th>RMSE_val</th>\n", + " <th>Loss_test</th>\n", + " <th>RegLoss_test</th>\n", " </tr>\n", " </thead>\n", " <tbody>\n", " <tr>\n", " <th>0</th>\n", - " <td>0.005307</td>\n", - " <td>26.848057</td>\n", - " <td>32.760937</td>\n", + " <td>18.830694</td>\n", + " <td>23.04147</td>\n", + " <td>0.00274</td>\n", + " <td>0.0</td>\n", " </tr>\n", " </tbody>\n", "</table>\n", "</div>" ], "text/plain": [ - " SmoothL1Loss MAE RMSE\n", - "0 0.005307 26.848057 32.760937" + " MAE_val RMSE_val Loss_test RegLoss_test\n", + "0 18.830694 23.04147 0.00274 0.0" ] }, - "execution_count": 3, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -154,28 +149,10 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 3, "id": "47d23260-ad96-4196-aba1-7eb9fd2e6b21", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - " \r" - ] - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAsgAAAGoCAYAAABbtxOxAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAACI0ElEQVR4nO3dd3xb9bk/8M/RsmzLe6/Ycew409kkhgAOJhAoo6GB0NKSlpFOoL2/cilt6YI2tLe3t5S2tCkpo6VQRkkoq4CDmU5CdkKWHduJ5b0kD0nWOr8/5OM4iYe2jqTP+/Xq614cWedrfT0ePef5Po8giqIIIiIiIiICAChCvQAiIiIiIjlhgExERERENAYDZCIiIiKiMRggExERERGNwQCZiIiIiGgMVagX4Iv09HQUFRUF/bo2mw1qtTro16WJcU/kh3siP9wTeeK+yA/3RH4CtSdNTU3o7u4+7+NhHSAXFRVh9+7dQb9ua2srcnNzg35dmhj3RH64J/LDPZEn7ov8cE/kJ1B7snTp0nE/zhILIiIiIqIxGCATEREREY3BAJmIiIiIaIywrkEej81mg16vh8ViCdg1HA4HjEZjwJ4/WLRaLfLz83kQgYiIiGiMiAuQ9Xo9EhISUFRUBEEQAnINq9UKjUYTkOcOFlEU0dPTA71ej+nTp4d6OURERESyEXElFhaLBWlpaQELjiOFIAhIS0sLaKadiIiIKBxFXIAMgMGxm/g6EREREZ0vIgNkIiIiIiJvMUAOgpqaGlxzzTUefc6TTz6J1tbW0f/+/e9/j5KSEgiCMO7EFyIiIiLyDwbIMnVugHzRRRfhnXfeQWFhYQhXRURERBT5GCD72Y9+9CP89re/Hf3vH/zgBzhw4AAGBwexbt06zJo1C7fccgtEUQQA/OxnP8OyZcswb948bNy4EaIo4sUXX8Tu3btxyy23YOHChTCbzVi0aBGKiopC80URERERRZGIa/M21re3Hsb+1n6/PufC3ET86uqZE/77bbfdhhtuuAHf/va34XQ68dxzz+FXv/oV9u3bh08//RS5ubm46KKL8NFHH2HlypX41re+hR/96EcAgC996Ut49dVXsW7dOvz+97/Hr3/96wlnhBMRERFRYDCD7GdFRUVIS0vDvn378NZbb2HRokVIS0vDBRdcgPz8fCgUCixcuBBNTU0AgHfffRfLly/H/PnzsX37dnz66aeh/QKIiIiIolxEZ5B/+9l5AXleq9U66b/fcccdePLJJ9He3o7bbrsNABATEzP670qlEna7HRaLBd/4xjewe/duFBQU4Cc/+Qn7EhMRERGFGDPIAbB27Vq8+eab+OSTT3DllVdO+DgpGE5PT8fg4CBefPHF0X9LSEjAwMBAwNdKREREkUcURVjtzlAvI2wxQA4AjUaDVatW4aabboJSqZzwccnJybjzzjsxb948XHnllVi2bNnov335y1/G1772tdFDer/73e+Qn58PvV6P8vJy3HHHHcH4UoiIiCgM/WXHacTf/zrWPvEJ/v1pO+wOBsueEESpnUIYWrp0KXbv3n3Wx44ePYrZs2cH9LpWqxUajWbCf3c6nVi8eDFeeOEFlJaWBnQtvgrG6xUMra2tyM3NDfUyaAzuifxwT+SJ+yI/kbAnX3/xIJ74pBlJWhU6B63ITojBhqUF+MoFBSjL1IV6eR4L1J6MF0sCzCD73ZEjR1BSUoKqqirZB8dEREQUmfRGC2Zl6qD/0Wps/coyXDAtGb9+7yRm/fJdXPz7j/DErtMYHLaHepmyFdGH9EJhzpw5aGhoCPUyiIiIKIrpDWbkJ2mhVipw/bxsXD8vG+39Fvxtjx5bdp7Gbf88gLu3Hsaf15XjC4vzQ71c2WEGmYiIiCjCtPRbkJ8ce9bHshO1uHdVCY7etwoffesixKmVeOXTjhCtUN4YIBMRERFFEIvNga5BK/KTtOP+uyAIuHB6KkrT49E5OBzk1YUHBshEREREEaS139VGNj8pdtLHZSXEoGOAAfJ4GCATERERRRC9wRUg502QQZYwQJ4YA+QQq6mpwccff+zTc+h04deuhYiIiAJDbzQDAPKTJw+QM3Ux6DHZ2CN5HAyQQ8wfATIRERGRRMogu1NiAQBdQ9aAryncMEAGUFtbi02bNqG2ttYvz/fZz34WS5Yswdy5c7F58+bRj7/55ptYvHgxFixYgKqqKjQ1NeFPf/oT/u///g8LFy7EBx98gC9/+ctnjZyWssODg4OoqqrC4sWLMX/+fGzbtm3SNfzoRz/Cb3/729H//sEPfoBHHnnEL18fERERyZfeaEGiVoUE7eTdfDN1rqFnLLM4X9T3Qa6trUVVVdXodLzq6mpUVFT49Jx//etfkZqaCrPZjGXLluFzn/scnE4n7rzzTrz//vuYPn06ent7kZqaiq997WvQ6XT47ne/CwDYsmXLuM+p1Wrx8ssvIzExEd3d3VixYgWuu+46CIIw7uNvu+023HDDDfj2t78Np9OJ5557Drt27fLp6yIiIiL50xvNE3awGCtL58ogs5PF+aI+QK6pqYHVaoXD4YDVakVNTY3PAfLvfvc7vPzyywCA5uZm1NXVoaurC5dccgmmT58OAEhNTfXoOUVRxPe//328//77UCgUaGlpQUdHB7Kzs8d9fFFREdLS0rBv3z50dHRg0aJFSEtL8+nrIiIiIvnTGyxTllcAZ0osmEE+X9QHyJWVldBoNKMZ5MrKSp+er6amBu+88w5qa2sRFxeHyspKWCwWtz9fpVLB6XQVyzudTlitrrqgZ555Bl1dXdizZw/UajWKioqmfN477rgDTz75JNrb23Hbbbd5/0URERFR2GgxWjA/J2HKx2WOZpBZg3yuqK9BrqioQHV1NR588EG/lFcYjUakpKQgLi4Ox44dw44dOwAAK1aswPvvv4/GxkYAQG9vLwAgISEBAwMDo59fVFSEPXv2AABeeeUV2Gy20efNzMyEWq3Gu+++i1OnTk25lrVr1+LNN9/EJ598giuvvNKnr4uIiIjkz+Zwom3AvQxyolaFGJWCGeRxRH0GGXAFyb4GxpI1a9bgT3/6E2bPno2ysjKsWLECAJCRkYHNmzfjhhtugNPpRGZmJt5++21ce+21WLduHbZt24ZHH30Ud955J66//nosWLAAa9asQXx8PADglltuwbXXXov58+dj6dKlmDVr1pRr0Wg0WLVqFZKTk6FUKv3y9REREZF8tfcPQxSnbvEGuCbqZSXEoIM1yOdhgOxnMTExeOONN8b9t6uuugpXXXXVWR+bOXMmDh48eNbHpKwzAPzyl78EAKSnp0/YZWNwcHDcjzudTuzYsQMvvPCC2+snIiKi8DXaA9mNQ3qAq5MFD+mdL+pLLCLVkSNHUFJSgqqqKpSWloZ6OURERBQEeuNID+TkqUssAFcnC5ZYnI8Z5Ag1Z84cNDQ0hHoZREREFER6gyuDPNWYaUmmLgb7WvoDuaSwFJEZZFEUQ72EsMDXiYiIKLLojRbEqhVIiVW79fishBh0Dg4zJjhHxAXIWq0WPT093OgpiKKInp4eaLXuvcMkIi
Ii+ZN6IE80SOxcWQkxsDtF9JltAV5ZeIm4Eov8/Hzo9Xp0dXUF7BoOhyMiukJotVrk5+eHehlERETkJ3qj2a0OFhJp3HTnwDBS4zSBWlbYibgAWa1Wj06rC5TW1lbk5uYG9BpEREREntIbLbi02P3JudK46Y7BYczKmnq4SLSIuBILIiIiomjkdIpoNVo8yiCfGTfNaXpjMUAmIiIiigCdg8OwO0W3puhJzoybZqu3sRggExEREUWA0R7IbrZ4A4C0eA0UAtgL+RwMkImIiIgigNQD2ZMSC6VCQHq8huOmz8EAmYiIiCgCnMkgu19iAYz0QmYG+SwMkImIiIgigN5ggUapQHq8Z+3asnQx6BjkIb2xGCATERERRQC90YzcpBgoFO4NCZFk6mJ4SO8cDJCJiIiIIoDeaPG4vAJwlVjwkN7ZGCATERERRQC9wexRBwtJVkIMhqwODA3bA7Cq8MQAmYiIiCjMiaLoyiAne55BHh03zTrkUQyQiYiIiMJcr8mGYbvT6wwyALZ6G4MBMhEREVGY0xs974EskabpsQ75DAbIRERERGFOb/CuBzLgavMGcNz0WAyQiYiIiMKcTxnkBFcNMjPIZzBAJiIiIgpzeoMFSoWA7ATPA+QYlRJJWhUP6Y3BAJmIiIgozOmNFuQkxEDp4ZAQCXshn40BMhEREVGY0xvMXrV4k2TqYtjFYgwGyERERERhTm+0IM+LFm+SrASOmx6LATIRERFRGBNFEc1eTtGTZOlYYjEWA2QiIiKiMNZvsWPI6vCqxZskU6dBr8kGm8Ppx5WFLwbIRERERGGsxTjSA9mLFm8SaZpeFztZAGCATERERBTWRnsg+1iDDLAXsoQBMhEREVEYG52i52MXC4DT9CQMkImIiIjCmH6kxCI30Q8ZZAbIABggExEREYU1vdGMrIQYaFTeh3WZOo6bHitgAfLx48excOHC0f8lJibit7/9LXp7e7F69WqUlpZi9erV6OvrA+BqUXL33XejpKQE5eXl2Lt3b6CWRkRERBQx9AaLT/XHAJAQo4JWpeC46REBC5DLysqwf/9+7N+/H3v27EFcXBzWrl2Lhx9+GFVVVairq0NVVRUefvhhAMAbb7yBuro61NXVYfPmzfj6178eqKURERERRQy90bceyAAgCALHTY8RlBKL6upqzJgxA4WFhdi2bRs2bNgAANiwYQO2bt0KANi2bRtuvfVWCIKAFStWwGAwoK2tLRjLIyIiIgpbeoPFpwN6kkwOCxmlCsZFnnvuOXz+858HAHR0dCAnJwcAkJ2djY6ODgBAS0sLCgoKRj8nPz8fLS0to4+VbN68GZs3bwYAtLe3o7W1NRhfwlm6urqCfk2aHPdEfrgn8sM9kSfui/yE056YbA70mW1IEKw+x0RJahEtfYMhia2mEuw9CXiAbLVa8corr2DTpk3n/ZsgCBAEwaPn27hxIzZu3AgAWLp0KXJzc/2yTk+F6ro0Me6J/HBP5Id7Ik/cF/kJlz050TUIAJhTkOnzmgvTu/Bpd4dsv/ZgrivgJRZvvPEGFi9ejKysLABAVlbWaOlEW1sbMjMzAQB5eXlobm4e/Ty9Xo+8vLxAL4+IiIh81Gey4kCrEQ6nGOqlRJ0zU/T8UGKRoEHXoBVO7mPgA+Rnn312tLwCAK677jo89dRTAICnnnoK119//ejHn376aYiiiB07diApKem88goiIiKSn+9s+xQL//d9pD7wJq7dsgu/ee8k9uoNDJiDQG/wfYqeJEsXA7tTRJ/Z5vNzhbuAllgMDQ3h7bffxp///OfRj33ve9/DTTfdhC1btqCwsBDPP/88AODqq6/G66+/jpKSEsTFxeGJJ54I5NKIiIjIT452DmJOlg4rp6ei5mQPXj3iOl+UHKvGJcWpqJyRhlUl6ViQm+hxaSVNThoSkueHAFmaptcxMIy0eI3PzxfOAhogx8fHo6en56yPpaWlobq6+rzHCoKAP/zhD4FcDhEREQVAU68J18/Lxp9vXAAAaDGa8d7JHtSc7EFNfQ9e+dQVMD9580JsWFYw2VORh/QGC1Lj1IjT+B7SSdP0OgeHMQcJPj9fOAtKFwsiIiKKTCarHZ2DVhSlxI1+LC8pFl9YnI8vLM4H4CoDKP5FNY52DIZqmRHL1QPZ9/pjYMy4abZ646hpIiIi8t7pPlcNbFHqxEFafnIsshNi0DZgCdayoobeaEF+su/lFcCYcdODDJAZIBMREZHXmqQAeUwGeTw5iVq09TNA9je9wfcpepK0OA2UCoHjpsEAmYiIiHzQ1GsCABROkkEGgJyEGLT1MzPpT8N2BzoHrX5p8QYACoWAjHgNSyzAAJmIiIh80NRrhlopICdh8iwmM8j+12p0BbL+yiADrk4WnSyxYIBMRERE3mvqM6EwJQ4KxeTt23IStegx2WC1O4O0ssjXYnSVt/ijxZskK4EZZIABMhEREfngVJ8ZRSlT3+LPSXR1SGjnQT2/kXog+6uLBeDKIPOQHgNkIiIi8kFTryuDPJXcRFeWk3XI/qM3SGOm/ZlBjuEhPTBAJiIiIi+ZbQ60DwxP2uJNImWQWYfsP3qjGQkxKiRq1X57zixdDExWBwaH7X57znDEAJmIiIi8cqYH8tQZ5JyRDHIrM8h+488eyJKx46ajGQNkIiIi8sqpPleLN3dqkDN1MVAIzCD7kz97IEuyElzDQqK9kwUDZCIiIvJKU6/7GWSlQkCmjr2Q/UlvtPj1gB7AcdMSBshERETklaY+E1QKYbR8Yio5iRw37S92hxNt/YErsYj2g3oMkImIiMgrTb1mTEuJhXKKHsgSDgvxn/aBYThF/7Z4A8bUILPEgoiIiMhzTb0mFLnR4k2Sk6BliYWfjPZA9nMGWaNSIDlWzRKLUC+AiIiIwtOpPrNbLd4kOYmuMcYOpxjAVUUHaYqevzPIAJCl0/CQXqgXQEREROFn2O5Aa7/FrQN6ktwkLZwiOyT4gzQkxJ9jpiVZCTHMIId6AURERBR+pB7IhW60eJPkJHBYiL/ojRZoVQqkxvlvSIgkU8cAmQEyEREReWy0xZsnNcgcN+03eoMZ+cmxEAT3Dkh6guOmGSATERGRF5qkISEe1iADQCszyD5z9UD2f3kF4Mog95ltsNqdAXn+cMAAmYiIiDx2qs8MlUJArps9kAEgO4EZZH85bTCjINn/B/SAM9P0uoaid58YIBMREZHHmnpNKEiOhUrpfiihUSmQFqdmDbKP7A4nWowWj+q/PZGl4zQ9BshERETksaZek1cBGoeF+K7FaIHDKQYsQM5kgMwAmYiIiDzX1Gf2qMWbxDVuOnoDL3+Q6r8LPTgg6YmsBI6bZoBMREREHrHana4eyMwgh8QpqcWeBwckPSEFyMwgExEREbmp2WCGKMK7DHKCFu0DwxBFTtPzlhQgTwvQIb14jRKxagU6onigCwNkIiIi8khTr3SL3/MALTcpBjaHiJ6h6L1976tTvWZkJcRAq1YG5PkFQRjphcwAmYiIiMgtTSMZTO9qkEdavUXx7Xtfnerz7oCkJ6J9mh4DZCIiIvJIU68JS
oXg1aAKjpv23ak+c8AD5CxddE/TY4BMREREHjnVZ0Z+ktajHsgSKYPcaoze7KQvnE4Rpw3mgHWwkGQlMINMRERE5LamPpNX5RXAmXHTbQPMIHujc3AYw3ZnEEosNOgassLpjM7DlAyQiYiIyCPeDgkBgDiNColaFcdNe2m0xVugSywSYuBwiug1RWeZBQNkIiIicpttZMxxkQ+3+HMSYliD7KUzAXKASyykaXpRWofMAJmIiIjcpjdY4BSBIh+GVHBYiPdO9XnfYs8TmaPT9KIz088AmYiIiNwmjTn2tgYZGAmQo/gAmC9O9ZmRpFUhKVYd0OuMZpCjdJ8YIBMREZHbpCEhvpRY5Ca6Siw4Tc9zrhZvgS2vAFyH9AAGyERERERTauo1QyEA+cme90CW5CRqYbY50W+x+3Fl0SEYQ0IAIDVOA6VCYIkFERER0VSa+kzIS9JC7UUPZMloqzfWIXssGENCAEChEJCp06BjgIf0iIiIiCZ1qs/sU/0xwHHT3jKYbei32INSYgGMjJtmBpmIiIhock29Jp/qj4Ez46Zbjcwge2K0g4UPHUQ84Ro3zQCZiIiIaEJ2hxN6o8XnW/yjGWQOC/HIqd7gDAmRRPO4aQbIRERE5Ba90QKHU/S5xCJRq0KsWsFx0x4K1pAQSaZOg87B4ajsNsIAmYiIiNxypsWbbxlMQRBGhoVEZ3bSW6f6TNCqFKMt2AItKyEGZpsTg8OOoFxPThggExERkVukDKavGWSA46a9carPjGkpsRAEISjXyxwdNx19b2QYIBMREZFbmnpNEASgINn3GliOm/ZcsFq8SXJHasX1BnPQrikXDJCJiIjILU19ZuQmaqFR+R4+5CZx3LSnXENCglN/DADFaa5rNY6U1kQTBshERETkFleLN/9kMHMSYtBvscNk5TQ9d5htDnQOWoOaQZ6WEguFwACZiIiIaEL+GBIiYas3z5zuC26LNwBQKxUoSI5FQw8DZCIiIqLz2B1ONBv8GSBz3LQnRoeEBLHEAnCVWTBAJiIiIhpHa78FdqfovxKLkQxyKzPIbjkVggwyABSnxqOBJRZERETy9ocX3sA1G+/FL//+Kg629qPfYgv1kqJCU69/h1RI46aZQXbPqT4zlAoBeUnaoF63OC0OHQPDGBqOrlpxVagXQERE5K7a2lrcdctaiHYbXnvid/jeuoeA3FlIjVOjKDUO00f+V5YRj1uXFvil2wK5NI3c4i9K9U8GMy1eA7VSYA2ym071mZCXpIVKGdzvaamTRVOfGXOzE4J67VBigExERGGjevu7EO02QHRCKTpwY3ovFn1mNhp7TWjqM+FwWz9ePdKBYbsTuUlaXD07K9RLjhjSLf5pfrrFLwgCshNiOG7aTcHugSyZPlJz3tAzxACZiIhIjmYuWg4oVVCIDmg0Gtz9hetQUVFy1mNajRbk/ezt0YCO/KOp14TcRC1iVEq/PSeHhbjvVJ8ZlxSnBv26UgY52g7qMUAmIqKwoZs+D1j3EO7M78eGG65GRUXFeY/JSoiBSiFE5fSvQGrqNfutvEKSm6hFffeQX58zEtkdTrQYLUHvYAEA6fEa6GKUUXdQjwEyERGFjbquISB3Fn72oyuQOXLI61xKhYDcJC30RmYm/ampz4Tl01L8+pw5iTH4oKHHr88ZiVqMFjicYkhKLARBcHWyiLIMMk8vEBFR2KjrHkKiVoUMnWbSx+UnadHMDLLfOJziSA9k/wZoOYla9JhssNqdfn3eSBOqFm+S6amxUTdNjwEyERGFjfruIZSmx0MQhEkfl58UC72BGWR/aeu3wOYQUeTnW/xSq7d2HtSbVKiGhEiK0+LR0DMEURRDcv1QYIBMRERho657CCXp8VM+riBZC73RHFV/0AOpqde/Ld4kHBbiHn93EPFUcVoczDYnOgaiZ58YIBMRUViw2p1o6jWh1I0AOT85FmabE70mDhHxh6Y+/w4JkXDctHtO9ZmRqdMgVu2/DiKeiMZOFgyQiYgoLDT1meAUgdIMNwLkkWljeiPrkP1ByiD7O4MpZZA5LGRyp/pMISuvAIDikV7I0VSHzACZiIjCQl2Xqx2YOxlkw8lDwK4X8HbNh4FeVlQ41WdGdkKM3zOYmboYKARmkKcSqiEhkiJpWAgDZCIiInmpG+mXO1UNcm1tLe764lrgo2fw/dvWoba2NhjLi2hNvabRIMmflAoBmboYZpAnIYoiToc4QNaqlchN1LLEgoiISG7quoaQpFUhPX7yFm81NTWwWa2A6ITdZkNNTU1wFhjBmvrMKApQgJaTyHHTk+kctMJid4a0xAJw1SE39ETPUBcGyEREFBbqu4dQmjF1i7fKykpoNBpAUEChUqGysjI4C4xQDqeUwQxMgJbLcdOTOtPiLXQZZEAKkJlB9guDwYB169Zh1qxZmD17Nmpra9Hb24vVq1ejtLQUq1evRl9fHwDXLYS7774bJSUlKC8vx969ewO5NCIiCjN13UMoTddN+biKigpUV1dj2mfuwKJ7fjfuOGpyX6vRAqvDOdrJwN9yErUssZjE6JAQP7fY81Rxahxa+i0YtjtCuo5gCWiAfM8992DNmjU4duwYDhw4gNmzZ+Phhx9GVVUV6urqUFVVhYcffhgA8MYbb6Curg51dXXYvHkzvv71rwdyaUREFEasdidO9ZlQku5ekFZRUYHlN96J/tSSAK8s8jX0um6rBy5AjkHn4DAcTvasHs+p3sC02PPU9LQ4iOKZgD3SBSxANhqNeP/993H77bcDADQaDZKTk7Ft2zZs2LABALBhwwZs3boVALBt2zbceuutEAQBK1asgMFgQFtbW6CWR0REYaSxd6TFmxsdLCQFybHQGy0cFuIj6bZ6IDPIThHoHGQWeTyn+kxI1KqQHKsO6TqkVm/RUmahCtQTNzY2IiMjA1/5yldw4MABLFmyBI888gg6OjqQk5MDAMjOzkZHRwcAoKWlBQUFBaOfn5+fj5aWltHHSjZv3ozNmzcDANrb29Ha2hqoL2FCXV1dQb8mTY57Ij/cE/kJ5z3ZcdIAAEiG2e3f+wmCFSarA0cbm5GsDdifO5/JfV8OnOqAUgCUJgNah41+f/4YmyvgOnCyGWKW+2+AAklOe3K8rQ95OnVI4p2x4uxWAMDehlaUJ9qDfv1g70nAfmPY7Xbs3bsXjz76KJYvX4577rlntJxCIgjClIctzrVx40Zs3LgRALB06VLk5ub6bc2eCNV1aWLcE/nhnshPuO5Jb53rtm7FrEKk62Lc+py5XQCgh12bjNzcxMAtzg/kvC9dw+2YlhKHwoK8gDz/XFssgJOwxyQiNzcrINfwhlz2pMNchxmZiSFfT7ZThFb1KXrt6qiIvQJWYpGfn4/8/HwsX74cALBu3Trs3bsXWVlZo6UTbW1tyMzMBADk5eWhubl59PP1ej3y8gLzw0hEROGlrnsIybFqpE3R4m0sTtPzj4ZeU8DKKwCOm56Ka4peaA/oAYBCIaAoNS5qpukFLEDOzs5GQUEBjh8/DgCorq7GnDlzcN111+Gpp54CADz11FO4/vrrAQDXXXcd
nn76aYiiiB07diApKem88goiIopO9d1DKE2fusXbWAXJrqBCb2Dg5YvGAAfI2QkcNz0Ro9kGo8UuiwAZiK5WbwEtynr00Udxyy23wGq1ori4GE888QScTiduuukmbNmyBYWFhXj++ecBAFdffTVef/11lJSUIC4uDk888UQgl0ZERGGkrnsIFxamevQ52QmuMcbNBmaQvTU0bEfHwPDoAa1A0KgUSItTM4M8jtEWbyHuYCEpTo3Dh429EEXR4xLZcBPQAHnhwoXYvXv3eR+vrq4+72OCIOAPf/hDIJdDRERhaNjuwOk+M25d4tkBLpVSgZxELfRGBl7ekm6nF6cF9vBcbhKHhYxHLkNCJMVpcei32NFrsnlU7hSOOEmPiIhkrbFnpMVbhudBWkFyLPTMIHtNup0+PYAZZADISdCibYAlFuc6k0GWS4Ds+hmMhjpkBshERCRrdd2uQRWe9ECW5CdpWWLhg4bewPZAluQkxjCDPI5TfWbEqBTIdLNzS6BNj6JeyAyQiYhI1qQAucSbADlZy2EhPmjocQ2pSI0L7JCKnEQt2geGuU/nONVnwrTkWCgU8qj3PRMgD4V4JYHHAJmIiGStrmsIKR62eJMUJMdiyOqA0RL8wQaRoKFnCMWpcQE/kJWTGAObQ0TPkDWg1wk3p/rMsimvAIAErQoZOs3onYVIxgCZiIhkrb57yKv6YwDIT3IFFyyz8E6geyBLchJdrd5a2ertLK4AWR4dLCTFqdHR6o0BMhERyVrdSA9kb4wOC2GA7DGnU0RjjyngHSwAICeBw0LOZbE50DEwjMJU+WSQAVeZBQ/pERERhZDF5sBpg9mr+mNgzLAQtnrzWPvAMCx2Z1AzyBwWcsZpg7w6WEiK0+Jwqs8Mu8MZ6qUEFANkIiKSrcZeE0TRuw4WAJCdyGEh3pIOYgVySIhkdNz0AN/ISE71yjVAjofDKaI5widUMkAmIiLZqusaafHmZQ2yWqlAdoKW46a9EKwWbwAQp1EhUatiBnmMM0NCZFaDnBYdnSwYIBMRkWz50uJNUpCshd7IDLKnGnpMEITgBWi5iZymN9apPjMUApA3UkcvF9IdhUjvZMEAmYiIZKuuewipcWqkxnk/1jY/OZY1yF5o6DGhIDkWGlVwQgUOCznbqT4z8pK0UCvlFarlJ8dCpRAi/qCevF51IiKiMep96GAhkabpcQiFZxp7TUGpP5Zw3PTZTvWZZFdeAQBKhYDClNiIb/XGAJmIiGSrzoceyJKC5FgMDjvQz2EhHmnoMY1OTgsGKYPMNzIuchsSMlZxWuT3QmaATEREsmSxOdBsMKPExz68o72QWWbhNrPNgdZ+S1AO6Enyk2NhtjnRzWl6sDuc0BstMg6Q43lIj4iIKBRO9oy0ePMxg5yfzGl6nmoKYgcLycyRfT7RFdmBlzta+y1wOEVZllgArmEhPSYb+i22UC8lYBggExGRLNWPdLAoTdf59Dycpuc56fZ5MKboScoyXPt8vHMwaNeUq1N98uyBLJHeOEXyQT0GyEREJEtSD+SSdN+yaLlJWggCSyw8MRogB7EGuSg1DhqlAse7GCAfG3mTINsAWWr1FsF1yAyQiYhIluq6B5EWp0aKDy3eAGlYSAxLLDzQ0DuEeI0SGTrfXntPKBUCStLjoj6DbLY58IvqOszO0qE0w7e7J4FyZlhI5AbIqlAvgIiIaDz13Sa/BQj5SbGcpueBhh4TitPiIAhCUK9blqnDkfaBoF5Tbn65vR5NvWZs/3oFlIrgvv7uSonTIDlWHdEBMjPIREQkS3Xdgz73QJZwmp5nGnqC2wNZUpahw8keE2wOZ9CvLQcNPUN4eHs91i/MxaqS9FAvZ1LTU2NZg0xERBRMZpsDzQaLTyOmx8pPjkUzM8huEUURDb2moB7Qk5Rl6GB3ihEdeE3mnq2fQqUQ8Otr54R6KVOK9FZvDJCJiEh2To52sPBTgJykxcCwPaLbUvlL56AVJqsjqC3eJGWZrv2OxjrkV4904NUjHfjxFTNHWxPKWXFqHBp7zXA6I3OwCwNkIiIf1NbWYtOmTaitrQ31UiLKaIs3H3sgSwpGAg7WIU9NygqGJkCWWr1FbmZyPBabA/dsPYxZmTrcc3FxqJfjluK0OFgdTrT2R+bPFA/pERF5qba2FlVVVbBardBoNKiurkZFRUWolxUR6rqlFm/+yyADrmEhc7IT/PKckUo6eBXMMdOS1DgNMnSaqGv19qt3T6Khx4R3vroCGlV45C6nj2n1Fg4Zb0+Fxy4QEclQTU0NrFYrHA4HrFYrampqQr2kiFHXPYT0eNdJeX+Q/oCzF/LUGkbqf4tCECADrjrkaAqQG3tM2FRdhxsX5KBqZkaol+O2SB8WwgCZiMhLlZWV0Gg0UCqV0Gg0qKysDPWSIkZ995Df6o8BIDdxZFgIeyFPqbHHhNxELWLVypBcvyxDF1U1yN/ZdhhKhYDfXDc31EvxSGFKHAQhcnshM0AmIvJSRUUFqqur8eCDD7K8ws/quob8Vn8MABqVAlm6GGaQ3eDqYBGa7DHgOqjXOWiFwRz5BypfP9qBbZ924IHV4XEwbyyNSoGC5Fg09EZmvThrkImIfFBRUcHA2M9MVjv0Rv+1eJPkJ2s5Tc8NDT1DIe3BW5YhHdQbxPLClJCtI9AsNgfufvkwyjLi8Z1LwuNg3rmKU+OYQSYiIgqGkyN/cP1ZYgG4Olkwgzy5YbsDeqMlJENCJKOdLCK8DvnXNSdxsseER9fOD5uDeeeanhrHGmQiomgjiiI+buzFgVYjek1WiGJk9vuUm3o/90CWuMZNM4M8mVN9ZohiaFq8SYrT4qBSCBFdh3yq14RfVNdhXXkOVpeFz8G8cxWnxaGtfxgmqz3US/E7llgQEU3gsRf/g2/+7h9A/nwgdxbiNErkJ2lRkBx75v8ma3FJcRpmZ7F1mL/Udfm3xZskP0kLo8WOAYsdCVr++RuPdLs8FFP0JGqlAsVpcTjeFRm1rTaHE8c6B3GwtR8HRv63R2+AIITfwbxzSW+kmnojr30if0MQEY2jtrYW3/7SWsBqhVqjwdd/8zRU+XPQbDBDb7Cguq4brf0WOEVgVqYOR+9bFeolR4y67iFk6DRI8lOLN8nosBCjGbO1kfXH3F/OBMihyyAD4d3JwmJz4C87TmOP3oADrf040jEIq8MJANAoFZibrcO1c7PxxcV5KEgJr4N555LeSDX0mhggExFFg5qaGthtVkB0wmm3IdtYj/u/ceNZj7E7nPjvV4/i0Q8bYXc4oVKyas0f/N3iTZKf7BoWojdYmPGfQEPPELQqBbITYkK6jrJMHd460QWHU4RSIYR0LZ567WgH7t56GFkJMViYm4gryjJQnpOIBbmJKMvUQR1BvyeKR4eFREa2fywGyERE46isrISgVEOEbcIexyqlAnOzE2B3img2WDA9xFm3SFHXPYTLS/3fRSE/yZWtYyeLiUkt3gQhtEFpWUY8hu1OnO4zh93P1YmR0pD6+y+DLiayw6wMnQbxGuXowdpIEjlvY4iI/KiiogKpG36JRTd+Y9Iex9K
t6EjMoISCyWpHSwBavAFA3si4aXaymFhDjykkI6bPJXWyONY5EOKVeO5ktwlZCTERHxwDgCAIKEmPx8nuyPv9xwCZiGgcAxY7uhOLceOdd0/a53jGSIAciRmUUKjvDkyLN2BkWEhCDPRGZpDHI4oiGnpMIT2gJxnthRyGB/VO9gyN/l6IBiXp8aOdZyIJA2QionFIPVhnjWSyJpKXFAuNUoGTzCD7RV2363WfmTH56+6t/CQOC5lIz5AVA8P2kB/QA1y37pNj1WF5UO9kz1BA7oDIVUlaPBp6TXA4I6sNJgNkIqJxSH+YpwqQlQoB01NjI3aaVLBJ9Zv+HDM9Vn6SFnoDSyzG0zAy8CGUQ0IkgiCgLCM+7IaFWGyuQSszZJCFD5aS9DjYHGLEvfFkgExENI5jnYNQKgS3/tAVp8Uzg+wnJ7qGkJMYuPpNTtObmFxavEnKMnU43hleP1eNvSaIIqKuxAJAxJVZMEAmIhrHsc5BFKfGuTUCdkZaHE72mDhpzw/qugYDVl4BAPnJsTCYbRgcjrzJX76SAmQ5HNIDXHXIrf0WDFjCZ6+kswgzoqnEggEyEVH0ONY5OGV5hWRGejz6LXb0DFkDvKrId6J7CEl99di0aRNqa2v9/vz5UieLCLsd7A+Nva7uC/Ey6b5QlukKvE6EUZmFFCSWRFEGOTdRC61KgbpoC5AfffRR9PX1BWMtRESy4HCKqOsecjtAHm2W38s6ZF8YzDZ0nTiINx78Kh544AFUVVX5PUg+M02PZRbnaugxyaL+WHKmk0X4BMgnu4eQqFUhLV4T6qUEjUIhYEYEdrKYMkDu6OjAsmXLcNNNN+HNN9/kLUQiinin+kwYtjs9yiADrv6n5L26riFAfwgOmw0OhwNWqxU1NTV+vYY0TS/SDhT5Q0PvkGzqjwHXrXtBQFjVIZ/sMWGGDAatBFtJWlz0BcgPPfQQ6urqcPvtt+PJJ59EaWkpvv/97+PkyZPBWB8RUdAdc7ODhaR4tBdyZP2BCLYTXYNA/nxoNBoolcoJJxj6gsNCxmdzuKbWySlA1qqVKEqJC68McpS1eJOUpMfjZI8Jzghq9eZWDbIgCMjOzkZ2djZUKhX6+vqwbt06/Pd//3eg10dEFHRSgFzmZoAcq1YiN1HLYSE+OtE1BEXeLLz51lt48MEHJ51g6K0YlRKZOg1rkM9xus8MpwgUp8oruJuVqQubXsgOp4jGXlNUtXiTlKS7RoO3RNAbzykr8R955BE8/fTTSE9Pxx133IH/+Z//gVqthtPpRGlpKX71q18FY51EREFzrHMQ6fEaj+oIi9PiOG7aR3XdQyhMicOlF6/EpRevDNh18tnq7TyjHSzSYkO8krOVZcbjvYYeOJ0iFAp5ly00G8ywOcSoavEmGe1k0TOEghR5fQ95a8oAube3F//6179QWFh41scVCgVeffXVgC2MiChUPOlgIZmRFod36roDtKLocKJrEDMDNCBkrPwkLRp5oPIsDb2uN3dyyyCXZehgsjrQYrTIPvA6OVKDG00t3iSlY1q9rSpJD/Fq/GPKEouf/vSn5wXHktmzZ/t9QUREoeZVgJwejxajBWabI0CrimyiKOJE19DoH9pAKkiO5TS9czT0mKBRKpA7UqMtF1KZUzjUIUslViVRWGKRnxwLjVIRUQf12AeZiGiMXpMVXYNWjwNkqT1WEzOTXukctGJg2B7QISGS/CQt+sw2DHFYyKiGHhOKUmOhlFkZw2irtzCoQ67vHkKMSjF6EDSaKBUCiiOskwUDZCKiMY572MFCMtrqjQf1vCINgygNRokFeyGfp6HXJKsOFhLX2HEljnfJP/A62TOE6alxsq+VDpSS9HjUe9HqUhRFPLHrNBpl9ruTATIR0RietniTSAdzTkZQBiWY6kYCoGDUIBckc5reuVxDQuRXGiAIAsoywqOTxckeU1S2eJOUpMehvmfI43kZzQYzbvvnAbx5vDNAK/MOA2QiojGOdQ5Co1SgyMOJYunxGiTEqJhB9tKJriGolQIKUwKfxcxPYgZ5rD6TFQazTZYZZMBVZnFM5jXIoiiivnsoKjtYSErS4mGyOtA+MOzR5+1r6QcALMxNDMSyvMYAmYhojGOdgyjNiPe4FlMQBLZ688GJ7kHMSPP8dfeGVCPKaXouUos32QbImTqc7jPDZJVvzXjnoBVDVkdU9kCWlIzpZOGJ/S1GCAJQnsMAmYhItrzpYCGZkRbHDLKX6rqGglJeAbgmtGXoNMwgj2jolXmAPPJ9USfj8qUzLd7k+RoGg7cB8r4WI8oydIiPmbLzcFAxQCYiGmFzONHQY/IhQI5HY29kjVsNBqdTRF33UFA6WEjyk7SsQR4hHY6a7mFZUbCMtnrrlHGAPHLnKJprkAtTYqFSCJ4HyK39siuvABggExGNOtk9BLtT9D5ATo/DsN2J1n5mJj3RbDBj2O4MSg9kSX5SLJrZCxlmmwNPfNKMGWlxSNSqQ72ccUnfF3LuhVzfbYJCAIqCUEMvV6qRsxuedLLoNVlxus+MRXlJAVyZdxggExGN8LaDhUTqAnCSdcgekW6dB6vEAnC9manvGYIjyrP99792FMc6B/GndeWhXsqE4mNUKEjWyrqTxcmeIRQkx0Kjiu6wSupk4a790gG9PGaQiYhkSwqQvQ3UpPrDk170Ao1mJ0ZbvAWvxKI8JxEmqyOqD1VWn+jCIx804q6V03H5zIxQL2dSZRk6WWeQT/aYovqAnqQkLR713e63etvfagQAZpCJiOTsWOcgchO1Xt9qnpbsmkTGDLJnTnQNIl6jRE5iTNCuKZ2YP9jWH7RryonBbMOXn9uPsox4PPyZWaFezpTKMnU43ul5j91gqe8eQkkUH9CTlKTHo99iR/eQ1a3H72sxIi9Jiwxd8H723cUAmYhoxPGuIa/LKwBXDV5hSuxo2yxyT133EErT4yEIwZtANic7AQoBONg6ELRrysndLx9G28Aw/vaFxYjTyKt7wHjKMnQYGLZ73GM3GPotNnQPWZlBhuedLPa1yPOAHsAAmYgIgKvRvy8t3iRs9ea5E13B7WABAPt370La4VdQ88GHQb2uHLx0sBV/26PHDy8vxbJpyaFejlvKMkcO6smwDlkqqYrmFm8STwJks82BY52DsiyvABggExEBcDX6N5htfgiQ41li4QGbw4nGXhNKg3hAr7a2FlVVVeh6+6/44H++idra2qBdO9Ta+y346gsHsbQgCT+4vDTUy3Fb2cgbKDnWIbPF2xlFqbFQCHCrk8XhtgE4nKIsD+gBDJCJwkptbS02bdoUVX/Qg+VYp+tWuz8C5F6TDQazzR/LiniNvSY4nGJQO1jU1NTAarUCTidEuw3/eWd70K4dSqIo4o7nD2DI6sDfPr8IamX4hAAFybGIVStk2QtZypZKXWyiWYxKiWkpsW5lkPe1yPeAHgDIv/CIiACcyXpZrVZoNBpUV1ejoqIi1MuKGFIHC+lWrrekaWQNPUNYnJ/s67IiXig6WFRWVkKj0WDYaoVTUCJv7tKgXTuUHt95Gq8d7cQjn52LWVkJoV6ORx
QKAaXp8uxkcbLHhEydBglahlTASCcLN+6i7W81IlGrku2AmoC+fSwqKsL8+fOxcOFCLF3q+gXU29uL1atXo7S0FKtXr0ZfXx8A1zvbu+++GyUlJSgvL8fevXsDuTSisCNlvRwOB6xWK2pqakK9pIhyrHMQcRol8pNifXqe0VZvrEN2y4mRgCeYQ0IqKipQXV2N/3f/A8C6hyDmyL+Lg68aeobwnW2foqo0Hd+6aHqol+OVssx4edYg9wzxgN4YJenxbmaQXQf0gnk41xMBv7/y7rvvYv/+/di9ezcA4OGHH0ZVVRXq6upQVVWFhx9+GADwxhtvoK6uDnV1ddi8eTO+/vWvB3ppRGFFynoplUpoNBpUVlaGekkR5XjnEMoy4qFQ+PbLenRYiIfjVqNVXdcQUuPUSIvXBPW6FRUV+OXPfoSE6fMivtWbwyni1n/sg0oh4In1C33+Hg+VsgwdGntNGLY7Qr2Us5zsMbH+eIySdFeZWa9p4lZvDqeIg239si2vAEJQg7xt2zZs2LABALBhwwZs3bp19OO33norBEHAihUrYDAY0NbWFuzlEcmWlPV68MEHWV4RAK4OFr7fdk7QqpCh06Chlxlkd4Sig4VEEASU5yREdIDcNTiM+187io+a+vD7G+ajIMW3OyShVJapg1OU1yCeYbsDzQYzZqTJs0wgFKQ3C5PtU13XIExWBxbmyjdADmjBjCAIuOKKKyAIAr761a9i48aN6OjoQE5ODgAgOzsbHR0dAICWlhYUFBSMfm5+fj5aWlpGHyvZvHkzNm/eDABob29Ha2trIL+EcXV1dQX9mjS5aNmTwsLC0TeYofje90Q47YnZ5kRTrwmfm5Xkl9e1IEGNI619stsjOe7JsQ4jKgoSQvZazUhSYeuxXrS0tITsVq8/98XqcGJ36xDea+rHe039ONTpClI+OysFq7IF2X1PeiJNMAMAPj5+GsnOlIBdx+4U0dzW6dZj63ssEEUgVWkN69fWn5JE1z59UqdHnmr8IHn70V4AQJ5m2O3XLdi/vwIaIH/44YfIy8tDZ2cnVq9ejVmzzq7zEgTB419IGzduxMaNGwEAS5cuRW5urt/W64lQXZcmxj2Rn3DZk4Ot/RABLJuR65c1z85px4eNvbL8+uW0JpPVjtYBGxZOywjZuipKrHj6QBcccSmYlhK6LKAvX/+JrkG8dbwL/znehXfruzFkdUClEFBRlIIHFxfgyrJMLMlPCtvSCoku1QbgGLrsmoB9vzicIqr+VIvm3kEcv38BVFN0+thncCX5lpXmIzc3cEF7OEnNcEAQjqDHOfE+ndprgEapwKXziqFRuV/MEMzfEwENkPPy8gAAmZmZWLt2LXbt2oWsrCy0tbUhJycHbW1tyMzMHH1sc3Pz6Ofq9frRzyciCiR/dbCQzEiLx7P7WmC1Oz365R9tpF6poSqxAMaOnB4IaYDsrac+acaXn9sPwDWkZsPSAlxRloFVJWlej0yXq0StGrmJ2tGf10D4/YeNeO9kDwDg30c6sHZ+zqSPlw6jscTiDK1aifwk7aQH9fa1GDE3Wyfr348BW9nQ0BAGBgZG//+33noL8+bNw3XXXYennnoKAPDUU0/h+uuvBwBcd911ePrppyGKInbs2IGkpKTzyiuIiALheNcgBMF/nRSK0+LgFIFTffKplZSjuu7gd7A417wcV935wdbwrEP+qKkXqXFq1N9/Geq/X4U/fG4+rp+XHXHBsWRWpi5gnSxOdg/h/teP4urZmchL0ODRDxun/pweExJiVEgP8iFTuStJj0dd1/gBsiiK2N8q7wN6QAAzyB0dHVi7di0AwG634wtf+ALWrFmDZcuW4aabbsKWLVtQWFiI559/HgBw9dVX4/XXX0dJSQni4uLwxBNPBGppRERnOdYxiMKUWMRp/PMrUcomnewxoTSE2VG5k3ogB3OK3rkStWpMT40L24N6xzoHMScrATOipIvCrEwdntmrhyiKfq0ZdzpF3P78AWiUCmy+sRyPvXsUP/+gBYfa+jE/Z+JJb64Wb3GybVUWKiXp8dh6uH3cf2vtt6Br0CrrA3pAAAPk4uJiHDhw4LyPp6Wlobq6+ryPC4KAP/zhD4FaDpHsffTRx3jtrWpcu+ZydqgIsmNdgz5P0BtrRjpbvbnjRNcQchO10MWEdsBCOHeyONoxiLXzs0O9jKCZlamD0WJHx8AwshO1fnvexz5uwnsne7DlpgXIS4rFzfPT8b+1bfj9h434840LJvy8k91DkwbQ0aokLR5dg1YYzTYkxZ59N2Nfi+tnbZFMR0xL5Fv8QRRFamtrcVlVFTY9+BOsvHQVfvLENtn1+oxUTqc40uLNfwFydkIMYtUKDguZQl3XYEizx5Ly3EQc7xyExRZeP3Pdg8PoHrL69XtX7qSv1Z91yI09Jtz32lFcWZaBr1zg6qaVGqvCF5fk42979BP283U4RTT0mjgkZByjrd7Gmai3f2TE9AKZZ5AZIBPJQE1NDWw2KyA64bTb8NO/voSCB9/B/a8dxSn20w2oFqMFJqsDZX4shRAEAcVp8WhwY9xqNDvRPYSZcgiQcxLhFIEjHQOhXopHpCBxdhQFyNJBWn8FyKIo4o7nD0AhCPjLjQvOKpW4a+V0mG1O/HVn87ifqzeYYXOIo9Mz6QwpQK4fpxfyvhYjStLjZT+amwEykQxUVlYCSjWgUCBWG4PffvNmXFSUil+9W4/pv6jGdVt24c1jnXA6xVAvNeJIf2j9nYWbkRbHDPIkDGYbugatIT2gJynPHelk0RqeAbI/BtyEi/ykWMRplH4LkDfvOIXt9d349bVzzhuiUp6biEtnpOEPHzfCMc7vXunnm1P0ziedwxivk8W+ln7Zl1cADJCJZGHJsuVQ3PgQLvni3aiursY9N1+Nl7+yDE0/uBzfryrFztN9uOovOzHz4e3YXtcd6uVGlONdgQqQ49HQa4Io8k3NeKQT7qFs8SaZkRaPWLUi7OqQj3UOQqtSYFoYT8fzlEIhoCwjfvTn1hen+0y4999HUVWajjtXTBv3MXetLEJTrxmvHuk479+k8gG2eDtffIwKOYkx5wXIBrMNjb0m2R/QAxggE8nCp+0DcGSX4Rvf/u5ZB/QKUmLx0FWz0PzAajz7xcUw25x46J0TIVxp5DnWOYgkrQpZCTF+fd7itDiYrA50DAz79XkjxYmRAEcOJRZKhYC52eF3UO9o5yDKMnVQhvkAEE/NykzwOYMsiiLufP4gnKKIx29aMGEXiuvnZqMgWYvffXB+y7f67iFolArkJUXPGxRPlKTHo/6cMrMDra76Y2aQicgte0cOLSzKH/9dtUalwM2L8rB6ZvqEvSXJO9IBPX+3aRrb6o3Od6JrCArB9UZCDspzEnEozAJkfx8uDRezMnU41WeGyWr3+jn+uqsZb53owq+umYOi1Im/B1VKBb5xYRG213fj0/azS3BO9pgwPTU26t6guKskLf68DPL+0Q4WzCATkRv2tRihi1GiZIrT0DMzdNAbLRga9v4PA50tUEHGjElOcRNQ1z2EwpQ4xKiUoV4KAFeA3
DloDZuMv9nmQGOvKaoO6ElmZeogiq7vIW/oDWb81yufonJGGr5WUTjl4+9YPg0xKsV5g0NOdg+x/ngSJenxaOsfPuvv1b4WI7ISYvzaoi9QGCATycBevRELc5OgmCITId2OPve2FXlnwGJHi9GCsgAEGUUpcRAE4OQ4p7jJVWIhh/IKyZmDeuGRRa7rGoIo+r92PhyMdrLo8LzMQhRFfPXFg7A7XaUVU/3OBYB0XQxuWZyHv+3Ro2+k5ZsoiqjvGYqaAS3eONPq7czvwHA5oAcwQCYKOYdTxIE298ZuSgeajncyQPaHEwE6oAe4ymIKkmPR0Mu9OpcoijjRNSSLDhaS+dkjI6fDpMxitINFVvQFyKXp8RAE71q9fdjYi9ePduLnV83yKLi9a+V0mKwO/HWXq+Vb16AVg8MOHtCbREn62Z0shu0OHOkYCIvyCoABMlHI1XUNwmR1YLEbvzSkXzgn/HCCmwLX4k0yIy2OGeRxdA5aMTBsl0UHC0m6Lga5idqwCZCPdgxAEOTRBSTY4jQqFKbE4rgX5zF2nOoDAHxxcZ5Hn7cwLwkXF6fiDx81weEUx3SwkM+bPLmRXhspQP60fQB2pxgWHSwABshEIXfmgN7Ut53iNCoUJGtxggf1/OJY5yCUCiFgf+RmpMWzBnkccupgMVZ5bkLYlFgc6xxEUUocYtXyqOEOtlmZOhzr9Lxv9R69EYUpsUjXed615q6V09HYa8JrRzrYA9kNSbFqZOg0oyWB4TJiWsIAmSjE9rX0Q6NUYE6We83+Z2bomEH2k0Nt/ShNj4dGFZhfhcVpcegctGKQhyrPInVikcOY6bHKcxJxpGMQNocz1EuZ0tHOQcyOwvIKyaxMHY53DXk8PGmP3oglE3QLmspn52UjP0mLRz9sRH33EAQBKEpli7fJjO1ksX/kMHq4ZN0ZIBOF2L4WI+bnJECtdO/HsSzD9YeBAyh8d7BtAAtyA5fNkP4QNLDV21lOdA1BrRRQmCKv+s3ynERYHU7Z36FxOkUcj9IWb5JZmTqYrA7ojWa3P8dgtqG+ewhLC5K9uqZaqcDXLyzCO3XdeO1oBwqSY2XThUWuStLPBMj7WoxYkJPo1sFIOWCATBRCoihir97o0aGFmRnxMJht6BmyBnBlka/fYkPj4b3oq/kHamtrA3KNM72Q5R1wBVNtbS3+/eTvkTvUJLv+seHSyeK0wQyL3RnVAXLZSO21Jwf19upd5WzeZpAB4M4VrpZvu5uNU7blJFeA3GywwGS1u30YXS4YIBOF0Ok+M/rMNiz24Be2VLcp9yyX3D372nbgxR/inScfQVVVVUCC5NFeyDyoB8AVHFdVVeHIv/6E5r98N2BvTLxVlqGDWinI/qDe0Q5X7W009kCWSG8OPAmQ9+gNAIAl+cleXzdDF4PPL3Id8JuRLq87IHIkHSx/+0Q3BocdWMgAmYjcsU86oOdRBnmk1RvrkH3y+tvVgMMOp9MBq9WKmpoav18jOVaNlFg1W72NqKmpgdVqBUQnRIc9IK+5LzQqBWZnyn/kdKC7r4SDrIQYJGlVHrW83N1sRFFqLNLiNT5d+66VRQDOZLFpYqXprtfohQOtAMLngB4AqEK9AKJotrfFCIUAlOe4d0APAApTYqFWCswg+0gomA8o1VCKdmg0GlRWVgbkOjPS2epNUllZCbVGA4dlGCq1OmCvuS/KcxNQU98T6mVM6mjnINLjNV51YogUgiCMdLLwLIPsS/ZYsjg/Gdu/XuFTqUa0kDLIr3zaAZVCwNxs9//WhRozyDSh2tpabNq0SXa3QSPJvpZ+zMrUIU7j/ntVlVKBGWnx7GTho86E6Vh4z+/w4IMPorq6GhUVFQG5Dlu9nVFRUYFfPvEicNEt+L+//Stgr7kvynMSoTda0GuSb41/oMajhxtPAuQ+kxUne0x+C2pXlaQjUav2y3NFspQ4DVLj1BgYtmNOVkJYHWpkBjlK/fGFN/H4S69h/rILsfKiC1GUEofC1FhMS46FVq0crRW0Wq3QaDQBDSCi2V69EatK0jz+vJkZ8cwg+8DpFHGovR9fuehC3L92XkCvVZwWh5cOtsHucELlZqeSSKbKnwNccCPWXrEq1EsZV3mO6xbwobZ+XDojPcSrGd+xzkFcPzc71MsIuVmZOjy1W49+i23KYFU6oLfUDxlk8kxJejx2nTaEVXkFwAA5KtXW1uLuL34WDpsN+17ajKfXPQTkzhr995zEGKh2vwTz8DDgdI7WZzJA9q/OgWG09lu8OtU7M0OH/xzvgtMphk3LHDlp7DVhcNgR0BZvkhlp8bA7RTQbLJjOsbQ40TWIeI0SOYnyLA+YnyN1shiQZYDcM2RF16A1qnsgS8pGsujHO4ewbFrypI/dMxIge3IgmvyjJM0VIIfTAT2AJRZRqaamBg6bDRCdUIoO/HepCe9940I89fmF+NmaMlw1KxNZs5cAChUEhTKg9ZnRTDqg580v7LKMeAzbnWg2uN8DlM6QDmFJ2cJAmpEWB7Qew0O/+EXUlytZbA68U9eNskwdBEGeb+xyEmOQFqfGoXZ5HtTjAb0zznSymHqi3h69fw7okeekaYPMIJPslS+7EFCqIDgd0Gg0+OxVV6BiRhouwZhb/esXYq7VAUvjAfz9exuYPQ4AacT0Qi+ymGM7WRSmMivpqYOt/VAIwNzswAcZhpOHgRd/iCecdjz72G8ivlyptrYWNTU1qKysPOvrFEURX3vxID5tH8C2rywL4QonJwgCynMT8dFHH2NT/evnfR2hdqbFW/gcdgqUGWnxUCoEHHej3Gy33sDyihBZPTMdrx3t8MsByWBigByFFHmzgHUP4Y48I77yuc9M+Mv/ghUVeDN5hqz+OESSfS2ujEZKnOcZjbG9kK8o8/fKIt+BkRHTnhyO9NaRPR8DDjtEMfLLlSY7u/Doh414arceP72yDNfNk3f9bIaxAe/+4dt4QHTI7gzGsc5BaFUKTEvhiGONSoEZaXFTHtTrM1nR0GPCncunBWllNNbK4jTs/s4loV6Gx1hiEYX26I0Q8mbh1w/9eNJf+rMzdWgfGIbBbAvi6qLHvpZ+LPayJisrIQYJMSoe1PPSwdb+0alpgbZq1SooVGpAoYj4ciWpz7HDcXZv6Zr6bvzXK0fw2XnZ+OHlpaFdpBvspw8BDvt5X4ccHOscxMwMneymEIaKO50szkzQSw7CiihSMECOQrubDZiZHj/lqV/pEIh0S4/8x2i2ob57yOuxm4IgjHSyYKs3Tw1Y7DjZYwrKAT3A1drsxge3QLXyi/jPW2/LJhMZCJWVldBoNFAqz5xdONVrwo1P70Fpejye+vzCsDhU+pkrqgClCgql/M5gHO0c5AG9MWZl6lDXNQS7wznhY3ZLAXJBeB0So9BigByFduuNWFqQPOXj5mS5atyOdjAI87cDra4DQL6cqJ6ZoWMG2QuH24N3QE9yzeWXwr50HVJLyoN2zVCoqKhAdXX1aG/pBUuWYe2Tn8DqcGLbbcvCpm/szVdfBuHGh1D5pbtlVV5hsTnQ2GviAb0xyjJ0sDqcaOqb+MDy
Hr0B01PjkOpFORtFL9YgR5n2fgtajBa3mqUXpcYhRqXAUQ8mFZF7vBkxfa6ZGfF4bn8Lhu2OsGq+HmrB7GAhkfZ5r94QVpOkvFFRUYGKigqIoogv/WMf9rf249+3XTB6sDQcxGlUmFm+BEnZCaiokM+BwhNdQxBFV/kbuZzpZDE42i3hXLubjVjK7DF5iBnkKCP1gnQng6xUCCjL0LHEIgD2thiRlRCDnESt188xMyMeogiOMfbQgdZ+JGlVQT3kVJYRj1i1Avta5Nk6LBB++34DntnbggfXlOEzc7JCvRyPlecm4mCrvPZrtMUbSyxGnemFPH4ip9dkRWOvifXH5DEGyFFmj94IQXA/czk7S8cMcgDsazF6fUBPUjam1Ru5TzqgF8w+vCqlAgtyk0Zb+0W66hNd+O6/j+CG+dn4fpX8D+WNpzwnESd7TBgctod6KaOOdQ5CEBBW2fhAS4vXIEOnmfCg3pkDeswgk2cYIEeZ3c0GzMrUQRfjXnXN7EwdGntNMNscAV5Z9LDYHDjSMehz0/TSMa3eyD2iKOJg20BQyyski/OSsK/FCKdTDPq1g6mxx4T1f9uD2VkJePLmRbIdCDIVaaLe4fbA3kF7Ytdp1DS598bpaMcAilLiEKtmSdVYk3Wy2N1sAMAAmTzHADnK7NEbPWqWPjsrAaI48e0r8tyhtgE4nKJP9ccAkKhVIzshhp0sPNDUa8bAsD1oHSzGWpSXiH6LHQ29kVkSYzTb8OSuZlz1lx1wiMDWryxDgjZ8j7nMz3HVih9qC1yZxZ9rm3DbPw/gG682otdknfLxxzoHeUBvHJMFyHv0RhSnxXnVb56iGwPkKNJqtKC1370DepIzrd4YhPmLLyOmz+Vq9cYMsrtCcUBPIu33vggqszDbHHjpYCs+9+QnyPrJW/jKP/fD5hTx0oalEx6YChdFKXGI1ygDlkF+/WgHvvHSIVQUpqB/2IFN1fWTPt7pFHG8iy3exlOWoUP3kBXdg8Pn/dsevZHZY/JK+L69J4/t0RsAuHdATzIzIx4KATjqxqx7cs/eFiOStCpM98OI6JkZOrzyabsfVhUdDrT2QxCAeSHoJDE3OwFqpYC9eiNuXJAb9Ov7i93hxPb6bvxjbwv+dagdA8N2ZCXE4GsVhfj8ojxcMC05bMsqxlIoBMzLTghIBnmv3oCbnt6DBbmJeOurK3D7M7vw6IeN+NZFRROOjj9tMMNsczKDPA7pNTneNYR0Xczox3uGXAf0vlZRGKqlURhjgBxF9uiNUAjAQg9uL8eolChOi2cG2Y/2tRixMC/JL0HEzIx4dA5aYTDbkBwbHj1mQ+lgWz9K0uIR72YNvj/FqJSYm5UwemgoHL16pAO3/3M/OgetSNKqcOOCHHx+UR5WlaRH5GS3edmJ2PZpO0RR9FvQf6rXhM88vgtp8Rq8evty6GJU+O6FuXjleB8eePM4nv7ConE/b7SDBQPk88wa08nioumpox/nAT3yBUssosjuZgNmZyV4HBzMzmQnC3+xO5w42Or9iOlzSafZ61hm4ZZgjpgez+L8JOxrNUIUw/Og3u8+aECMSoF/fXkp2n9yBbasX4jLZ2ZEZHAMuOqQu4es6Bycuj7YHQazDVc/vhNmmwOv37EcuUmuNo95iRrcc3Ex/r5Xj/0TlOBI7TbZA/l8Ralx0CgV59UhS3dN/VHORtGHAXKUEEXR61qs2Vk6nOganHSUJ7nnWOcgLHanzx0sJDNHOlmw1dvUhobtqO8ZCskBPcnivCR0DVrRYrSEbA2+ONg2gMtLM7B2fg60UdBJQepk4Y8yC6vdiRue/AR13UP415eXnTcw5ntVJUiJVeO+V4+O+/nHOgeRFqc+q4SAXJQKATMz4s8LkHfrjZjBA3rkJQbIUaK134L2gWGPOlhIZmcmwOYQI/b0fTD5Y4LeWDPSXDXi7GQxtcPtAxDF0BzQk0iZrHAss+gcGEbHwPBod4doINWq+xogi6KI25/fj3fre7DlpgW4rDT9vMckx6rxw8tL8daJLrx9vOu8fz/aOYjZWdHz2ntqvE4We/QGDgghrzFAjhK7m6UJet5lkAF2svCHfS390KoUfqsj1KgUmJ4ax04WbghlBwtJeU4iBAFhOTDkkAxev2DLTIhBpk6DQ22+HVL+0ZvH8fc9rqmCX1paMOHjvnFREYpSY/Hfrx45r182W7xNrixTh4ZeE4btrp79PUNWNPWaWX9MXmOAHCX26A1QCPDq9vKZAJmdLHy1t8WI8txEqJT++9GbmaFjBtkNB1r7kRCjQlFq8EZMnys+RoVZmbqwbPU2+gYjhCUqoTA/J9GnVm9bdp7GQ+/U4fYLpuEHl08+VTBGpcTPr5qF/a39+Me+ltGP9wxZ0TVoZYA8iVmZOjicIk52u+50etO1iWgsBshRYnezEXOzExCn8fz0fqJWjbwkLQ/q+UgURez3w4jpc0m9kMP14FewHGzrR3lOQshbkC3OSwrLEotDbQPISohBRpTVwM7LTsCnHQNeTUDceaoPX33xIK6YmYHH1s1363vv5oV5WJyfhB++cQyWkQmmUukAeyBP7EyrN9drtUfvv37zFJ0YIEcB1wE932qxZmfqWGLho5f+8y6M7z+LxN7JBwJ4amaGDkNWB9r6z2+STy6iKOJgaz8W5Ib+j+WivCTojRZ0jTPUQM6kNxjRZn5OIkxWh1dnMF482AaVQsALG5ZA7eZdI4VCwK8+Mxun+sz4w0dNANjizR1lIx19pNdqz8gBPba/JG8xQI4CeoMFnYNWLPXhnfTsrAQc7RxgltJLtbW1uOWznwE+egaP3PNF1NbW+u25pU4WLLOY2Ok+M4wWO8pzQx/gheNEPYdTxKftA1FVfyyRDiUe9uKg3o5TfViUl4RErWdBWtXMDFxZloGfv1OHPpMVRzsGoFUpUJji+3ChSJWgVSEvSTsaIO9uNrC8gnzCADkKSLVYS3z4ZTE7S4fBYQf0hvBsTxVqNTU1sNmsgOiE3WZFTU2N356brd6mJocDehKpg0k4lVnUdw/BYneOtj2LJnNHOkcc8rAO2eZwYtfOWig+edGrN8S/vGY2DBYbNlXX41jnIGZm6CK237S/SJ0seoasONXHA3rkGwbIUWC33gilQvCp/6vUnJ4jp71TWVkJQakGBAU0Gg0qKyv99tz5SbGIVSvYyWISB1pdAbIcArzkWDWmp8aFVScLOb3BCLb4GBWK0+I8bvX2zKvVsD73A+z4x+9RVVXlcZC8IDcJX1qSj9992IhdzQaWV7ihLMMVIO9uNgDgAT3yDQPkKLC72YB52QmI9aGxv9R/k3XI3lmxYgV0t2zCkvXfQHV1NSoqKvz23AqFgNJ0HQPkSRxs68eMtDjoQjBiejyL85Owr8X34RPBcqitH0qFELWHxOZnJ3jc6m3rG28DDjucTgesVu/uGj24pgwA0DVojdrX3hOzMnXot9jx2tFOAPD7gWiKLgyQI5wvE/TGytRpkBKrZicLLzX0mNCfWoI77/5/fg2OJa5OFtybiRwI8Yjpcy3OS0J99xCMZluol+KWg639mJk
RHxXT88YzPycRdd1Do10l3GHLnQso1VAqlV7fNZqWEoe7V04HwAN67pBeo+f2t6AkPR5JPKBHPpBHOoUC5nSfGd1DVp9vNQmCK3vEXsje2XXaAABYPi0lIM8/MyMeWw+3w+Zwun1aPlqYrHbUdQ/hC4vyQr2UUdJBvf2tRlw64/ypanJzsG0AF0xLDvUyQmZedgIcThHHOgex0M2sZKN2Gi787u9xTWIXKisrvX5j/MPVpVApBVw9O9Orz48mUoDcNWhFVYn8f65I3viXNMJJvSD9cVhhdmYCM8he2nm6D7FqxejoWn+bmaGD3SmiiePAz/Np+6BrxLSMMsjhdFBvwGJHY68pqkZMn0uqXXe3DtlotuFY5yDWXHYJ7r//fp/uGiVq1fjF1bM97oQRjfKStIjXuO5ycMQ0+YoBcoTbrTdApRD8crhmdpYOXYNW9AxZ/bCy6LLrtKsPtT8n6I11ptUb65DPdaDVFYT6ckjV37ISYpCbqA2LOuTD7dF7QE9SmhEPjVLh9kS9T5oNEEVgRYDuGNH4FAoBZSNZ5CUFrD8m3zBAjnB7mo2Yl53gl9pBjpz2jtXuxN4WY0BvUc/MOHuKFJ1xsG0AuhglimTWQ3ZxflJYdLKQDqfJoQNIqKiVCszO0rl9UG/n6T4AwLIoLksJFWlgCA/oka9Ygxyhamtr8e6776K2Lhbrr7rML885O3Okk0XnIFYWp/nlOaPBobZ+DNuduCCALYfS4jVIjVMzgzyOg239mJ+dCIXMesguykvE60c7YLLavRoBHywH2/qREKNCYUpsqJcSUvOyE/DeyR63HrvjlKstG6e4Bd+dK6ahlAf0yA/k+1uZvFZbW4uqqioMW61wCkqkVjwLYIHPz1uY4uq3y1Zvntk10pNzeWFgb7fOzNCxk8U5Pv74Y+x84S9Yc/llAFaGejlnWZyXBKfoytAG+nvDF9KIaUGQ1xuMYJufk4hn9ragz2RFSpxmwseJooidp/tw9SweqguFVSXpWMUDeuQHLLGIQDU1NbBarXA6HIDDjsGT+/3yvAqFgFmZOhxhiYVHdp7qQ4ZOE/AMXFlGPDPIY9TW1qLq8sthee9pvPbgRr+O9/YHqZOFnMssRFHEobaB0fKK2tpabNq0SXavZTBIhxQ/naIOuanXjK5Bq6zf9BDR1BggR6DKykpoNBoICiWgVOGma67w23Ozk4XndjUbsHxaSsAzcDMzdGgxWjA4bA/odcJFTU0NrMOu8d4Ou82v4739oSA5Fmlxall3stAbLDCYbSjPTRy9M/XAAw94NRku3EkdaKYaOS3VHy9n/TFRWGOAHIEqKipQXV2N4mvvxMxv/B8uvdh/t5ZnZ+lwus/MIMxNUrunYPSQlTpZ1Hcziwy43igq1a7x3jF+Hu/tD4IgYFFe8A7q1dbW4tFHH/UosJVGTM/PThi9M+VweD8ZLpwVJMciSauastXbztN90KoUUX2okSgSMECOUCtWrEDPvOtRufIivz6v1MniOLPIbtk90u4pkAf0JFInC5ZZuFRUVOCS//4jUi7/st/He/vL4vwkHG4bgNXuDOh1pOzv//zP/3iU/ZWCwfk5iaN3pnyZDBfOBEHAvOyEKVu97TxlwJL8JA7sIQpz/AmOUA09JhjMNr83Sx/byYKmJh3QC0a7p5J0Vxsztno7oyW+CJd+/muyDI4B10E9q8MZ8Lp+b7O/B9sGUJgSi6RY9eidqQcffFC2bzgCbV5OIg61DUAUxXH/XWrpuIL1x0Rhj10sIpQ0QW+pn5ull6THQ6kQ2AvZTTtP9aE0PR6pk5x695c4jQoFyVp2shhhtjlwomsQNy3IDfVSJrQo/8xEPXdHGHtDyv5arVaPsr+H2vrPKhWoqKiIysBYMj87AX8229BitCA/+fxDtwdHWjrygB5R+GMGOULtbjZAo1RgXrZ/6+A0KgVK0uKYQXaDq92TAcsLk4N2TVerN5ZYAMBv3jsJpwisCOLr76mStHjoYpTYF+A6ZCn7e++997qd/R22O3CscxDlUTxi+lzSm4WJyix2nOIBPaJIwQxyhNqjN6I8NwEalf/fA83OSmAvZDfoDRa0DwzjgoLgZZPKMnT4x74WiKIY1X1r3z7ehQfePI4vLMrDGhn3o1UogndQr6KiAoWFhcjNdS+jfqxzEHanyMNmY8wbebNwqK1/3O+rnaf7kJ0Qg4JxsstEFF6YQY5ATqeIPXqD3+uPJbOzdKjvHoLNEdiDReFuV/NINimoGeR4GMw2dA9Zg3ZNuWnuM+MLz+zFnKwEbL6xXPZvFBblJWF/az8czvHrWv3F4RSxQz8Ap5vXkcYqlzNAHpUap0FuonbCkdM7TxmwfFqy7L/niGhqDJAj0PGuQRgtdqyYFpjM5exMHexOke3EprDrtAFqpYAFucELMKRWb8eitARm2O7AjU/vxrDdiZc2LEV8jPxvki3OS4Kp6VPc+8BPA9pb+Ffv1uNz/zyBx3eeduvxB1v7oVEqRr+nyGV+TsK4rd56TVbUdQ+x/pgoQjBAjkA7TxkABC5zOTtrpJMFyywmtfO0AQtzkxCjUgbtmtJBr09GumeEM2+mtv2/V45g52kDnrh5AcoydQFcnf8o248DL/4Qjzz8kE8DOCZ7vRp7THjw7RMAgIe318Puxt2fQ+39mJOlg4rtys4yPycRRzsHz3sNd502AACWBygxQUTBxd98EWjH6T4kaVUoywhMgDBrJPD4T837UTt2dioOp4jdzYagH9bJSdQie6ART/z+N2G9L95MbXtmjx5/+KgJ/+/SYnyuXL6dK8516uAuwGGH0+n9AI7JXi9RFHH31sNQCAJ+flkBGntNeHZfy5TPebB1AOVBvPsRLuZlJ2DY7jzvDtrOU30QBP93DiKi0GCAHIF2nurDsoJkKBSBqYPTxaiQ2d+ALfduiNqxs1M50jGAIasjKBP0xqqtrUXXU/+Nwy/9Kaz3xdO+vYfa+nHnCwdwSXEqHv7M7OAs0k8uu2wVFCo1oFB4PYBjstdr2+F2vHqkAz+9sgy3LsxAeU4iflFdP2ktcs+QFa39Fsz3cxecSDA/Z/yR0ztPGzAnKwGJWnUolkVEfhbwANnhcGDRokW45pprAACNjY1Yvnw5SkpKsH79elitrsNEw8PDWL9+PUpKSrB8+XI0NTUFemkRyWS141D7QMDr4BK7j8Fhs0Xt2NmpjN5uDXI9Yk1NDZx2GyA6w3pfPJna1m+x4XNP7kaSVo3nvrQk7EoCKioqsPanj0Nz8Zfw1tvveNVneKLXa3DYjru3Hsa87ATcffF0KAQBP7i8FMc6B/GvQ20TPp9UY1ueyxZv55qdlQCFABwec1BPFEXsOt0XsHMfRBR8Af9L8sgjj2D27DMZnfvuuw/f+c53UF9fj5SUFGzZsgUAsGXLFqSkpKC+vh7f+c53cN999wV6aRFpj94Ih1MM+K39xSsuApSqqB07O5Vdp/uQHKtGSVpwDzi5AqUYQFBAqVKH7b64O7VNFEV85bn9aOg14flblyAnURvklfrH9asvhXXJ55BcPM+rz5/o9frZWyfQbL
DgT+vKR0cff648B2UZ8XjonboJJ8IdlAJkdrA4T6xaiZL0eBxqP3NQ72SPCT0mW1A71hBRYAU0QNbr9Xjttddwxx13AHD9Mdu+fTvWrVsHANiwYQO2bt0KANi2bRs2bNgAAFi3bh2qq6sn/OVNExs9oBfgTEbVJRcD6x7Cf93/QNSOnZ3MztMGXBDAMpeJVFRU4K233obq4i9i7U8fD+t9qaiowP333z/p1/C/NQ3416F2/Oqa2bi4OC2Iq/Mv6U7DzpE7D9449/U63NaP/3u/AbddUICLpqeOPk6pEHB/VSkOtPbjtaOd4z7XobYBpMdrkJUQ4/V6Itn8kZHTkjMDQphBJooUAe2B9O1vfxu/+tWvMDDg+kXS09OD5ORkqFSuy+bn56OlxXVYpKWlBQUFBa5FqVRISkpCT08P0tPTz3rOzZs3Y/PmzQCA9vZ2tLa2BvJLGFdXV1fQr+mumhOtmJakgX2gB60BnAadobQAubNQvqYEhYVJIdmHseS0JyabA4fb+lG5PDskr0vJjOlYfP2XUecUQ7ovgdgTh1NEY98wDneacKBjCFv2duLq0mSsL9GG/HvQF/GiiGStEu8ebcFVBb7/WnaKIm7/5wnoNAp8Z0nq6Gsj7UlltgIFiRr86PVPsSjJfl7f3j2nulGWFoO2tonLMKJZUTzwr+4h1J9qRpxaie1H9IhTK5DiHEBrq+fdfeT0+4tcuCfyE+w9CViA/OqrryIzMxNLlizxax3kxo0bsXHjRgDA0qVL3Z4K5W+huu5UDnZ+ioumpwd8fRcnDgM4gU67RjavhVzW8UFDDxwiUDWnALm52SFZw6WlBjzyQSPSMrOC2mbuXL7sidXuxKftA9jXYsTeFiP2tRhxoLUfQ1YHAEDVfhzTTSfxnWu+gLy8PH8tOWSWFzbjUPewX76Pn9h1GrtaBvH4TQswr2TaWf8mPf8PrrDhay8ewrEhDapmZoz+u9Mp4njvfty5fJpsfqbk5sKZAv63tg0GRQJKcpNxuOcklk1LQUG+99+HfK3lh3siP8Hck4CVWHz00Ud45ZVXUFRUhJtvvhnbt2/HPffcA4PBALvdDsBVgiH9YcvLy0NzczMAwG63w2g0Ii0tfG+ZhkKr0YJmgyUoB8PSdTFIj9ewF/I4pAN6F4TwdmtFUQqsDif26gM/wjgQhobtKHjwbSz+v/dx+/MH8PRuPZQKAbcvn4Yn1i/E05fGQv3yj9D477/guquvDNtuHWMtn5aCw+39GBy2+/Q8PUNW3PvvI7iwKAVfWVYw4eO+vKwAuYlaPPRO3Vkfb+g1wWR1sP54EmNHTltsDuxvNQa9pSMRBVbAAuRNmzZBr9ejqakJzz33HC677DI888wzWLVqFV588UUAwFNPPYXrr78eAHDdddfhqaeeAgC8+OKLuOyyyziu00M7T0t1cMlBud7sLB2OdgSwjiNM7TptQGFKbEjrNytG3iTVjtRGhpu9LUZ0DlrxszVlOPG9VTA8tAbvf/MiPPLZefjyBQXQf/qJR23gwsHywmQ4RWCP3uDT83zvtaMwWOx47HPlk9bAx6iUuHfVDNSc7MGHDT2jHz/YKnWwYIA8kRlp8YhVK3CobQD7W/thc4isPyaKMEHvh/TLX/4Sv/nNb1BSUoKenh7cfvvtAIDbb78dPT09KCkpwW9+8xs8/PDDwV5a2Nt5yjXaeFFecBrVz87U4WjnIA9TnmPn6b6Q/7HMTtSiKDUWtU3hGSDvGcl837l8GkozdOcFep60gQsXFxQkAzhz0NYbtU29eHznadxz8XS3Atw7l09Dhk6Dn1efySIfauuHIABzssJjEmEoKBUC5mQl4HB7P3ZKB/TYwYIoogT0kJ6ksrJy9A9YcXExdu3add5jtFotXnjhhWAsJ2LtPN2HBbmJ0KqDU3M6JysBvabTaB8YDtv2Wv7WMTCMU31m3LVyeqiXgorCVLw/JjMYTnY3G5CXpEX2BN9XUluzmpoaVFZWhnW3Dkm6LgYz0uJG7wR5yu5w4usvHUJekhY/uaLMrc+Jj1Hhvy4pxv2vH8PuZgNs+qN44S9/R37GLMRpgvLnIWzNy07Av995Dx3V9chIKkVeUmyol0REfsTfgBHC4RSxW2/AhqUT1xz624qR2/gfNvbixgU8zAC4+h8DCPoEvfFUFKbg2X0taO4zoyAlvP5479EbsSR/8jshFRUVEREYj7V8Wgre8/JNzaMfNuJAaz9e3LAECVr3f7V/46Ii/PLdk/h/f34Jn/zmLpgtw1Cq1ai9dWnEvb7+lNBbj96n70Ovw+56vW67gK8XUQQJr5FTNKEjHQMYHHaMBq3BsDg/CboYJWrqwzNLGQi7ThugVAhYHKQyl8lUFIVnHfKAxY7jXYNYkp8c6qUE3fLCZLQYLWgxmj36PKdTxC/fPYnLS9Nxw/wcjz43UavGPRdPx/vvvYfhYSsgOuG02yOirjuQhk7uBxx2vl5EEYoBcoQYrYMLYuZSrVTgoqJU1JzsDto15W7XaQPmZScgPib0N2cW5CYiVq1A7aneUC/FI/tajBBFYGlB6N9kBJtUu+5pHfL+ViM6BobxpaX5Xh1uvvvi6YidvgCiUgUICqg14TuFMVg+e9VqYOT10vD1Ioo4DJAjxM7TBqTGqVGSHuTRxjPScKRjEJ0Dw0G9rhw5nSJ2NRtk0+5JrVRgaUFy2B3U2z3SxSEaM8gL8xKhUSo8rkN+45hrIt6VZZleXTc1ToO7broK4uceBC66BX//12ssF5jCtZdfioQvPgxh5Rfx+ptv8fUiijAMkCPEztN9uGBactBb41WWuCYdhuthMH+q7xmCwWwLaf/jc1UUpmBvixEWmyPUS3HbnmYj8pO0UTnmOEalxMK8RI9HTr9xtBNL8pN8es3+69IZ0E6bg/iVN+NzV67y+nmihSAIWLFiBZbccDsuu/TiUC+HiPyMAXIEGLDYcbh9ICStxZbkJyFeo0TNSQbIoShzmUpFYQpsDjGsBobs0RumPKAXyZZPS8HuZgMcTvfaJ/aZrKg91YerZnmXPZZkJcTgZ2vKcMfyaZP2T6Yznrx5Ef61YVmol0FEAcAAOQLs1hsgiqEJzNRKBVZOT2WADFf9cbxGidlZCaFeyqiKolQA4XNQr99iw/GuISwd6QkcjZZPS8aQ1YFP290bwvP2iW44RfgcIAPAvatK8NvPzvP5eaJFbpI27DrEEJF7GCBHAClzGapb+5Uz0vBp+0DU1yHvajZgaUEylDLKvmUlxGB6alzYBMj7WlyZ7qjOII90otnh5p69cawTKbHqoIyYJyKKFgyQI8DO0waUpMcjLV4TkuuzDhkYtjuwv6VfVuUVkorCFNQ29YXFxMPdzVKAnBzahYTQjLQ4pMWp3Tqo53SKePNYJ64oy5DVGzMionDHADnMiaKInaf7sCKEY05Zhwx81NgHq8MpiwEh56ooSkFrvwXNBs9664bCHr0RBclaZEbhAT2JIAi4YFqKWwf1DrT2o31g2C/lFUREdAYD5DCnN1jQ1j8ckgN6kmivQ377eBfWPvkJshNisGokmy4nFSO33
sOh3ZvrgF5yqJcRcsunJeNIxwD6LbZJHye1d1vDAJmIyK8YIIc56TZsKANkIHrrkP9c24SrHt+JwpRY7LxnJVLjQlPmMpny0YEh8g6QjWYbTnQNReWAkHMtL0yBKJ4pOZnIG8c6sdjH9m5ERHQ+BshhbudpA2JUCizITQzpOqKtDtnhFPFf2z7F1148hCvLMvDRt1ZiWkpcqJc1LrVSgWUFybIPkM8c0EsO7UJkQCrVmawO2WC2ofZUH9aUZQRpVURE0YMBcpjbcaoPi/KSoFGFdiulOuT3oqDMYnDYjrVPfIL/e78Bd188Hdu+sgwJ2tCPlp5MRWEq9sl8YMiZA3rMIKfGaVCaHj/aoWY875zogsMpsv6YiCgAGCCHMZvDiT16eYw2VisVuKgo8uuQ9QYzLv79R3jtaAd+v3YeHvnsPKiU8v8xqihyDQzZI+OBIXv0BkxLiUWGjuUCALC8MBk7Txsm7D7yxrFOJMeqsYLt3YiI/E7+f9lpQofbBmC2OUNefyypLEnD4fYBdA1GZh3ynmYDLnjkA5zsMeG1O5bjmyunh3pJbguHg3q79UZmj8dYPi0F7QPD43YfEUURbx7rwuqZ6WHxBo2IKNzI+74wTUqqT5RLBqlyRhoAVx3y58pzQ7wa97UaLbjur7tgdTgRo1JAq1JCq1JAq1aO/LcCaqUCzx9oRXq8Bh/ftQLzckJb8+2pzIQYFKfFYYcbvXVDwWi2ob57CF9ZVhDqpciG9MZ352nDefXtB9v60dpvYXkFEVGAMPUQxnaeNiBDp0FRqjxGnS4tSEacRoma+vAqs3jtaAf26I2YlhyLTF0MNEoFTDYHWvstONY5iB2n+vD2iS5cVJSCnXevDLvgWCLngSF7OUHvPAtyExGjUoxbh/zGUbZ3IyIKJGaQw9jO031YPi0FgiCPCVpqpQIrw7AO+YOGXmTqNPj37RfI5rUMhBWFKXhmbwuaDWbZddzY3WwAwAB5LI1KgUV5SeMODHnjWCcW5iYiJ1Eb/IUREUUBZpDDlNFsw7HOQVkc0BsrHOuQP2zsxcrpqT4Fx7W1tdi0aRNqa2v9uDL/knMd8h69EYUpsUjnAb2zLJ+WjD16A2wO5+jHjGYbPmrqw1WzmT0mIgoUZpDD1CfNBohi6AeEnCvc6pBbjGY09ppwXUofNm16B5WVlaioqPDoOWpra1FVVQWr1QqNRoPq6mqPnyMYxg4MWb8oL9TLOcvuZgOzx+NYPi0Fj3zQiMNtA7A0H0FNTQ3E/LlwOAXWHxMRBRAD5DAlHdBbJrMM8tg65HAIkD9o6AVaj+FPf/wx7DbvAtyamhpYrVY4HA5YrVbU1NTIMkAO5sCQt4534v+2n8SzX85Acqx60sf2maw42WPC7cunBXxd4WZ5YTIA4NnXq/H7b38RVqsVUKoQf/MvUFH4mdAujogogrHEIkztOGXArEzdlMFHsEl1yO+FyUS9Dxt7oWn7FHbb2QGuJyorK6HRaKBUKqHRaFBZWRmQtfpDRWEq9uzaiQcf+nnAykHqugax9hd/x5v/+Au+/Jvnpnz8Xj0P6E1kemoc0uM1qN7+7uibMIfNhummk2zvRkQUQPwNG4ZEURw5oJcc6qWMq7IkDYfaBtAdBnXIHzT0YsHyC30KcCsqKlBdXY0HH3xQtuUVkhTDSTie/wF+8uMfo6qqyu9B8uCwHVf85EmY/vF94KNnsO0nd+CxF9+c9HP26DlieiKCIGD5tGT0pJWNfo9CqcK1V14e6qUREUU0BshB5o/DXE29ZnQNWmXT//hcZ+qQe0O8kskZzDYcau/HtZdX+hzgVlRU4P7775d1cAwA/fX7AIcdTqd32fLJiKKI2/65H6cO7oLCaQdEJ+Cw40ePvzjpiOs9eiOKUmORFq/x21oiyfLCFJyOLcS21/+D1V/+NrDuIXzzxjWhXhYRUURjgBxE0mGuBx54wKfs3VOvvA3segHarjo/r9A/RuuQZd7u7eOmXogisHJ6atgEuL665srLIajUgML/5SD/W9OAFw604es3XYuYGFe2MyZGg+6UMvyieuLv1d16A7PHk1g+LRmiCAi5s2BZfAPKl1yAvCR59D4nIopUDJCDaLzDXJ76y7/+g59uXA989Ay+8fnrZdlW7Ew/5O5QL2VSHzT0Qq0UZFuqEggVFRW45kebEXPJF/HW2+/47Q3B9rpu3PfaEawrz8Hvv7UO1dXVuPfee/Hu9u340rWXY1N1PQ619Z/3eX0mKxp6TFjK+uMJXTDSqeadE934sLGX3SuIiIKAAXIQ+XqY642jHfjm754FHDZAdPr9Frk/XTpD/nXIHzT0YEl+MuI00dXM5earL8Pw4s8hrnCOX57vdJ8J6/+2B7Mydfjr+oUQBAEVFRW46667UFFRgd9cNwfJsWrc+fwBOJxnT/Fj/fHUkmPVKMuIx2O1TbA7RVw1OyPUSyIiingMkIPIl8Ncf9vdjOv++gmml18ArTZG9h0T5F6HbLE58EmzESunp4Z6KUFXOSMdAPCuH0aCW2wO3PDkblgdTrz8lWVI0J7/ZiNdF4PfXj8XO08b8MePms76t9EAuYAZ5MksL0xBv8WORK0KFxZF3/csEVGwMUAOMm9qXf+35iRufXY/LilOwye/uhPbw6BjgtzrkD9pNsDqcOLiKAyQc5O0mJkR7/PeiKKIb7x0CHv0Rvzt84swM0M34WO/sDgPa2Zl4P7Xj+J0n2n047ubDZieGofUOB7Qm4w0EOjy0nSo2d6NiCjgouvecpiora1FTU0NLrnkUmztTcava07ixgU5+NsXFiFGpURFRYVsA2OJRqXARUUpsq1D/mAks32RmwGytCfeTNqTo1Ul6Xh2XwvsDqfX/XQ37ziFJz5pxgOrS3HdvOxJHysIAh77XDnm/k8Nvv7SIbx6+wUQBAF79EYsZfZ4StKdjmvmZIV4JURE0YEBssyMHVsMpQqOGx7EN29cg0c+Ow9KhRDq5XmkckY6frBlKx746Ye4+ooqWQWWHzT2YE6Wzq3WYuEyStoTq2ak4c+1p7Cvpd+raYw7TvXhrpcP46pZmfjxFWVufU5Rahx+ftUsfGfbp/jn/lZcUZaBxl4TvlpR6PH1o015biJ23L0SSwuSQ70UIqKowHt1MjO204XDZsPlse14dG34BccAkG5sAF78IX7xs58EZCiFtxxOER839eHi4jS3Hu+P7iNyc+lIjbg3GX6r3Ykbn9qNguRYPHPLIo++N+9aOR3LCpJx99bDePt4FwBO0HPX8sKUsPw9QEQUjhggh5goimjoGcJLB1vxwBvH8Gp/BhyCChAUUGs0+Nmd6yAI4flHsfPYnoANpfDFobZ+9Fvsbtcfh9MoaXdlJ2oxO0vn1UG9mpPd0Bst+N9r5yDFw9phpULA4zctQJ/Jhq++eBAAA2QiIpIfllgEmcXmwAsHWrG3xYh9Lf3Y32KE0WIHACgEYHZWCa74wZ+Q11+HO2+8Jqxv5Vddtgo//qkaTrtNVoGlVH/sbgcLqftIJNUgA65OI3/bo4fN4fTo4NfWw+2I1yhxpZf9eMtzE/Hfq2bg
F9X1KE6L8zjIJiIiCjQGyEG2eccp3LP1U8SqFViQm4TPL8rDorwkLMpLwrycBMSqlaFeot9UVFTgjl8/hc0vvIZ//fSOgAWWRrMNpwzDyM117/EfNPagIFmLwtQ4t68RDgcjPbWqJB2PfXwKe/VGLHdzbLnTKWLr4XasmZXp0/fqA6tn4tnXtyP10HHU1sZF3GtLREThjQFykL1zohsz0uJw/HuXRUU94a3XrsbmU3GwZJQG5Pk/OW3ANQ8+hd4Te/Hs/Ruwbs1lkz5eFEV82NiLVSO9gKPZpSM12O/Wd7sdIO9qNqCtfxifnaJrxVT27d6F9r/ei9NWK6q2PR4RBx+JiChysAY5iOwOJ95r6EFVaXpUBMcAsKwgGbFqRUD6IW/ZeRoX3v84up68D/YP/o71112Njz/+eNLPaegxoa1/GCuLo6//8bkyE2IwNzvBozrkrYfaoVII+Mxs38YdR+LBRyIiihwMkINoX4vrcNiqkujJXmpUClxYlOrXAHnY7sBXXziAO54/gCLTSShEOyA64bTb8Ou/bZ30c6X642gcEDKeVTPS8GFTL2wO55SPFUURLx9uQ+WMNBw7sAebNm3yujNJJB58JCKiyMEAOYi217taakVTgAy4DoMdbOtHr8nq1efX1taOBmN6gxmX/OFjbN5xGvdXleCJ735pNNBSqNT4jykbeoN5wuf6sLEXKbFqzMlK8PbLiSirStJhsjrwyWnDlI892jGIE11DWCC0oaqqCg888IDX7ft8GbtOREQUaKxBDqLtdd2Ym52ArISYUC8lqC6dkQZRdGVvr/ewdnXskA6VWo3Yz/8CjuxZ+NeXl2Lt/BwAQHV1NV555RUsvmQ1NrxnxtdfOoRXbls2bnu8Dxp7cNH0VCiipMRlKpeMlJrUnOzBhVNk1bcebgcAqFsPn1ce4U2AG4kHH4mIKDIwgxwkVrsTHzT24LIoyx4DwAXTkqFVKbwaSjG2VnV42Ap16xHsumflaHAMuAKtu+66CzdedRl+ftUsvHqkA8/uaznvuToGhnGia4jlFWOk62JQnpOId+un3puXD7fhgmnJuG7NapZHEBFRRGOAHCQ7T/fBbHPishL3prdFkhiVEhVFKXjPizrklRdfAihdg1OUajX+8b1bMWuS8oi7Ly7GisIU3P3yYXQODJ/1bx81jtQf84DeWVaVpOGjpl4M2x0TPqa5z4zdzUasnZfN8ggiIop4DJCDZHtdNwThzIjfaFM5Ix37W/vR52Edcl/yDDhueBCXbbgH77+7HZdXXjzp45UKAVtuWoCBYQfu3nr4rH/7oLEHWpUCS/KTPV1+RKuckQazzTlpHfK2T13lFVLmvqKiAvfffz+DYyIiikgMkIPk3ZM9WJSXFLVTwyrH1CF74rn9rUgrmY83H/81LrzwQrc+Z052Ah5YXYp/7m/FtpG6WcB17eWFKdCo+G0/1iUz0iAIru/Ribx8qB2zMnUoy9QFcWVEREShwUghCExWO2qb+qKy/lhywbRkxKgUeK/B/TILk9WOVz5tx+fKczwahQwA911WggW5ifj6SwdhMNswYLFjX4uR9cfjSI3TYMEkdci9Jivea+jB2vm+DQchIiIKFwyQg+Djpj5YHdFZfyzRqpWoKEzxqB/ya0c7MWR14OZFeR5fT61U4K/rF6Bz0IrvvnIEO071wSkCKxkgj2tVSTpqm/pgsZ1fh/zqkQ44nCLWzssZ5zOJiIgiDwPkINhe3w2VQsDK6dEbIAOuMot9LUYYzDa3Hv/cvhZkJ8TgkmLvXrfF+cm4t3IGtuw6jV9U10EhABVF7o1UjjarStJgsTux83Tfef/28qF25CVpsSQ/KQQrIyIiCj4GyEGwva4bF0xLRoI2uttOS/2QP2ycug6532LD60c7ceOCXJ/Gcv/oipmYmRGPmpM9WJiXhESt2uvnimQXF6dBIeC8sdMmqx3/Od6Jz87LZu9oIiKKGgyQA8xotuGTZkNU1x9LVhSmIEalQI0bPXdf+bQDFrsT6xfm+nTNWLUSW25aAEE4MxSDzpccq8aivKTzSmDeOt4Fs82JtR4OeCEiIgpn0Z3SDIIPGnvhFKNvvPR4tGolVrhZh/zP/a0oSNaiotD3koiVxWn44JsXYRY7MExqVUk6fvdBI8w2B2LVSgDAy4fbkRKrxiVR2p6QiIiiEzPIAfZufTdiVArWvo64tNhVh2ycpA65z2TFf453Yv3CPL/d1r9oeirS4qOzxZ67KmekwepwYscpVx2y3eHEvz/twDVzsjzuIkJERBTO+FcvwLbXdePCopTRjFy0qyxJg3OKOuSXD7XD5hB9Lq8Yq7a2Fps2bUJtba3fnjPSXFycOlKH7CqBeb+hF31mG9u7ERFR1GGJRQD1DFmxv7UfD64pC/VSZGNFYQo0SgVqTvbgM3Oyxn3Mc/tbMCMtzm9dE2pra1FVVQWr1QqNRsPxyBNI1KqxJD959KDey4faoFUpcMXMjBCvjIiIKLiYQQ6gmpOuTBwP6J0Rq1ZieWEy3pugDrlzYBjb63uwfmEuBME/5RU1NTWwWq1wOBywWq2oqanxy/NGolUladh5ug9Dw3ZsPdyOK8syEB/D99FERBRdGCB76L7N/8KX73fvVv32uh7Ea5RYNi058AsLI5Uz0rBHb0C/5fw65JcOtcHhFL0aDjLh9SorodFooFQqodFoUFlZ6bfnjjSrStJhc4j4/UdN0BstWDufw0GIiCj6MED2wMcff4z/vesLePtvf0TlqsumDJK313fj4uJUHnA6x6XFE9ch/3N/K2Zn6TAvO8Fv16uoqEB1dTUefPBBlldM4aKiVCgVAn7+Th2UCgHXTFAGQ0REFMkYuXngvffeAxx2QHTCarXiLy+8NuFj2/otONY5yPKKcVQUpUCtFM4rs2g1WvB+Qw9uXpjnt/KK0WtWVOD+++9ncDyFBK0KywqSMTBsxyXF7PxBRETRiQGyB8beqhdUajzbnYJPThvGfazUCYAB8vniNCosn3Z+P+QXDrRCFOHX7hXkuVJbM7DrBSxAa6iXQkREFBIMkD0g3aq/9957se31N5FTthBXP74TxzsHz3vs9roeJMeqsTDPP50YIs2lM9KwR2/EgMU++rHn9rdiYW4iyjjQI2Rqa2vx/A9vBz56Bn/6f7eyLR4REUUlBsgeqqiowF133YVrL6/EfzYuhyAAV27egVaj5azHba/vRuWMNCj9NOgi0lTOSIPDKeKjJlcdclOvCTtO9TF7HGI1NTWw26yA6ISNHT+IiChKMUD2QWmGDm/csRw9Jiuu3LwDfSYrAKCxx4TGXhPLKyZRUeiqQ64Z6bn7/H7X7fz1C/3XvYI8x44fREREHBTisyUFydj65WW4+vFduO6vn+Ctr64YrT9eVZIW4tXJV3yM6zDYv9+pQdKhrXiyPQnLi+djelpcqJcW1aQyopqaGlRWVvJQIxERRSUGyH5QNTMDf79lEdb/bQ/WP70HcRolMnQazPVjq7JIVGJtxtN/+DZ+KDrgFJS4+5G/h3pJBFeQzMCYiIiiGQNkP7lxQS66h6z4xkuHAMC
vk+AilbLlMOCwwyk6AUFEXMfRUC+JiIiIKHA1yBaLBRdccAEWLFiAuXPn4sc//jEAoLGxEcuXL0dJSQnWr18Pq9VVtzs8PIz169ejpKQEy5cvR1NTU6CWFjBfv7AIP75iJtB6DLba59kBYAq3XL8GUKoAQQGFSo3r1qwO9ZKIiIiIAhcgx8TEYPv27Thw4AD279+PN998Ezt27MB9992H73znO6ivr0dKSgq2bNkCANiyZQtSUlJQX1+P73znO7jvvvsCtbSAuiKhBzFbf4xtm/8XVVVVDJInUXXpxZj3rUeAi27Bdx79B2/rExERkSwELEAWBAE6naufrc1mg81mgyAI2L59O9atWwcA2LBhA7Zu3QoA2LZtGzZs2AAAWLduHaqrqyGKYqCWFzDvvfce7DYrHA4HrGyTNaW1V1ZCXXET7v3CZ0K9FCIiIiIAAa5BdjgcWLJkCerr6/HNb34TM2bMQHJyMlQq12Xz8/PR0tICAGhpaUFBQYFrUSoVkpKS0NPTg/T0s1ulbd68GZs3bwYAtLe3o7U1+NO+urq6Jvy3uXPnQq1WAwDUajXmzp0bkjWGiw2zdajKmw3HQA9aB7x/nsn2hEKDeyI/3BN54r7ID/dEfoK9JwENkJVKJfbv3w+DwYC1a9fi2LFjPj/nxo0bsXHjRgDA0qVLkZsbmsESE133uuuuw/bt29kmywMz/PQ8ofpeoIlxT+SHeyJP3Bf54Z7ITzD3JChdLJKTk7Fq1SrU1tbCYDDAbrdDpVJBr9cjL881GCIvLw/Nzc3Iz8+H3W6H0WhEWlp49hFmmywiIiKi8BWwGuSuri4YDAYAgNlsxttvv43Zs2dj1apVePHFFwEATz31FK6//noArszrU089BQB48cUXcdlll7FNGhEREREFXcAyyG1tbdiwYQMcDgecTiduuukmXHPNNZgzZw5uvvlm/PCHP8SiRYtw++23AwBuv/12fOlLX0JJSQlSU1Px3HPPBWppREREREQTCliAXF5ejn379p338eLiYuzateu8j2u1WrzwwguBWg4RERERkVsCVmJBRERERBSOGCATEREREY3BAJmIiIiIaAwGyEREREREYzBAJiIiIiIagwEyEREREdEYDJCJiIiIiMZggExERERENAYDZCIiIiKiMRggExERERGNwQCZiIiIiGgMQRRFMdSL8FZ6ejqKioqCft2uri5kZGQE/bo0Me6J/HBP5Id7Ik/cF/nhnshPoPakqakJ3d3d5308rAPkUFm6dCl2794d6mXQGNwT+eGeyA/3RJ64L/LDPZGfYO8JSyyIiIiIiMZggExERERENAYDZC9s3Lgx1Eugc3BP5Id7Ij/cE3nivsgP90R+gr0nrEEmIiIiIhqDGWQiIiIiojEYIBMRERERjcEAGcBtt92GzMxMzJs3b/RjBw4cQEVFBebPn49rr70W/f39AFz98mJjY7Fw4UIsXLgQX/va1wAAAwMDox9buHAh0tPT8e1vfzsUX07E8Me+AMCzzz6L+fPno7y8HGvWrBm33yG5x1978s9//hPl5eWYO3cu7rvvvqB/HZHEkz0BgIMHD6KiogJz587F/PnzYbFYAAB79uzB/PnzUVJSgrvvvhusvvOev/bkBz/4AQoKCqDT6YL+NUQaf+yJyWTCZz7zGcyaNQtz587F9773vVB8KRHFXz8ra9aswYIFCzB37lx87Wtfg8Ph8H1xIonvvfeeuGfPHnHu3LmjH1u6dKlYU1MjiqIobtmyRfzhD38oiqIoNjY2nvW4iSxevFh87733ArPgKOGPfbHZbGJGRobY1dUliqIo3nvvveKPf/zjwC8+QvljT7q7u8WCggKxs7NTFEVRvPXWW8V33nknCKuPTJ7sic1mE+fPny/u379fFEXXXtjtdlEURXHZsmVibW2t6HQ6xTVr1oivv/56kL+SyOGvPamtrRVbW1vF+Pj4IH8FkccfezI0NCRu375dFEVRHB4eFleuXMmfEx/562fFaDSKoiiKTqdTvOGGG8Rnn33W57UxgwzgkksuQWpq6lkfO3HiBC655BIAwOrVq/HSSy+5/XwnTpxAZ2cnLr74Yr+uM9r4Y19EUYQoihgaGoIoiujv70dubm7A1hzp/LEnDQ0NKC0tHZ2IdPnll3v080Vn82RP3nrrLZSXl2PBggUAgLS0NCiVSrS1taG/vx8rVqyAIAi49dZbsXXr1qB+HZHEH3sCACtWrEBOTk4QVx65/LEncXFxWLVqFQBAo9Fg8eLF0Ov1QfwqIo+/flYSExMBAHa7HVarFYIg+Lw2BsgTmDt3LrZt2wYAeOGFF9Dc3Dz6b42NjVi0aBEuvfRSfPDBB+d97nPPPYf169f7ZYPobJ7ui1qtxmOPPYb58+cjNzcXR44cwe233x6StUcqT/ekpKQEx48fR1NTE+x2O7Zu3XrW55DvJtqTEydOQBAEXHnllVi8eDF+9atfAQBaWlqQn58/+vn5+floaWkJ/sIjmKd7QoHny54YDAb8+9//RlVVVVDXHA283Zcrr7wSmZmZSEhIwLp163xeBwPkCfz1r3/FH//4RyxZsgQDAwPQaDQAgJycHJw+fRr79u3Db37zG3zhC184qz4GcAXIn//850Ox7Ijn6b7YbDY89thj2LdvH1pbW1FeXo5NmzaF+KuILJ7uSUpKCh577DGsX78eF198MYqKikazAOQfE+2J3W7Hhx9+iGeeeQYffvghXn75ZVRXV4d4tdGBeyI/3u6J3W7H5z//edx9990oLi4O1fIjlrf78p///AdtbW0YHh7G9u3bfV6HyudniFCzZs3CW2+9BcD1ruW1114DAMTExCAmJgYAsGTJEsyYMQMnTpzA0qVLAbiKy+12O5YsWRKahUc4T/dFHDloNGPGDADATTfdhIcffjgEK49c3vysXHvttbj22msBAJs3b2aA7GcT7Ul+fj4uueQSpKenAwCuvvpq7N27F1/84hfPulWs1+uRl5cX/IVHME/3hJnJwPN2TzZu3IjS0lIexA8QX35WtFotrr/+emzbtg2rV6/2aR3MIE+gs7MTAOB0OvHQQw+NnsDv6uoaPR3Z0NCAurq6s95BPvvss8weB5Cn+5KXl4cjR46gq6sLAPD2229j9uzZoVl8hPLmZ0X6nL6+Pvzxj3/EHXfcEYKVR66J9uTKK6/EoUOHYDKZYLfb8d5772HOnDnIyclBYmIiduzYAVEU8fTTT+P6668P5ZcQcTzdEwo8b/bkhz/8IYxGI37729+GatkRz9N9GRwcRFtbGwBXlvm1117DrFmzfF+Iz8f8IsDNN98sZmdniyqVSszLyxMff/xx8be//a1YWloqlpaWivfdd5/odDpFURTFF198UZwzZ464YMECcdGiReIrr7xy1nNNnz5dPHr0aCi+jIjjr3157LHHxFmzZonz588Xr7nmGrG7uztUX1LY89ee3HzzzeLs2bPF2bNn++W0cTTzZE9EURT/9re/iXPmzBHnzp0r3nvvvaMf/+STT8S5c+eKxcXF4je/+c2zPoc84689uffee8W8vDxREAQxLy
+PHXh84I89aW5uFgGIs2bNEhcsWCAuWLBA/Mtf/hKqLyki+GNf2tvbxaVLl4rz588X586dK37rW98SbTabz2vjqGkiIiIiojFYYkFERERENAYDZCIiIiKiMRggExERERGNwQCZiIiIiGgMBshERERERGMwQCYiikA/+clP8Otf/zrUyyAiCksMkImIiIiIxmCATEQUIX7+859j5syZWLlyJY4fPw4A+N3vfoc5c+agvLwcN998c4hXSEQUHlShXgAREfluz549eO6557B//37Y7XYsXrwYS5YswcMPP4zGxkbExMTAYDCEeplERGGBGWQiogjwwQcfYO3atYiLi0NiYiKuu+46AEB5eTluueUW/P3vf4dKxZwIEZE7GCATEUWw1157Dd/85jexd+9eLFu2DHa7PdRLIiKSPQbIREQR4JJLLsHWrVthNpsxMDCAf//733A6nWhubsaqVavwy1/+EkajEYODg6FeKhGR7PF+GxFRBFi8eDHWr1+PBQsWIDMzE8uWLYMgCPjiF78Io9EIURRx9913Izk5OdRLJSKSPUEURTHUiyAiIiIikguWWBARERERjcEAmYiIiIhoDAbIRERERERjMEAmIiIiIhqDATIRERER0RgMkImIiIiIxmCATEREREQ0xv8HRwwiprtWl2MAAAAASUVORK5CYII=\n", - "text/plain": [ - "<Figure size 720x432 with 1 Axes>" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "m = NeuralProphet(seasonality_mode=\"multiplicative\", learning_rate=0.1)\n", "metrics_train2 = m.fit(df=df, freq=\"MS\")\n", @@ -313,12 +290,13 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 3, "id": "fbf84c93-dc7c-406d-941a-87ae5b8adafa", "metadata": {}, "outputs": [], "source": [ - "METRICS = [\"SmoothL1Loss\", \"MAE\", \"RMSE\"]\n", + "METRICS = [\"MAE\", \"RMSE\"]\n", + "METRICS_VAL = [\"MAE_val\", \"RMSE_val\"]\n", "params = {\"seasonality_mode\": \"multiplicative\", \"learning_rate\": 0.1}\n", "\n", "df = pd.read_csv(data_location + \"air_passengers.csv\")\n", @@ -327,35 +305,27 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 5, "id": "b585c737-08f6-468e-890a-558348f6b9ee", "metadata": { "tags": [] }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - " \r" - ] - } - ], + "outputs": [], "source": [ "metrics_train = pd.DataFrame(columns=METRICS)\n", - "metrics_test = pd.DataFrame(columns=METRICS)\n", + "metrics_test = pd.DataFrame(columns=METRICS_VAL)\n", "\n", "for df_train, df_test in folds:\n", " m = NeuralProphet(**params)\n", " train = m.fit(df=df_train, freq=\"MS\")\n", " test = m.test(df=df_test)\n", " metrics_train = metrics_train.append(train[METRICS].iloc[-1])\n", - " metrics_test = metrics_test.append(test[METRICS].iloc[-1])" + " metrics_test = metrics_test.append(test[METRICS_VAL].iloc[-1])" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 5, "id": "e09fc84b-3e11-47be-aa88-024b0c27c8a1", "metadata": { "tags": [] @@ -382,49 +352,44 @@ " <thead>\n", " <tr style=\"text-align: right;\">\n", " <th></th>\n", - " <th>SmoothL1Loss</th>\n", - " <th>MAE</th>\n", - " <th>RMSE</th>\n", + " <th>MAE_val</th>\n", + " <th>RMSE_val</th>\n", " </tr>\n", " </thead>\n", " <tbody>\n", " <tr>\n", " <th>mean</th>\n", - " <td>0.009792</td>\n", - " <td>21.090633</td>\n", - " <td>26.227701</td>\n", + " <td>22.738665</td>\n", + " <td>27.219829</td>\n", " </tr>\n", " <tr>\n", " <th>std</th>\n", - " <td>0.008580</td>\n", - " <td>7.266532</td>\n", - " <td>9.003498</td>\n", + " <td>5.473991</td>\n", + " <td>5.156014</td>\n", " </tr>\n", " <tr>\n", " <th>min</th>\n", - " <td>0.003340</td>\n", - " <td>10.293222</td>\n", - " <td>13.339386</td>\n", + " <td>16.465864</td>\n", + " <td>20.199633</td>\n", " </tr>\n", " <tr>\n", " <th>max</th>\n", - " <td>0.023061</td>\n", - " <td>26.983458</td>\n", - " <td>34.758419</td>\n", + " <td>30.117323</td>\n", + " <td>32.755363</td>\n", " </tr>\n", " </tbody>\n", "</table>\n", "</div>" ], "text/plain": [ - " SmoothL1Loss MAE RMSE\n", - "mean 0.009792 21.090633 26.227701\n", - "std 0.008580 7.266532 9.003498\n", - "min 0.003340 10.293222 
13.339386\n", - "max 0.023061 26.983458 34.758419" + " MAE_val RMSE_val\n", + "mean 22.738665 27.219829\n", + "std 5.473991 5.156014\n", + "min 16.465864 20.199633\n", + "max 30.117323 32.755363" ] }, - "execution_count": 9, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -448,18 +413,10 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 9, "id": "f23fd06b-2f0c-4f2c-92fa-5ad28e286d4c", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Importing plotly failed. Interactive plots will not work.\n" - ] - } - ], + "outputs": [], "source": [ "from neuralprophet.benchmark import Dataset, NeuralProphetModel, SimpleExperiment, CrossValidationExperiment" ] @@ -475,39 +432,10 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 4, "id": "c7d050b7-2b8e-40d1-ba4d-e43e22a64c48", "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "fb260098c8ea489186b3d268b6628065", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/107 [00:00<?, ?it/s]" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "052d56ecaad5483f9c82c565329c060c", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/107 [00:00<?, ?it/s]" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "ts = Dataset(df=pd.read_csv(data_location + \"air_passengers.csv\"), name=\"air_passengers\", freq=\"MS\")\n", "params = {\"seasonality_mode\": \"multiplicative\"}\n", @@ -560,95 +488,10 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 5, "id": "9a5812bd-45d5-4a45-af9f-8dac25524e15", "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "dc921d64bf8d4e9db22b1c535aac7465", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/107 [00:00<?, ?it/s]" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "5eba2ce8155447a6b5c3177b1d4367fd", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/107 [00:00<?, ?it/s]" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "3618c693fbda4148912fb3dd9274f989", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/108 [00:00<?, ?it/s]" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "3c64478c367445308bcb368d9f74ab2c", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/108 [00:00<?, ?it/s]" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "536037272e07411d9a7dbabfdd9edb22", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/109 [00:00<?, ?it/s]" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "b180271ce8184fbf98306b9aff2fe3f2", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/109 [00:00<?, ?it/s]" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], 
"source": [ "ts = Dataset(df=pd.read_csv(data_location + \"air_passengers.csv\"), name=\"air_passengers\", freq=\"MS\")\n", "params = {\"seasonality_mode\": \"multiplicative\"}\n", @@ -712,18 +555,10 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 12, "id": "8e342fef-b57d-4ab8-8550-f9311cb0e032", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - " \r" - ] - } - ], + "outputs": [], "source": [ "m = NeuralProphet(seasonality_mode=\"multiplicative\", learning_rate=0.1)\n", "\n", @@ -745,83 +580,10 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 13, "id": "635e284c-b3b7-477c-ada0-9a131912137f", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "<div>\n", - "<style scoped>\n", - " .dataframe tbody tr th:only-of-type {\n", - " vertical-align: middle;\n", - " }\n", - "\n", - " .dataframe tbody tr th {\n", - " vertical-align: top;\n", - " }\n", - "\n", - " .dataframe thead th {\n", - " text-align: right;\n", - " }\n", - "</style>\n", - "<table border=\"1\" class=\"dataframe\">\n", - " <thead>\n", - " <tr style=\"text-align: right;\">\n", - " <th></th>\n", - " <th>SmoothL1Loss</th>\n", - " <th>MAE</th>\n", - " <th>RMSE</th>\n", - " <th>split</th>\n", - " </tr>\n", - " </thead>\n", - " <tbody>\n", - " <tr>\n", - " <th>563</th>\n", - " <td>0.000379</td>\n", - " <td>5.379905</td>\n", - " <td>6.704538</td>\n", - " <td>train1</td>\n", - " </tr>\n", - " <tr>\n", - " <th>491</th>\n", - " <td>0.000318</td>\n", - " <td>6.277920</td>\n", - " <td>7.928728</td>\n", - " <td>train2</td>\n", - " </tr>\n", - " <tr>\n", - " <th>0</th>\n", - " <td>0.005016</td>\n", - " <td>18.140518</td>\n", - " <td>25.140703</td>\n", - " <td>validate</td>\n", - " </tr>\n", - " <tr>\n", - " <th>0</th>\n", - " <td>0.005475</td>\n", - " <td>27.335314</td>\n", - " <td>33.275681</td>\n", - " <td>test</td>\n", - " </tr>\n", - " </tbody>\n", - "</table>\n", - "</div>" - ], - "text/plain": [ - " SmoothL1Loss MAE RMSE split\n", - "563 0.000379 5.379905 6.704538 train1\n", - "491 0.000318 6.277920 7.928728 train2\n", - "0 0.005016 18.140518 25.140703 validate\n", - "0 0.005475 27.335314 33.275681 test" - ] - }, - "execution_count": 18, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "metrics_train1[\"split\"] = \"train1\"\n", "metrics_train2[\"split\"] = \"train2\"\n", @@ -842,12 +604,13 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 8, "id": "dac90c4f-63e7-42f2-b49e-f6b3177e070f", "metadata": {}, "outputs": [], "source": [ - "METRICS = [\"SmoothL1Loss\", \"MAE\", \"RMSE\"]\n", + "METRICS = [\"MAE\", \"RMSE\"]\n", + "METRICS_VAL = [\"MAE_val\", \"RMSE_val\"]\n", "params = {\"seasonality_mode\": \"multiplicative\", \"learning_rate\": 0.1}\n", "\n", "df = pd.read_csv(data_location + \"air_passengers.csv\")\n", @@ -858,107 +621,45 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 14, "id": "96d8154e-c77a-4836-ad92-9b205e88b268", "metadata": { "tags": [] }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - " \r" - ] - } - ], + "outputs": [], "source": [ "metrics_train1 = pd.DataFrame(columns=METRICS)\n", - "metrics_val = pd.DataFrame(columns=METRICS)\n", + "metrics_val = pd.DataFrame(columns=METRICS_VAL)\n", "for df_train1, df_val in folds_val:\n", " m = NeuralProphet(**params)\n", " train1 = m.fit(df=df_train, freq=\"MS\")\n", " val = m.test(df=df_val)\n", " metrics_train1 = 
metrics_train1.append(train1[METRICS].iloc[-1])\n", - " metrics_val = metrics_val.append(val[METRICS].iloc[-1])\n", + " metrics_val = metrics_val.append(val[METRICS_VAL].iloc[-1])\n", "\n", "metrics_train2 = pd.DataFrame(columns=METRICS)\n", - "metrics_test = pd.DataFrame(columns=METRICS)\n", + "metrics_test = pd.DataFrame(columns=METRICS_val)\n", "for df_train2, df_test in folds_test:\n", " m = NeuralProphet(**params)\n", " train2 = m.fit(df=df_train2, freq=\"MS\")\n", " test = m.test(df=df_test)\n", " metrics_train2 = metrics_train2.append(train2[METRICS].iloc[-1])\n", - " metrics_test = metrics_test.append(test[METRICS].iloc[-1])" + " metrics_test = metrics_test.append(test[METRICS_VAL].iloc[-1])" ] }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "id": "c5ea1cbc-fbb2-46b1-9a43-4547f8b6113a", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "<div>\n", - "<style scoped>\n", - " .dataframe tbody tr th:only-of-type {\n", - " vertical-align: middle;\n", - " }\n", - "\n", - " .dataframe tbody tr th {\n", - " vertical-align: top;\n", - " }\n", - "\n", - " .dataframe thead th {\n", - " text-align: right;\n", - " }\n", - "</style>\n", - "<table border=\"1\" class=\"dataframe\">\n", - " <thead>\n", - " <tr style=\"text-align: right;\">\n", - " <th></th>\n", - " <th>SmoothL1Loss</th>\n", - " <th>MAE</th>\n", - " <th>RMSE</th>\n", - " </tr>\n", - " </thead>\n", - " <tbody>\n", - " <tr>\n", - " <th>mean</th>\n", - " <td>0.000265</td>\n", - " <td>6.721306</td>\n", - " <td>8.426241</td>\n", - " </tr>\n", - " <tr>\n", - " <th>std</th>\n", - " <td>0.000021</td>\n", - " <td>0.091057</td>\n", - " <td>0.133246</td>\n", - " </tr>\n", - " </tbody>\n", - "</table>\n", - "</div>" - ], - "text/plain": [ - " SmoothL1Loss MAE RMSE\n", - "mean 0.000265 6.721306 8.426241\n", - "std 0.000021 0.091057 0.133246" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "metrics_train2.describe().loc[[\"mean\", \"std\"]]" ] }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 10, "id": "13f96119-df3c-4a0d-adeb-f2816ff88587", "metadata": { "tags": [] @@ -985,35 +686,32 @@ " <thead>\n", " <tr style=\"text-align: right;\">\n", " <th></th>\n", - " <th>SmoothL1Loss</th>\n", - " <th>MAE</th>\n", - " <th>RMSE</th>\n", + " <th>MAE_val</th>\n", + " <th>RMSE_val</th>\n", " </tr>\n", " </thead>\n", " <tbody>\n", " <tr>\n", " <th>mean</th>\n", - " <td>0.009150</td>\n", - " <td>30.096512</td>\n", - " <td>31.593293</td>\n", + " <td>30.366895</td>\n", + " <td>31.763364</td>\n", " </tr>\n", " <tr>\n", " <th>std</th>\n", - " <td>0.006674</td>\n", - " <td>12.822483</td>\n", - " <td>13.912322</td>\n", + " <td>13.052365</td>\n", + " <td>14.098237</td>\n", " </tr>\n", " </tbody>\n", "</table>\n", "</div>" ], "text/plain": [ - " SmoothL1Loss MAE RMSE\n", - "mean 0.009150 30.096512 31.593293\n", - "std 0.006674 12.822483 13.912322" + " MAE_val RMSE_val\n", + "mean 30.366895 31.763364\n", + "std 13.052365 14.098237" ] }, - "execution_count": 22, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -1024,64 +722,10 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 15, "id": "6c6327f8-bcb3-4454-bc9f-a1fe26f8bea6", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "<div>\n", - "<style scoped>\n", - " .dataframe tbody tr th:only-of-type {\n", - " vertical-align: middle;\n", - " }\n", - "\n", - " .dataframe tbody tr th {\n", - " vertical-align: top;\n", 
- " }\n", - "\n", - " .dataframe thead th {\n", - " text-align: right;\n", - " }\n", - "</style>\n", - "<table border=\"1\" class=\"dataframe\">\n", - " <thead>\n", - " <tr style=\"text-align: right;\">\n", - " <th></th>\n", - " <th>SmoothL1Loss</th>\n", - " <th>MAE</th>\n", - " <th>RMSE</th>\n", - " </tr>\n", - " </thead>\n", - " <tbody>\n", - " <tr>\n", - " <th>mean</th>\n", - " <td>0.001057</td>\n", - " <td>14.137106</td>\n", - " <td>15.214975</td>\n", - " </tr>\n", - " <tr>\n", - " <th>std</th>\n", - " <td>0.001216</td>\n", - " <td>7.679862</td>\n", - " <td>7.979069</td>\n", - " </tr>\n", - " </tbody>\n", - "</table>\n", - "</div>" - ], - "text/plain": [ - " SmoothL1Loss MAE RMSE\n", - "mean 0.001057 14.137106 15.214975\n", - "std 0.001216 7.679862 7.979069" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "metrics_test.describe().loc[[\"mean\", \"std\"]]" ] @@ -1108,9 +752,9 @@ "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" }, "kernelspec": { - "display_name": "np-dev", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "np-dev" + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -1122,7 +766,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.7" + "version": "3.9.13" } }, "nbformat": 4,
cross-validation demo error
When I was studying the cross-validation page in the official documentation, I found that the following code raises an error, reporting "['SmoothL1Loss'] not in index". The output of the test metrics no longer includes these items. Was this changed in a version update?
```
metrics_train = pd.DataFrame(columns=METRICS)
metrics_test = pd.DataFrame(columns=METRICS)

for df_train, df_test in folds:
    m = NeuralProphet(**params)
    train = m.fit(df=df_train, freq="MS")
    test = m.test(df=df_test)
    metrics_train = metrics_train.append(train[METRICS].iloc[-1])
    metrics_test = metrics_test.append(test[METRICS].iloc[-1])
```
The test metrics look like this:
![image](https://user-images.githubusercontent.com/43956952/227927644-973bec3d-3695-44d4-a1f2-0b1a7667801f.png)
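For reference, the notebook fix in the patch above renames the holdout metric columns. Below is a minimal sketch of the corrected loop, assuming `folds` and `params` are defined as earlier in the demo; note that `DataFrame.append` follows the demo's style and is deprecated in newer pandas.

```python
import pandas as pd
from neuralprophet import NeuralProphet

# Train and holdout metric columns are named differently since the Lightning
# migration: fit() reports e.g. "MAE"/"RMSE", test() reports "MAE_val"/"RMSE_val".
METRICS = ["MAE", "RMSE"]
METRICS_VAL = ["MAE_val", "RMSE_val"]

metrics_train = pd.DataFrame(columns=METRICS)
metrics_test = pd.DataFrame(columns=METRICS_VAL)

for df_train, df_test in folds:
    m = NeuralProphet(**params)
    train = m.fit(df=df_train, freq="MS")
    test = m.test(df=df_test)
    metrics_train = metrics_train.append(train[METRICS].iloc[-1])
    metrics_test = metrics_test.append(test[METRICS_VAL].iloc[-1])
```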
@CrazyBalthazar thanks for reporting the issue, I can confirm it. We'll get back to you with a fix.

@karl-richter it seems the return values of the metrics in our `test` function changed with the Lightning migration. Do you have any idea how to retrieve the prior metrics for `test` calls?

Hi, we are planning to include this in our V1 release in three weeks, but have no capacity to work on this. If someone wants to take over this issue, you are welcome to do so.
2023-04-21T01:48:58
ourownstory/neural_prophet
1300
ourownstory__neural_prophet-1300
[ "1294", "1257" ]
b9ffa72753eeb805e427c4e1b93522a36cce0325
diff --git a/neuralprophet/data/process.py b/neuralprophet/data/process.py --- a/neuralprophet/data/process.py +++ b/neuralprophet/data/process.py @@ -397,6 +397,11 @@ def _check_dataframe( pd.DataFrame checked dataframe """ + if len(df) < (model.n_forecasts + model.n_lags) and not future: + raise ValueError( + "Dataframe has less than n_forecasts + n_lags rows. " + "Forecasting not possible. Please either use a larger dataset, or adjust the model parameters." + ) df, _, _, _ = df_utils.prep_or_copy_df(df) df, regressors_to_remove, lag_regressors_to_remove = df_utils.check_dataframe( df=df,
diff --git a/tests/test_integration.py b/tests/test_integration.py --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -1355,6 +1355,12 @@ def test_get_latest_forecast(): with pytest.raises(Exception): m.get_latest_forecast(forecast, include_previous_forecasts=10) + log.info("Not enough values in df") + df3 = df.iloc[:10].copy(deep=True) + df3["ID"] = "df3" + with pytest.raises(Exception): + metrics_df = m.fit(df3, freq="D") + def test_metrics(): log.info("testing: Plotting")
Unclear error when doing auto-regression with too small a dataset

### Discussed in https://github.com/ourownstory/neural_prophet/discussions/1257

Originally posted by **pzaika**, April 11, 2023:

When using auto-regression with a dataset that is smaller than the requested number of forecasts + lags (specifically, when n_samples = len(df) - max_lags + 1 - n_forecasts is not positive), the fit method throws an IndexError. This hides the real problem, which is that the user is trying to train a model on too little data. It would be better to raise an error telling the user to lower the number of forecasts or lags they are asking for.
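For illustration, here is a standalone sketch of the size check that the patch above adds; the error message is taken from the patch, while the `n_lags`/`n_forecasts` values and the tiny dataframe are arbitrary.

```python
import pandas as pd

n_lags, n_forecasts = 24, 12  # illustrative AR configuration
df = pd.DataFrame({"ds": pd.date_range("2024-01-01", periods=10), "y": range(10)})

# Each AR training sample consumes n_lags history rows plus n_forecasts target
# rows, so fitting needs at least n_lags + n_forecasts rows in the dataframe.
if len(df) < n_lags + n_forecasts:
    raise ValueError(
        "Dataframe has less than n_forecasts + n_lags rows. "
        "Forecasting not possible. Please either use a larger dataset, or adjust the model parameters."
    )
```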
2023-04-21T23:53:05
ourownstory/neural_prophet
1331
ourownstory__neural_prophet-1331
[ "1317" ]
24fd354d6ccdd16cf29a67da3c0c463d3933ff43
diff --git a/neuralprophet/time_dataset.py b/neuralprophet/time_dataset.py --- a/neuralprophet/time_dataset.py +++ b/neuralprophet/time_dataset.py @@ -314,20 +314,12 @@ def tabularize_univariate_datetime( inputs = OrderedDict({}) def _stride_time_features_for_forecasts(x): - # only for case where n_lags > 0 - if x.dtype != np.float64: - dtype = np.datetime64 - else: - dtype = np.float64 - return np.array([x[i + max_lags - n_lags : i + max_lags + n_forecasts] for i in range(n_samples)], dtype=dtype) + return np.array( + [x[i + max_lags - n_lags : i + max_lags + n_forecasts] for i in range(n_samples)], dtype=x.dtype + ) def _stride_future_time_features_for_forecasts(x): - # only for case where n_lags > 0 - if x.dtype != np.float64: - dtype = np.datetime64 - else: - dtype = np.float64 - return np.array([x[max_lags + i : max_lags + i + n_forecasts] for i in range(n_samples)], dtype=dtype) + return np.array([x[max_lags + i : max_lags + i + n_forecasts] for i in range(n_samples)], dtype=x.dtype) def _stride_lagged_features(df_col_name, feature_dims): # only for case where max_lags > 0
Float32 target values encounter TypeError: can't convert np.ndarray of type numpy.datetime64.
**Prerequisites**

* [X] Put an X between the brackets on this line if you have done all of the following:
    * Reproduced the problem in a new virtualenv with only neuralprophet installed, directly from github:
      ```shell
      git clone <copied link from github>
      cd neural_prophet
      pip install .
      ```
    * Checked the Answered Questions on the Github Discussion board: https://github.com/ourownstory/neural_prophet/discussions

      If you have the same question but the Answer does not solve your issue, please continue the conversation there.
    * Checked that your issue isn't already filed: https://github.com/ourownstory/neural_prophet/issues

      If you have the same issue but there is a twist to your situation, please add an explanation there.
    * Considered whether your bug might actually be solvable by getting a question answered:
        * Please [post a package use question](https://github.com/ourownstory/neural_prophet/discussions/categories/q-a-get-help-using-neuralprophet)
        * Please [post a forecasting best practice question](https://github.com/ourownstory/neural_prophet/discussions/categories/q-a-forecasting-best-practices)
        * Please [post an idea or feedback](https://github.com/ourownstory/neural_prophet/discussions/categories/ideas-feedback)

**Describe the bug**
A TypeError is raised when fitting on an np.float32 target column; the problem appears to be in time_dataset.py.

**To Reproduce**
```python
import pandas as pd
from neuralprophet import NeuralProphet

df = pd.read_csv("https://github.com/ourownstory/neuralprophet-data/raw/main/kaggle-energy/datasets/tutorial01.csv")
df.y = df.y.astype('float32')
m = NeuralProphet()
metrics = m.fit(df)
```

**Expected behavior**
The model should have no problem fitting.

**What actually happens**
```
File "/home/gordon/src/neural_prophet/neuralprophet/time_dataset.py", line 145, in init_after_tabularized
    self.targets = torch.from_numpy(targets).type(targets_dtype).unsqueeze(dim=2)
TypeError: can't convert np.ndarray of type numpy.datetime64. The only supported types are: float64, float32, float16, complex64, complex128, int64, int32, int16, int8, uint8, and bool.
```

**Environment (please complete the following information):**
Reproduced on Python 3.10.11. The bug is present in neuralprophet 0.6.0 but not in 0.5.4; I tried both the PyPI release and an install from the cloned GitHub repo.
I looked into this and I think it's an issue with the `_stride_future_time_features_for_forecasts` function within `tabularize_univariate_datetime`. It looks like the target values will get transformed to `np.datetime64` unless the input is `np.float64`. ``` def _stride_future_time_features_for_forecasts(x): # only for case where n_lags > 0 if x.dtype != np.float64: dtype = np.datetime64 else: dtype = np.float64 return np.array([x[max_lags + i : max_lags + i + n_forecasts] for i in range(n_samples)], dtype=dtype) ``` @LeonieFreisinger I think you made this [change](https://github.com/ourownstory/neural_prophet/commit/ba949c76903f4c20ac58049cae5bcd844043b757)?
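The hint above points at the dtype branch, and the merged patch simply strides with `dtype=x.dtype`. A minimal NumPy/Torch sketch of the repaired behavior follows; the window sizes are illustrative.

```python
import numpy as np
import torch

y = np.arange(6, dtype=np.float32)
n_lags, n_forecasts, n_samples = 2, 1, 3

# Patched striding keeps the input dtype instead of forcing datetime64/float64 ...
windows = np.array(
    [y[i : i + n_lags + n_forecasts] for i in range(n_samples)], dtype=y.dtype
)
assert windows.dtype == np.float32

# ... so the later tensor conversion of the targets succeeds for float32 inputs.
targets = torch.from_numpy(windows)
```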
2023-05-17T23:07:54
ourownstory/neural_prophet
1395
ourownstory__neural_prophet-1395
[ "1393" ]
89e3433ac0d8ee2ee7de12a78736e1ce15f82e76
diff --git a/neuralprophet/plot_model_parameters_matplotlib.py b/neuralprophet/plot_model_parameters_matplotlib.py --- a/neuralprophet/plot_model_parameters_matplotlib.py +++ b/neuralprophet/plot_model_parameters_matplotlib.py @@ -265,9 +265,12 @@ def plot_trend(m, quantile, ax=None, plot_name="Trend", figsize=(10, 6), df_name trend_1 = trend_0 else: if m.model.config_trend.trend_global_local == "local": - trend_1 = trend_0 + m.model.trend.trend_k0[quantile_index, m.model.id_dict[df_name]].detach().numpy() + trend_1 = ( + trend_0 + + m.model.trend.trend_k0[quantile_index, m.model.id_dict[df_name]].detach().numpy().squeeze() + ) else: - trend_1 = trend_0 + m.model.trend.trend_k0[quantile_index, 0].detach().numpy() + trend_1 = trend_0 + m.model.trend.trend_k0[quantile_index, 0].detach().numpy().squeeze() data_params = m.config_normalization.get_data_params(df_name) shift = data_params["y"].shift
Re-add compatibility with NumPy >=1.24
**Prerequisites**

* [x] Put an X between the brackets on this line if you have done all of the following:
    * Checked the Answered Questions on the Github Discussion board: https://github.com/ourownstory/neural_prophet/discussions

      If you have the same question but the Answer does not solve your issue, please continue the conversation there.
    * Checked that your issue isn't already filed: https://github.com/ourownstory/neural_prophet/issues

      If you have the same issue but there is a twist to your situation, please add an explanation there.
    * Considered whether your issue might need further discussing before being defined as a feature request: Please [post an idea or feedback](https://github.com/ourownstory/neural_prophet/discussions/categories/ideas-feedback)

**Is your feature request related to a problem? Please describe.**
We are currently rebuilding our Apache Airflow setup with the use of [constraints files](https://raw.githubusercontent.com/apache/airflow/constraints-2.6.3/constraints-3.10.txt) to keep all the dependencies in check. However, this means we _have_ to use NumPy 1.24, and unfortunately this version was declared non-compatible in #1080. There were no issues linked there, so I have no idea whether this is still a problem, but there were plenty of updates to matplotlib in the meantime, so I hope this issue is gone; maybe @karl-richter can answer this?

**Describe the solution you'd like**
Declare this library compatible with NumPy 1.24 or even 1.25.

**Describe alternatives you've considered**
None.

**Additional context**
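For context, the patch above appends `.squeeze()` to the trend coefficient extracted from the model tensor. A NumPy-only sketch of why the leftover length-1 dimension matters on newer NumPy releases; the values here are made up.

```python
import numpy as np

trend_0 = 1.0
trend_k0 = np.array([0.25])  # coefficient carrying a leftover length-1 dimension

broken = trend_0 + trend_k0           # shape (1,) array, not a scalar; implicit
                                      # array-to-scalar conversion is deprecated
                                      # in recent NumPy releases
fixed = trend_0 + trend_k0.squeeze()  # 0-d result, safe wherever a scalar is expected
```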
2023-08-08T19:42:15
ourownstory/neural_prophet
1407
ourownstory__neural_prophet-1407
[ "1367" ]
fcc13e95ebde6257426ecd63a43f44b5646a16be
diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -1101,13 +1101,15 @@ def predict(self, df: pd.DataFrame, decompose: bool = True, raw: bool = False): self.predict_steps = self.n_forecasts return df - def test(self, df: pd.DataFrame): + def test(self, df: pd.DataFrame, verbose: bool = True): """Evaluate model on holdout data. Parameters ---------- df : pd.DataFrame dataframe containing column ``ds``, ``y``, and optionally``ID`` with with holdout data + verbose : bool + If True, prints the test results. Returns ------- pd.DataFrame @@ -1132,7 +1134,7 @@ def test(self, df: pd.DataFrame): ) loader = self._init_val_loader(df) # Use Lightning to calculate metrics - val_metrics = self.trainer.test(self.model, dataloaders=loader) + val_metrics = self.trainer.test(self.model, dataloaders=loader, verbose=verbose) val_metrics_df = pd.DataFrame(val_metrics) # TODO Check whether supported by Lightning if not self.config_normalization.global_normalization:
Make Lightning trainer .test() verbosity configurable
**Prerequisites**

* [X] Put an X between the brackets on this line if you have done all of the following:
    * Checked the Answered Questions on the Github Discussion board: https://github.com/ourownstory/neural_prophet/discussions

      If you have the same question but the Answer does not solve your issue, please continue the conversation there.
    * Checked that your issue isn't already filed: https://github.com/ourownstory/neural_prophet/issues

      If you have the same issue but there is a twist to your situation, please add an explanation there.
    * Considered whether your issue might need further discussing before being defined as a feature request: Please [post an idea or feedback](https://github.com/ourownstory/neural_prophet/discussions/categories/ideas-feedback)

**Is your feature request related to a problem? Please describe.**
When doing cross-validation, my output gets flooded with the Lightning .test() output, which is frustrating. It drowns out the per-trial results, which is what I'm actually interested in, and lengthens the output by 20x.

**Describe the solution you'd like**
Add some sort of verbosity parameter to `.test()` that then gets passed into the `verbose={value}` parameter of `self.trainer.test`.

**Describe alternatives you've considered**
There seems to be no good alternative -- Lightning doesn't use a logger, it just `print()`s the output based on the verbose flag. What I've ended up doing in the meantime is disabling stdout altogether when running `.test()`, then re-enabling it before printing out the trial results.

**Additional context**
N/A
Hi @w-biggs, thanks for raising this. Most of our core developers are on summer break right now, so I'll try to help. Unfortunately, I don't get the problem. Could you elaborate on it?

When cross-validating, every time a model is tested, Lightning outputs something like:
```
──────────────────────────────────────────────────
       Test metric             DataLoader 0
──────────────────────────────────────────────────
        Loss_test           0.0026712114922702312
         MAE_val              18.709611892700195
        RMSE_val              22.74985122680664
      RegLoss_test                    0.0
──────────────────────────────────────────────────
```
If I'm doing 100 tuning trials with 5 different cross-validation splits, that means this output gets printed 500 times. If I'm working in a notebook, this becomes super unwieldy, and it drowns out the info I'm actually interested in. Lightning's `.test()` function, which is what prints this, has a `verbose` parameter that can be used to disable this output. It would be nice if NeuralProphet's `.test()` function could also take in a verbosity parameter, which would then be passed to Lightning's `.test()` function when it's called.

The easiest solution to me is to surface the `verbose` argument in the `.test()` API. Another verbosity issue I faced is with the `fit` method: if `learning_rate = 'auto'`, there's no way to disable the learning rate finder's progress bar, even with `minimal = True` or `progress=False`, as those only control the main fitting progress bar.
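With the `verbose` argument added in the patch above, the results table can now be silenced per call. A short usage sketch, assuming `folds` and `params` as in a typical cross-validation setup:

```python
from neuralprophet import NeuralProphet

# Quiet cross-validation: suppress Lightning's per-call results table.
for df_train, df_test in folds:
    m = NeuralProphet(**params)
    m.fit(df=df_train, freq="MS")
    metrics = m.test(df=df_test, verbose=False)  # new argument, defaults to True
```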
2023-08-24T18:24:21
ourownstory/neural_prophet
1411
ourownstory__neural_prophet-1411
[ "1391" ]
66a39bd87c6854bf7ab790361367f51285115ea5
diff --git a/neuralprophet/plot_utils.py b/neuralprophet/plot_utils.py --- a/neuralprophet/plot_utils.py +++ b/neuralprophet/plot_utils.py @@ -304,10 +304,6 @@ def get_valid_configuration( # move to utils ) else: df_name = m.id_list[0] - log.warning( - "Local model set with > 1 time series in the pd.DataFrame. Plotting components of first \ - time series. " - ) else: log.warning("Local normalization set, but df_name is None. Using global data params instead.") df_name = "__df__"
Plotting warning for global model is wrong
When fitting a global model and calling `m.plot_parameters`, the following warning makes no sense, as the parameters are all the same for every ID.
<img width="1113" alt="image" src="https://github.com/ourownstory/neural_prophet/assets/42536262/29d34879-50d2-43f3-a906-97d70fc1c109">
E.g., in the tutorial https://neuralprophet.com/tutorials/tutorial09.html, all plots for different IDs look the same (as they should, so no warning is needed): `m.plot_parameters(df_name=df['ID'].unique()[id])` for id = 0, 1, 2, 3, ...
2023-08-29T08:01:18
ourownstory/neural_prophet
1531
ourownstory__neural_prophet-1531
[ "1493" ]
305de42ca6a69447a7055b94813394745dc55a9d
diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -878,6 +878,9 @@ def fit( pd.DataFrame metrics with training and potentially evaluation metrics """ + if self.fitted: + raise RuntimeError("Model has been fitted already. Please initialize a new model to fit again.") + # Configuration if epochs is not None: self.config_train.epochs = epochs
diff --git a/tests/test_integration.py b/tests/test_integration.py --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -1705,3 +1705,16 @@ def test_unused_future_regressors(): m.add_future_regressor("price") m.add_lagged_regressor("cost") m.fit(df, freq="D") + + +def test_fit_twice_error(): + log.info("testing: Fit model twice error") + df = pd.read_csv(PEYTON_FILE, nrows=10) + m = NeuralProphet( + epochs=1, + batch_size=10, + learning_rate=1, + ) + _ = m.fit(df, freq="D") + with pytest.raises(RuntimeError): + _ = m.fit(df, freq="D")
Error attempting to load model and resume training
Using either 1.0.0rc4 or 1.0.0rc5, when I save a model, reload it, and call `fit()`, I get an exception: `'NoneType' object has no attribute 'global_local'`

The error occurs because `self.config_seasonality` is `None` upon reload.
```
self.num_seasonalities_modelled = len(self.id_list) if self.config_seasonality.global_local == "local" else 1
```
The following script triggers the problem:
```
import pandas as pd
from neuralprophet import NeuralProphet
import neuralprophet.utils
import numpy as np
import datetime

rows = 1000
df = pd.DataFrame({
    'ds': [datetime.datetime(2020, 1, 1) + datetime.timedelta(minutes=i) for i in range(rows)],
    'y': np.sin(2 * np.pi * np.arange(rows) / 100)
})

model = NeuralProphet(n_lags=50, n_forecasts=5, epochs=20)
metrics = model.fit(df)
neuralprophet.utils.save(model, "repro.np")
future = model.make_future_dataframe(df, periods=100)
forecast = model.predict(future)

loaded_model = neuralprophet.utils.load("repro.np")
retrain_metrics = loaded_model.fit(df)  # error occurs here

print("Done.")
```
FWIW, in my own (non-repro) code, it actually gets past this (it's unclear to me why), and fails downstream with `unsupported operand type(s) for -: 'numpy.ndarray' and 'Timestamp'` here:
```
arithmetic_op ([env]\lib\python3.10\site-packages\pandas\core\ops\array_ops.py:218)
_arith_method ([env]\lib\python3.10\site-packages\pandas\core\base.py:1325)
_arith_method ([env]\lib\python3.10\site-packages\pandas\core\series.py:6259)
__sub__ ([env]\lib\python3.10\site-packages\pandas\core\arraylike.py:110)
new_method ([env]\lib\python3.10\site-packages\pandas\core\ops\common.py:72)
flex_wrapper ([env]\lib\python3.10\site-packages\pandas\core\ops\__init__.py:197)
normalize ([env]\lib\python3.10\site-packages\neuralprophet\df_utils.py:416)
_normalize ([env]\lib\python3.10\site-packages\neuralprophet\data\transform.py:32)
_init_train_loader ([env]\lib\python3.10\site-packages\neuralprophet\forecaster.py:2560)
_train ([env]\lib\python3.10\site-packages\neuralprophet\forecaster.py:2642)
fit ([env]\lib\python3.10\site-packages\neuralprophet\forecaster.py:965)
...
```
My environment (under WSL) is stripped down to this .yml:
```
name: my_env
channels:
  - conda-forge
  - pytorch
  - nvidia
  - defaults
dependencies:
  - python=3.10
  - pip
  - pip:
    - neuralprophet==1.0.0rc5
```
Hi @tg2k, thanks for bringing this up - we are working on this. Do you face the same issue when using PyTorch's [load](https://pytorch.org/docs/stable/generated/torch.load.html) or [save](https://pytorch.org/docs/stable/generated/torch.save.html) functions?

@SimonWittner In the repro code, if I substitute with `torch.save(model, "repro.np")` and `loaded_model = torch.load("repro.np")`, it fails exactly the same way.

I had a thought that the `rows` value was small enough to make this a corner case. At 1000 it is less than a day. The `config_seasonality` ends up set before `fit()` and `None` after `fit()`. This isn't to suggest that it shouldn't be addressed, but just to clarify. Setting `rows = 10000` makes the test pass the `config_seasonality` issue, and gets it to the `unsupported operand` issue. This is still with the direct calls to the Torch load/save methods.

Hi @tg2k We currently do not have support for re-fitting a fitted model - the model needs to be re-initialized. The error you are encountering is not connected to saving and loading, but to fitting a model twice - once before saving and once after loading. I will add a proper error to make this clear, sorry for the confusion! If you would like to help us support re-training / continued training, you are very welcome to do a PR changing the configuration init/storing during fitting and re-fitting.
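Per the patch above, a second `fit()` now fails fast with a RuntimeError. Below is a short sketch of the resulting behavior and the supported retraining pattern, assuming `df` as in the repro script:

```python
from neuralprophet import NeuralProphet

m = NeuralProphet(n_lags=50, n_forecasts=5, epochs=20)
m.fit(df)
# m.fit(df)  # would now raise:
# RuntimeError: Model has been fitted already. Please initialize a new model to fit again.

# Supported pattern: initialize a fresh model instead of re-fitting the old one.
m_new = NeuralProphet(n_lags=50, n_forecasts=5, epochs=20)
m_new.fit(df)
```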
2024-02-12T23:56:03
ourownstory/neural_prophet
1582
ourownstory__neural_prophet-1582
[ "1575" ]
23543560b4ed278e84d1fd0f119d332342336d0d
diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -457,19 +457,6 @@ def __init__( drop_missing=drop_missing, ) - # Training - self.config_train = configure.Train( - quantiles=quantiles, - learning_rate=learning_rate, - epochs=epochs, - batch_size=batch_size, - loss_func=loss_func, - optimizer=optimizer, - newer_samples_weight=newer_samples_weight, - newer_samples_start=newer_samples_start, - trend_reg_threshold=trend_reg_threshold, - ) - if isinstance(collect_metrics, list): log.info( DeprecationWarning( @@ -499,6 +486,19 @@ def __init__( trend_local_reg=trend_local_reg, ) + # Training + self.config_train = configure.Train( + quantiles=quantiles, + learning_rate=learning_rate, + epochs=epochs, + batch_size=batch_size, + loss_func=loss_func, + optimizer=optimizer, + newer_samples_weight=newer_samples_weight, + newer_samples_start=newer_samples_start, + trend_reg_threshold=self.config_trend.trend_reg_threshold, + ) + # Seasonality self.config_seasonality = configure.ConfigSeasonality( mode=seasonality_mode,
Torch error under some optimization parameters during attempted Optuna outer-loop optimization
**Prerequisites**

* [ ] Put an X between the brackets on this line if you have done all of the following:
    * Reproduced the problem in a new virtualenv with only neuralprophet installed, directly from github:
      ```shell
      git clone <copied link from github>
      cd neural_prophet
      pip install .
      ```
    * Checked the Answered Questions on the Github Discussion board: https://github.com/ourownstory/neural_prophet/discussions

      If you have the same question but the Answer does not solve your issue, please continue the conversation there.
    * Checked that your issue isn't already filed: https://github.com/ourownstory/neural_prophet/issues

      If you have the same issue but there is a twist to your situation, please add an explanation there.
    * Considered whether your bug might actually be solvable by getting a question answered:
        * Please [post a package use question](https://github.com/ourownstory/neural_prophet/discussions/categories/q-a-get-help-using-neuralprophet)
        * Please [post a forecasting best practice question](https://github.com/ourownstory/neural_prophet/discussions/categories/q-a-forecasting-best-practices)
        * Please [post an idea or feedback](https://github.com/ourownstory/neural_prophet/discussions/categories/ideas-feedback)

**Describe the bug**
Torch error under some optimization parameters during attempted Optuna outer-loop optimization. Reconstructing the Optuna study will sometimes result in a run that works, but I expect this error will return at some point during optimization. It occurs with the following configuration (Optuna iteration):

```
Trial 0 failed with parameters: {'n_lags': 1, 'yearly_seasonality': 39, 'weekly_seasonality': 2, 'seasonality_reg': 8.209708790860189, 'trend_reg': 53.074534968657375, 'ar_reg': 0.18826427641273322, 'seasonality_mode': 'additive', 'trend_reg_threshold': True, 'ar_layers': [4, 4], 'changepoints_range': 0.9355036539269569, 'n_changepoints': 19}
```

```
File /scratch/devgab/C3_admissions/timeseries/lib/python3.12/site-packages/neuralprophet/utils.py:141, in reg_func_trend(weights, threshold)
    139 abs_weights = torch.abs(weights)
    140 if threshold is not None and not math.isclose(threshold, 0):
--> 141     abs_weights = torch.clamp(abs_weights - threshold, min=0.0)
    142 reg = torch.mean(torch.sum(abs_weights, dim=-1)).squeeze()
    143 return reg

RuntimeError: Subtraction, the `-` operator, with a bool tensor is not supported. If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.
```

**To Reproduce**
Reproducing is currently random due to parameter sampling in Optuna.

**Expected behavior**
No crash.

**What actually happens**
```
RuntimeError: Subtraction, the `-` operator, with a bool tensor is not supported. If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.
```

**Screenshots**
If applicable, add screenshots and console printouts to help explain your problem.
**Environment (please complete the following information):** ``` $ micromamba list List of packages in environment: "/scratch/devgab/C3_admissions/timeseries" Name Version Build Channel ──────────────────────────────────────────────────────────────────────────────────────────── _libgcc_mutex 0.1 conda_forge conda-forge _openmp_mutex 4.5 2_gnu conda-forge alembic 1.13.1 pyhd8ed1ab_1 conda-forge ansi2html 1.9.1 py312h7900ff3_0 conda-forge anyio 4.3.0 pyhd8ed1ab_0 conda-forge argon2-cffi 23.1.0 pyhd8ed1ab_0 conda-forge argon2-cffi-bindings 21.2.0 py312h98912ed_4 conda-forge arrow 1.3.0 pyhd8ed1ab_0 conda-forge asttokens 2.4.1 pyhd8ed1ab_0 conda-forge async-lru 2.0.4 pyhd8ed1ab_0 conda-forge attrs 23.2.0 pyh71513ae_0 conda-forge babel 2.14.0 pyhd8ed1ab_0 conda-forge beautifulsoup4 4.12.3 pyha770c72_0 conda-forge binutils_impl_linux-64 2.40 ha1999f0_1 conda-forge binutils_linux-64 2.40 hdade7a5_3 conda-forge blas 1.0 mkl conda-forge bleach 6.1.0 pyhd8ed1ab_0 conda-forge blinker 1.8.2 pyhd8ed1ab_0 conda-forge brotli 1.1.0 hd590300_1 conda-forge brotli-bin 1.1.0 hd590300_1 conda-forge brotli-python 1.1.0 py312h30efb56_1 conda-forge bzip2 1.0.8 hd590300_5 conda-forge ca-certificates 2024.2.2 hbcca054_0 conda-forge cached-property 1.5.2 hd8ed1ab_1 conda-forge cached_property 1.5.2 pyha770c72_1 conda-forge certifi 2024.2.2 pyhd8ed1ab_0 conda-forge cffi 1.16.0 py312hf06ca03_0 conda-forge charset-normalizer 3.3.2 pyhd8ed1ab_0 conda-forge click 8.1.7 unix_pyh707e725_0 conda-forge cmdstan 2.33.1 hff4ab46_0 conda-forge cmdstanpy 1.2.2 pyhd8ed1ab_0 conda-forge colorama 0.4.6 pyhd8ed1ab_0 conda-forge colorlog 6.8.2 py312h7900ff3_0 conda-forge comm 0.2.2 pyhd8ed1ab_0 conda-forge contourpy 1.2.1 py312h8572e83_0 conda-forge convertdate 2.4.0 pyhd8ed1ab_0 conda-forge cuda-cudart 12.1.105 0 nvidia cuda-cupti 12.1.105 0 nvidia cuda-libraries 12.1.0 0 nvidia cuda-nvrtc 12.1.105 0 nvidia cuda-nvtx 12.1.105 0 nvidia cuda-opencl 12.4.127 0 nvidia cuda-runtime 12.1.0 0 nvidia cycler 0.12.1 pyhd8ed1ab_0 conda-forge dash 0.39.0 py_0 plotly dash-core-components 0.44.0 py_0 plotly dash-html-components 0.14.0 py_0 plotly dash-renderer 0.20.0 py_0 plotly dash-table 3.6.0 py_0 plotly debugpy 1.8.1 py312h30efb56_0 conda-forge decorator 5.1.1 pyhd8ed1ab_0 conda-forge defusedxml 0.7.1 pyhd8ed1ab_0 conda-forge entrypoints 0.4 pyhd8ed1ab_0 conda-forge ephem 4.1.5 py312h98912ed_1 conda-forge exceptiongroup 1.2.0 pyhd8ed1ab_2 conda-forge executing 2.0.1 pyhd8ed1ab_0 conda-forge expat 2.6.2 h59595ed_0 conda-forge filelock 3.14.0 pyhd8ed1ab_0 conda-forge flask 3.0.3 pyhd8ed1ab_0 conda-forge flask-compress 1.4.0 py_0 plotly font-ttf-dejavu-sans-mono 2.37 hab24e00_0 conda-forge font-ttf-inconsolata 3.000 h77eed37_0 conda-forge font-ttf-source-code-pro 2.038 h77eed37_0 conda-forge font-ttf-ubuntu 0.83 h77eed37_2 conda-forge fontconfig 2.14.2 h14ed4e7_0 conda-forge fonts-conda-forge 1 0 conda-forge fonttools 4.52.4 py312h9a8786e_0 conda-forge fqdn 1.5.1 pyhd8ed1ab_0 conda-forge freetype 2.12.1 h267a509_2 conda-forge gcc_impl_linux-64 12.3.0 h58ffeeb_7 conda-forge gcc_linux-64 12.3.0 h6477408_3 conda-forge gmp 6.3.0 h59595ed_1 conda-forge gmpy2 2.1.5 py312h1d5cde6_1 conda-forge greenlet 3.0.3 py312h30efb56_0 conda-forge gxx_impl_linux-64 12.3.0 h2a574ab_7 conda-forge gxx_linux-64 12.3.0 h4a1b8e8_3 conda-forge h11 0.14.0 pyhd8ed1ab_0 conda-forge h2 4.1.0 pyhd8ed1ab_0 conda-forge holidays 0.49 pyhd8ed1ab_0 conda-forge hpack 4.0.0 pyh9f0ad1d_0 conda-forge httpcore 1.0.5 pyhd8ed1ab_0 conda-forge httpx 0.27.0 pyhd8ed1ab_0 conda-forge 
hyperframe 6.0.1 pyhd8ed1ab_0 conda-forge icu 73.2 h59595ed_0 conda-forge idna 3.7 pyhd8ed1ab_0 conda-forge importlib-metadata 7.1.0 pyha770c72_0 conda-forge importlib_metadata 7.1.0 hd8ed1ab_0 conda-forge importlib_resources 6.4.0 pyhd8ed1ab_0 conda-forge intel-openmp 2022.1.0 h9e868ea_3769 ipykernel 6.29.3 pyhd33586a_0 conda-forge ipython 8.24.0 pyh707e725_0 conda-forge ipywidgets 8.1.3 pyhd8ed1ab_0 conda-forge isoduration 20.11.0 pyhd8ed1ab_0 conda-forge itsdangerous 2.2.0 pyhd8ed1ab_0 conda-forge jedi 0.19.1 pyhd8ed1ab_0 conda-forge jinja2 3.1.4 pyhd8ed1ab_0 conda-forge joblib 1.4.2 pyhd8ed1ab_0 conda-forge json5 0.9.25 pyhd8ed1ab_0 conda-forge jsonpointer 2.4 py312h7900ff3_3 conda-forge jsonschema 4.22.0 pyhd8ed1ab_0 conda-forge jsonschema-specifications 2023.12.1 pyhd8ed1ab_0 conda-forge jsonschema-with-format-nongpl 4.22.0 pyhd8ed1ab_0 conda-forge jupyter-collaboration 2.1.0 pyhd8ed1ab_0 conda-forge jupyter-dash 0.4.2 py_0 plotly jupyter-lsp 2.2.5 pyhd8ed1ab_0 conda-forge jupyter_client 8.6.2 pyhd8ed1ab_0 conda-forge jupyter_core 5.7.2 py312h7900ff3_0 conda-forge jupyter_events 0.10.0 pyhd8ed1ab_0 conda-forge jupyter_server 2.14.0 pyhd8ed1ab_0 conda-forge jupyter_server_fileid 0.9.2 pyhd8ed1ab_0 conda-forge jupyter_server_terminals 0.5.3 pyhd8ed1ab_0 conda-forge jupyter_ydoc 2.0.1 pyhd8ed1ab_0 conda-forge jupyterlab 4.2.1 pyhd8ed1ab_0 conda-forge jupyterlab_pygments 0.3.0 pyhd8ed1ab_1 conda-forge jupyterlab_server 2.27.2 pyhd8ed1ab_0 conda-forge jupyterlab_widgets 3.0.11 pyhd8ed1ab_0 conda-forge kaleido-core 0.2.1 h3644ca4_0 conda-forge kernel-headers_linux-64 2.6.32 he073ed8_17 conda-forge keyutils 1.6.1 h166bdaf_0 conda-forge kiwisolver 1.4.5 py312h8572e83_1 conda-forge krb5 1.21.2 h659d440_0 conda-forge lcms2 2.16 hb7c19ff_0 conda-forge ld_impl_linux-64 2.40 hf3520f5_1 conda-forge lerc 4.0.0 h27087fc_0 conda-forge libblas 3.9.0 16_linux64_mkl conda-forge libbrotlicommon 1.1.0 hd590300_1 conda-forge libbrotlidec 1.1.0 hd590300_1 conda-forge libbrotlienc 1.1.0 hd590300_1 conda-forge libcblas 3.9.0 16_linux64_mkl conda-forge libcublas 12.1.0.26 0 nvidia libcufft 11.0.2.4 0 nvidia libcufile 1.9.1.3 0 nvidia libcurand 10.3.5.147 0 nvidia libcusolver 11.4.4.55 0 nvidia libcusparse 12.0.2.55 0 nvidia libdeflate 1.20 hd590300_0 conda-forge libedit 3.1.20191231 he28a2e2_2 conda-forge libexpat 2.6.2 h59595ed_0 conda-forge libffi 3.4.2 h7f98852_5 conda-forge libgcc-devel_linux-64 12.3.0 h0223996_107 conda-forge libgcc-ng 13.2.0 h77fa898_7 conda-forge libgfortran-ng 13.2.0 h69a702a_7 conda-forge libgfortran5 13.2.0 hca663fb_7 conda-forge libgomp 13.2.0 h77fa898_7 conda-forge libhwloc 2.10.0 default_h5622ce7_1001 conda-forge libiconv 1.17 hd590300_2 conda-forge libjpeg-turbo 3.0.0 hd590300_1 conda-forge liblapack 3.9.0 16_linux64_mkl conda-forge libnpp 12.0.2.50 0 nvidia libnsl 2.0.1 hd590300_0 conda-forge libnvjitlink 12.1.105 0 nvidia libnvjpeg 12.1.1.14 0 nvidia libpng 1.6.43 h2797004_0 conda-forge libsanitizer 12.3.0 hb8811af_7 conda-forge libsodium 1.0.18 h36c2ea0_1 conda-forge libsqlite 3.45.3 h2797004_0 conda-forge libstdcxx-devel_linux-64 12.3.0 h0223996_107 conda-forge libstdcxx-ng 13.2.0 hc0a3c3a_7 conda-forge libtiff 4.6.0 h1dd3fc0_3 conda-forge libuuid 2.38.1 h0b41bf4_0 conda-forge libwebp-base 1.4.0 hd590300_0 conda-forge libxcb 1.15 h0b41bf4_0 conda-forge libxcrypt 4.4.36 hd590300_1 conda-forge libxml2 2.12.7 hc051c1a_0 conda-forge libzlib 1.2.13 h4ab18f5_6 conda-forge llvm-openmp 15.0.7 h0cdce71_0 conda-forge lunarcalendar 0.0.9 py_0 conda-forge make 4.3 hd18ef5c_1 conda-forge 
mako 1.3.5 pyhd8ed1ab_0 conda-forge markupsafe 2.1.5 py312h98912ed_0 conda-forge mathjax 2.7.7 ha770c72_3 conda-forge matplotlib-base 3.8.4 py312h20ab3a6_2 conda-forge matplotlib-inline 0.1.7 pyhd8ed1ab_0 conda-forge mistune 3.0.2 pyhd8ed1ab_0 conda-forge mkl 2022.1.0 hc2b9512_224 mpc 1.3.1 hfe3b2da_0 conda-forge mpfr 4.2.1 h9458935_1 conda-forge mpmath 1.3.0 pyhd8ed1ab_0 conda-forge munkres 1.1.4 pyh9f0ad1d_0 conda-forge nbclient 0.10.0 pyhd8ed1ab_0 conda-forge nbconvert-core 7.16.4 pyhd8ed1ab_0 conda-forge nbformat 5.10.4 pyhd8ed1ab_0 conda-forge ncurses 6.5 h59595ed_0 conda-forge nest-asyncio 1.6.0 pyhd8ed1ab_0 conda-forge networkx 3.3 pyhd8ed1ab_1 conda-forge notebook-shim 0.2.4 pyhd8ed1ab_0 conda-forge nspr 4.35 h27087fc_0 conda-forge nss 3.100 hca3bf56_0 conda-forge numpy 1.26.4 py312heda63a1_0 conda-forge openjpeg 2.5.2 h488ebb8_0 conda-forge openssl 3.3.0 h4ab18f5_3 conda-forge optuna 3.6.0 pyhd8ed1ab_0 conda-forge overrides 7.7.0 pyhd8ed1ab_0 conda-forge packaging 24.0 pyhd8ed1ab_0 conda-forge pandas 2.2.2 py312h1d6d2e6_1 conda-forge pandocfilters 1.5.0 pyhd8ed1ab_0 conda-forge parso 0.8.4 pyhd8ed1ab_0 conda-forge pexpect 4.9.0 pyhd8ed1ab_0 conda-forge pickleshare 0.7.5 py_1003 conda-forge pillow 10.3.0 py312hdcec9eb_0 conda-forge pip 24.0 pyhd8ed1ab_0 conda-forge pkgutil-resolve-name 1.3.10 pyhd8ed1ab_1 conda-forge platformdirs 4.2.2 pyhd8ed1ab_0 conda-forge plotly 5.22.0 py_0 plotly polars 0.20.30 py312hc7f843c_0 conda-forge prometheus_client 0.20.0 pyhd8ed1ab_0 conda-forge prompt-toolkit 3.0.42 pyha770c72_0 conda-forge prophet 1.1.5 py312h4f541b2_1 conda-forge psutil 5.9.8 py312h98912ed_0 conda-forge pthread-stubs 0.4 h36c2ea0_1001 conda-forge ptyprocess 0.7.0 pyhd3deb0d_0 conda-forge pure_eval 0.2.2 pyhd8ed1ab_0 conda-forge pycparser 2.22 pyhd8ed1ab_0 conda-forge pycrdt 0.8.25 py312h4413252_0 conda-forge pycrdt-websocket 0.13.4 pyhd8ed1ab_0 conda-forge pygments 2.18.0 pyhd8ed1ab_0 conda-forge pymeeus 0.5.12 pyhd8ed1ab_0 conda-forge pyparsing 3.1.2 pyhd8ed1ab_0 conda-forge pysocks 1.7.1 pyha2e5f31_6 conda-forge python 3.12.3 hab00c5b_0_cpython conda-forge python-dateutil 2.9.0 pyhd8ed1ab_0 conda-forge python-fastjsonschema 2.19.1 pyhd8ed1ab_0 conda-forge python-json-logger 2.0.7 pyhd8ed1ab_0 conda-forge python-kaleido 0.2.1 pyhd8ed1ab_0 conda-forge python-tzdata 2024.1 pyhd8ed1ab_0 conda-forge python_abi 3.12 4_cp312 conda-forge pytorch 2.3.0 py3.12_cuda12.1_cudnn8.9.2_0 pytorch pytorch-cuda 12.1 ha16c6d3_5 pytorch pytorch-mutex 1.0 cuda pytorch pytz 2024.1 pyhd8ed1ab_0 conda-forge pyyaml 6.0.1 py312h98912ed_1 conda-forge pyzmq 26.0.3 py312h8fd38d8_0 conda-forge readline 8.2 h8228510_1 conda-forge referencing 0.35.1 pyhd8ed1ab_0 conda-forge requests 2.32.2 pyhd8ed1ab_0 conda-forge retrying 1.3.3 py_2 conda-forge rfc3339-validator 0.1.4 pyhd8ed1ab_0 conda-forge rfc3986-validator 0.1.1 pyh9f0ad1d_0 conda-forge rpds-py 0.18.1 py312h4413252_0 conda-forge scikit-base 0.7.8 pyhecae5ae_1 conda-forge scikit-learn 1.4.2 py312h1fcc3ea_1 conda-forge scipy 1.13.1 py312hc2bc53b_0 conda-forge send2trash 1.8.3 pyh0d859eb_0 conda-forge setuptools 70.0.0 pyhd8ed1ab_0 conda-forge six 1.16.0 pyh6c4a22f_0 conda-forge sktime 0.29.0 py312h7900ff3_1 conda-forge sniffio 1.3.1 pyhd8ed1ab_0 conda-forge soupsieve 2.5 pyhd8ed1ab_1 conda-forge sqlalchemy 2.0.30 py312h9a8786e_0 conda-forge sqlite 3.45.3 h2c6b66d_0 conda-forge sqlite-anyio 0.2.0 pyhd8ed1ab_0 conda-forge stack_data 0.6.2 pyhd8ed1ab_0 conda-forge stanio 0.5.0 pyhd8ed1ab_0 conda-forge sympy 1.12 pypyh9d50eac_103 conda-forge sysroot_linux-64 
2.12 he073ed8_17 conda-forge tbb 2021.12.0 h297d8ca_1 conda-forge tbb-devel 2021.12.0 h7c56ddd_1 conda-forge tenacity 8.3.0 pyhd8ed1ab_0 conda-forge terminado 0.18.1 pyh0d859eb_0 conda-forge threadpoolctl 3.5.0 pyhc1e730c_0 conda-forge tinycss2 1.3.0 pyhd8ed1ab_0 conda-forge tk 8.6.13 noxft_h4845f30_101 conda-forge tomli 2.0.1 pyhd8ed1ab_0 conda-forge tornado 6.4 py312h98912ed_0 conda-forge tqdm 4.66.4 pyhd8ed1ab_0 conda-forge traitlets 5.14.3 pyhd8ed1ab_0 conda-forge types-python-dateutil 2.9.0.20240316 pyhd8ed1ab_0 conda-forge typing-extensions 4.11.0 hd8ed1ab_0 conda-forge typing_extensions 4.11.0 pyha770c72_0 conda-forge typing_utils 0.1.0 pyhd8ed1ab_0 conda-forge tzdata 2024a h0c530f3_0 conda-forge uri-template 1.3.0 pyhd8ed1ab_0 conda-forge urllib3 2.2.1 pyhd8ed1ab_0 conda-forge wcwidth 0.2.13 pyhd8ed1ab_0 conda-forge webcolors 1.13 pyhd8ed1ab_0 conda-forge webencodings 0.5.1 pyhd8ed1ab_2 conda-forge websocket-client 1.8.0 pyhd8ed1ab_0 conda-forge werkzeug 3.0.3 pyhd8ed1ab_0 conda-forge wheel 0.43.0 pyhd8ed1ab_1 conda-forge widgetsnbextension 4.0.11 pyhd8ed1ab_0 conda-forge xorg-libxau 1.0.11 hd590300_0 conda-forge xorg-libxdmcp 1.1.3 h7f98852_0 conda-forge xz 5.2.6 h166bdaf_0 conda-forge yaml 0.2.5 h7f98852_2 conda-forge zeromq 4.3.5 h75354e8_4 conda-forge zipp 3.17.0 pyhd8ed1ab_0 conda-forge zlib 1.2.13 h4ab18f5_6 conda-forge zstd 1.5.6 ha6fb4c9_0 conda-forge $ pip list Package Version ------------------------- -------------- absl-py 2.1.0 aiohttp 3.9.5 aiosignal 1.3.1 alembic 1.13.1 ansi2html 0.0.0 anyio 4.3.0 argon2-cffi 23.1.0 argon2-cffi-bindings 21.2.0 arrow 1.3.0 asttokens 2.4.1 async-lru 2.0.4 attrs 23.2.0 Babel 2.14.0 beautifulsoup4 4.12.3 bleach 6.1.0 blinker 1.8.2 Brotli 1.1.0 cached-property 1.5.2 captum 0.7.0 certifi 2024.2.2 cffi 1.16.0 charset-normalizer 3.3.2 click 8.1.7 cmdstanpy 1.2.2 colorama 0.4.6 colorlog 6.8.2 comm 0.2.2 contourpy 1.2.1 convertdate 2.4.0 cycler 0.12.1 dash 2.17.0 dash-core-components 2.0.0 dash-html-components 2.0.0 dash-renderer 0.20.0 dash-table 5.0.0 debugpy 1.8.1 decorator 5.1.1 defusedxml 0.7.1 entrypoints 0.4 ephem 4.1.5 exceptiongroup 1.2.0 executing 2.0.1 fastjsonschema 2.19.1 filelock 3.14.0 Flask 3.0.3 Flask-Compress 1.4.0 fonttools 4.52.4 fqdn 1.5.1 frozenlist 1.4.1 fsspec 2024.5.0 gmpy2 2.1.5 greenlet 3.0.3 grpcio 1.64.0 h11 0.14.0 h2 4.1.0 holidays 0.49 hpack 4.0.0 httpcore 1.0.5 httpx 0.27.0 hyperframe 6.0.1 idna 3.7 importlib_metadata 7.1.0 importlib_resources 6.4.0 ipykernel 6.29.3 ipython 8.24.0 ipywidgets 8.1.3 isoduration 20.11.0 itsdangerous 2.2.0 jedi 0.19.1 Jinja2 3.1.4 joblib 1.4.2 json5 0.9.25 jsonpointer 2.4 jsonschema 4.22.0 jsonschema-specifications 2023.12.1 jupyter_client 8.6.2 jupyter_collaboration 2.1.0 jupyter_core 5.7.2 jupyter-dash 0.4.2 jupyter-events 0.10.0 jupyter-lsp 2.2.5 jupyter_server 2.14.0 jupyter_server_fileid 0.9.2 jupyter_server_terminals 0.5.3 jupyter-ydoc 2.0.1 jupyterlab 4.2.1 jupyterlab_pygments 0.3.0 jupyterlab_server 2.27.2 jupyterlab_widgets 3.0.11 kaleido 0.2.1 kiwisolver 1.4.5 lightning-utilities 0.11.2 LunarCalendar 0.0.9 Mako 1.3.5 Markdown 3.6 MarkupSafe 2.1.5 matplotlib 3.8.4 matplotlib-inline 0.1.7 meteostat 1.6.7 mistune 3.0.2 mpmath 1.3.0 multidict 6.0.5 munkres 1.1.4 nbclient 0.10.0 nbconvert 7.16.4 nbformat 5.10.4 nest_asyncio 1.6.0 networkx 3.3 neuralprophet 0.8.0 notebook_shim 0.2.4 numpy 1.26.4 optuna 3.6.0 orjson 3.10.3 overrides 7.7.0 packaging 24.0 pandas 2.2.2 pandocfilters 1.5.0 parso 0.8.4 pexpect 4.9.0 pickleshare 0.7.5 pillow 10.3.0 pip 24.0 pkgutil_resolve_name 1.3.10 
platformdirs 4.2.2 plotly 5.22.0 plotly-resampler 0.10.0 polars 0.20.30 prometheus_client 0.20.0 prompt-toolkit 3.0.42 prophet 1.1.5 protobuf 5.27.0 psutil 5.9.8 ptyprocess 0.7.0 pure-eval 0.2.2 pycparser 2.22 pycrdt 0.8.25 pycrdt-websocket 0.13.4 Pygments 2.18.0 PyMeeus 0.5.12 pyparsing 3.1.2 PySocks 1.7.1 python-dateutil 2.9.0 python-json-logger 2.0.7 pytorch-lightning 1.9.5 pytz 2024.1 PyYAML 6.0.1 pyzmq 26.0.3 referencing 0.35.1 requests 2.32.2 retrying 1.3.3 rfc3339-validator 0.1.4 rfc3986-validator 0.1.1 rpds-py 0.18.1 scikit-base 0.7.8 scikit-learn 1.4.2 scipy 1.13.1 Send2Trash 1.8.3 setuptools 70.0.0 six 1.16.0 sktime 0.29.0 sniffio 1.3.1 soupsieve 2.5 SQLAlchemy 2.0.30 sqlite-anyio 0.2.0 stack-data 0.6.2 stanio 0.5.0 sympy 1.12 tenacity 8.3.0 tensorboard 2.16.2 tensorboard-data-server 0.7.2 terminado 0.18.1 threadpoolctl 3.5.0 tinycss2 1.3.0 tomli 2.0.1 torch 2.3.0 torchmetrics 1.4.0.post0 tornado 6.4 tqdm 4.66.4 traitlets 5.14.3 tsdownsample 0.1.3 types-python-dateutil 2.9.0.20240316 typing_extensions 4.11.0 typing-utils 0.1.0 tzdata 2024.1 uri-template 1.3.0 urllib3 2.2.1 wcwidth 0.2.13 webcolors 1.13 webencodings 0.5.1 websocket-client 1.8.0 Werkzeug 3.0.3 wheel 0.43.0 widgetsnbextension 4.0.11 yarl 1.9.4 zipp 3.17.0 ```
2024-06-13T21:34:51
microsoft/AzureTRE
156
microsoft__AzureTRE-156
[ "146" ]
14b5fd307403e8bb9671fef5d7aca3d41fb4f286
diff --git a/management_api_app/db/query_builder.py b/management_api_app/db/query_builder.py new file mode 100644 --- /dev/null +++ b/management_api_app/db/query_builder.py @@ -0,0 +1,26 @@ +from models.domain.resource import ResourceType + + +class QueryBuilder: + def __init__(self): + self._where_clauses = [] + + def select_active_resources(self, resource_type: ResourceType): + active_resources = f'c.resourceType = "{resource_type.value}" AND c.isDeleted = false' + self._where_clauses.append(active_resources) + return self + + def with_id(self, resource_id: str): + self._where_clauses.append(f'c.id = "{resource_id}"') + return self + + def build(self) -> str: + query = 'SELECT * FROM c' + if self._where_clauses: + query += ' WHERE ' + for clause in self._where_clauses: + query += clause + ' AND ' + if self._where_clauses: + # removes the final ' AND ' + query = query[:-5] + return query diff --git a/management_api_app/db/repositories/workspaces.py b/management_api_app/db/repositories/workspaces.py --- a/management_api_app/db/repositories/workspaces.py +++ b/management_api_app/db/repositories/workspaces.py @@ -6,10 +6,10 @@ from core.config import STATE_STORE_RESOURCES_CONTAINER from db.errors import EntityDoesNotExist +from db.query_builder import QueryBuilder from db.repositories.base import BaseRepository from models.domain.resource import Resource, ResourceSpec, ResourceType, Status from models.schemas.resource import ResourceInCreate -from resources import strings class WorkspaceRepository(BaseRepository): @@ -21,17 +21,12 @@ def container(self) -> ContainerProxy: return self._container def get_all_active_workspaces(self) -> List[Resource]: - query = f'SELECT * from c ' \ - f'WHERE c.resourceType = "{strings.RESOURCE_TYPE_WORKSPACE}" ' \ - f'AND c.isDeleted = false' + query = QueryBuilder().select_active_resources(ResourceType.Workspace).build() workspaces = list(self.container.query_items(query=query, enable_cross_partition_query=True)) return workspaces def get_workspace_by_workspace_id(self, workspace_id: UUID4) -> Resource: - query = f'SELECT * from c ' \ - f'WHERE c.resourceType = "{strings.RESOURCE_TYPE_WORKSPACE}" ' \ - f'AND c.isDeleted = false ' \ - f'AND c.id = "{workspace_id}"' + query = QueryBuilder().select_active_resources(ResourceType.Workspace).with_id(workspace_id).build() workspaces = list(self.container.query_items(query=query, enable_cross_partition_query=True)) if workspaces:
diff --git a/management_api_app/tests/test_db/__init__.py b/management_api_app/tests/test_db/__init__.py new file mode 100644 diff --git a/management_api_app/tests/test_db/test_query_builder.py b/management_api_app/tests/test_db/test_query_builder.py new file mode 100644 --- /dev/null +++ b/management_api_app/tests/test_db/test_query_builder.py @@ -0,0 +1,20 @@ +from models.domain.resource import ResourceType +from db.query_builder import QueryBuilder + + +def test_query_builder_with_no_where_clauses_select_all() -> None: + expected_query = 'SELECT * FROM c' + actual_query = QueryBuilder().build() + assert expected_query == actual_query + + +def test_query_builder_selecting_active_resources() -> None: + expected_query = 'SELECT * FROM c WHERE c.resourceType = "workspace" AND c.isDeleted = false' + actual_query = QueryBuilder().select_active_resources(ResourceType.Workspace).build() + assert expected_query == actual_query + + +def test_query_builder_selecting_active_resources_with_id() -> None: + expected_query = 'SELECT * FROM c WHERE c.resourceType = "workspace" AND c.isDeleted = false AND c.id = "some_id"' + actual_query = QueryBuilder().select_active_resources(ResourceType.Workspace).with_id("some_id").build() + assert expected_query == actual_query
Implement query builder for queries

Comment from @sachinkundu: If we are always going to use `isDeleted = false`, we should write a query builder that decorates the query string with an AND clause, instead of remembering to put it everywhere.
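For reference, a short usage sketch of the `QueryBuilder` introduced in the patch above, mirroring its unit tests:

```python
from models.domain.resource import ResourceType
from db.query_builder import QueryBuilder

# The builder always appends the isDeleted filter, so callers cannot forget it.
query = QueryBuilder().select_active_resources(ResourceType.Workspace).with_id("some_id").build()
# -> 'SELECT * FROM c WHERE c.resourceType = "workspace" AND c.isDeleted = false AND c.id = "some_id"'
```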
2021-06-01T17:53:18
microsoft/AzureTRE
176
microsoft__AzureTRE-176
[ "172" ]
d6f1a1740d42565a03353fa5014a4f5112a40466
diff --git a/management_api_app/core/config.py b/management_api_app/core/config.py --- a/management_api_app/core/config.py +++ b/management_api_app/core/config.py @@ -11,7 +11,7 @@ # Resource Info RESOURCE_LOCATION: str = config("RESOURCE_LOCATION", default="") -CORE_ID: str = config("CORE_ID", default="") +TRE_ID: str = config("TRE_ID", default="") # State store configuration STATE_STORE_ENDPOINT: str = config("STATE_STORE_ENDPOINT", default="") # Cosmos DB endpoint diff --git a/management_api_app/db/repositories/workspaces.py b/management_api_app/db/repositories/workspaces.py --- a/management_api_app/db/repositories/workspaces.py +++ b/management_api_app/db/repositories/workspaces.py @@ -39,7 +39,7 @@ def create_workspace(self, workspace_create: WorkspaceInCreate) -> Workspace: resource_spec_parameters = { "location": config.RESOURCE_LOCATION, "workspace_id": "0001", # TODO: Calculate this value - Issue #166 - "core_id": config.CORE_ID, + "tre_id": config.TRE_ID, "address_space": "10.2.1.0/24" # TODO: Calculate this value - Issue #52 } diff --git a/management_api_app/models/schemas/workspace.py b/management_api_app/models/schemas/workspace.py --- a/management_api_app/models/schemas/workspace.py +++ b/management_api_app/models/schemas/workspace.py @@ -14,7 +14,7 @@ def get_sample_workspace(workspace_id: str, spec_workspace_id: str = "0001") -> "resourceSpecParameters": { "location": "westeurope", "workspace_id": spec_workspace_id, - "core_id": "mytre-dev-1234", + "tre_id": "mytre-dev-1234", "address_space": "10.2.1.0/24" }, "status": "not_deployed",
Standardize TRE identifiers

## Description

As a TRE developer
I want naming of identifiers to be simple and standardized across the TRE
So it will be as intuitive as possible

Currently we have Core ID, TRE ID and resource_name_prefix, which are all unique IDs for a TRE instance. ([Ref to code](https://github.com/microsoft/AzureTRE/blob/3cc8e14c6a16d5bb940f259dd5cb257e735e448b/templates/core/terraform/main.tf#L17)) They are used to ensure no clashes between names, but having a single identifier is sufficient.

### A simplified solution

When creating a TRE instance, a unique identifier is needed to make sure no clashes occur. That identifier should be named TRE_ID and can be up to 10 chars long (alphanumeric, underscore, and hyphen). If the Cloud Administrator wants to use a specific naming convention, e.g. one that includes the environment, the Cloud Administrator can do so.

Examples of a TRE_ID:

- mytre
- msfttre-dev
- tre123

Hence the TRE_ID is a unique identifier for the TRE instance, replacing the Core ID, which consisted of TRE ID + resource_name_prefix.

## Acceptance criteria

- [x] TRE provisioning script uses the TRE ID as the TRE instance name, hence creates the cross-cutting services in a resource group with the name of the TRE ID, e.g. mytre
- [x] TRE provisioning script does not require an environment parameter
- [x] Workspace bundle uses TRE_ID (not Core ID as now) as the identifier for the TRE instance
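To make the naming constraint concrete, a minimal validation sketch; the helper name is hypothetical, and the 10-character limit and allowed character set are taken from the issue text above, not from the patch:

```python
import re

# Hypothetical helper: TRE_ID is up to 10 chars of alphanumerics,
# underscores and hyphens (limit and character set as stated in the issue).
TRE_ID_PATTERN = re.compile(r"[A-Za-z0-9_-]{1,10}")

def is_valid_tre_id(tre_id: str) -> bool:
    return TRE_ID_PATTERN.fullmatch(tre_id) is not None

assert is_valid_tre_id("mytre")
assert is_valid_tre_id("tre123")
assert not is_valid_tre_id("an-identifier-that-is-too-long")
```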
2021-06-03T20:50:51
microsoft/AzureTRE
217
microsoft__AzureTRE-217
[ "216" ]
0584832f8325453a228b76c540880f90fc6f70e1
diff --git a/management_api_app/db/repositories/workspaces.py b/management_api_app/db/repositories/workspaces.py --- a/management_api_app/db/repositories/workspaces.py +++ b/management_api_app/db/repositories/workspaces.py @@ -46,7 +46,7 @@ def create_workspace_item(self, workspace_create: WorkspaceInCreate) -> Workspac raise ValueError(f"The workspace type '{workspace_create.workspaceType}' does not exist") resource_spec_parameters = { - "location": config.RESOURCE_LOCATION, + "azure_location": config.RESOURCE_LOCATION, "workspace_id": full_workspace_id[-4:], "tre_id": config.TRE_ID, "address_space": "10.2.1.0/24" # TODO: Calculate this value - Issue #52 diff --git a/management_api_app/models/schemas/workspace_template.py b/management_api_app/models/schemas/workspace_template.py --- a/management_api_app/models/schemas/workspace_template.py +++ b/management_api_app/models/schemas/workspace_template.py @@ -12,7 +12,7 @@ def get_sample_workspace_template_object(template_name: str = "tre-workspace-van description="vanilla workspace bundle", version="0.1.0", parameters=[ - Parameter(name="location", type="string"), + Parameter(name="azure_location", type="string"), Parameter(name="tre_id", type="string"), Parameter(name="workspace_id", type="string"), Parameter(name="address_space", type="string", default="10.2.1.0/24", description="VNet address space for the workspace services")
diff --git a/management_api_app/tests/test_db/test_repositories/test_workpaces_repository.py b/management_api_app/tests/test_db/test_repositories/test_workpaces_repository.py --- a/management_api_app/tests/test_db/test_repositories/test_workpaces_repository.py +++ b/management_api_app/tests/test_db/test_repositories/test_workpaces_repository.py @@ -61,7 +61,7 @@ def test_create_workspace_item_creates_a_workspace_with_the_right_values(cosmos_ assert workspace.resourceTemplateName == workspace_type assert workspace.resourceType == ResourceType.Workspace assert workspace.status == Status.NotDeployed - assert "location" in workspace.resourceTemplateParameters + assert "azure_location" in workspace.resourceTemplateParameters assert "workspace_id" in workspace.resourceTemplateParameters assert "tre_id" in workspace.resourceTemplateParameters assert "address_space" in workspace.resourceTemplateParameters diff --git a/scripts/bootstrap_db_for_local_tests.py b/scripts/bootstrap_db_for_local_tests.py --- a/scripts/bootstrap_db_for_local_tests.py +++ b/scripts/bootstrap_db_for_local_tests.py @@ -18,16 +18,20 @@ def create_workspace_resource(resource_id: str): return { "id": resource_id, - "resourceSpec": { - "name": "tre-workspace-vanilla", - "version": "0.1.0" + "displayName": "My workspace", + "description": "workspace for team X", + "resourceTemplateName": "tre-workspace-vanilla", + "resourceTemplateVersion": "0.1.0", + "resourceTemplateParameters": { + "azure_location": "westeurope", + "workspace_id": "f4a6", + "tre_id": "mytre-dev-3142", + "address_space": "10.2.1.0/24" }, - "parameters": { - "location": "europe" - }, - "resourceType": "workspace", "status": "not_deployed", - "isDeleted": False + "isDeleted": False, + "workspaceURL": "", + "resourceType": "workspace" }
[Task] Change location to azure_location for porter parameters from API

**Description**

Change location to azure_location for porter parameters from API, as location appears to be a reserved word for porter.
2021-06-08T13:13:23
microsoft/AzureTRE
241
microsoft__AzureTRE-241
[ "239" ]
5457413002e9ef3693da860d24af4f1d2280be38
diff --git a/management_api_app/db/repositories/workspace_templates.py b/management_api_app/db/repositories/workspace_templates.py --- a/management_api_app/db/repositories/workspace_templates.py +++ b/management_api_app/db/repositories/workspace_templates.py @@ -50,7 +50,7 @@ def create_workspace_template_item(self, workspace_template_create: WorkspaceTem name=workspace_template_create.name, description=workspace_template_create.description, version=workspace_template_create.version, - properties=workspace_template_create.properties, + parameters=workspace_template_create.parameters, resourceType=workspace_template_create.resourceType, current=workspace_template_create.current ) diff --git a/management_api_app/models/domain/resource_template.py b/management_api_app/models/domain/resource_template.py --- a/management_api_app/models/domain/resource_template.py +++ b/management_api_app/models/domain/resource_template.py @@ -20,6 +20,6 @@ class ResourceTemplate(AzureTREModel): name: str = Field(title="Unique template name") description: str = Field(title="Template description") version: str = Field(title="Template version") - properties: List[dict] = Field(title="Template parameters") + parameters: List[dict] = Field(title="Template parameters") resourceType: ResourceType = Field(title="Type of resource this template is for (workspace/service)") current: bool = Field(title="Is this the current version of this template") diff --git a/management_api_app/models/schemas/workspace_template.py b/management_api_app/models/schemas/workspace_template.py --- a/management_api_app/models/schemas/workspace_template.py +++ b/management_api_app/models/schemas/workspace_template.py @@ -11,7 +11,7 @@ def get_sample_workspace_template_object(template_name: str = "tre-workspace-van name=template_name, description="vanilla workspace bundle", version="0.1.0", - properties=[ + parameters=[ Parameter(name="azure_location", type="string"), Parameter(name="tre_id", type="string"), Parameter(name="workspace_id", type="string"), @@ -42,8 +42,8 @@ class WorkspaceTemplateInCreate(BaseModel): name: str = Field(title="Name of workspace template") version: str = Field(title="Version of workspace template") description: str = Field(title=" Description of workspace template") - properties: List[dict] = Field([{}], title="Workspace template properties", - description="Values for the properties required by the workspace template") + parameters: List[dict] = Field([{}], title="Workspace template parameters", + description="Values for the parameters required by the workspace template") resourceType: str = Field(title="Type of workspace template") current: bool = Field(title="Mark this version as current") @@ -53,7 +53,7 @@ class Config: "name": "my-tre-workspace", "version": "0.0.1", "description": "workspace template for great product", - "properties": [{ + "parameters": [{ "name": "azure_location", "type": "string" }],
diff --git a/management_api_app/tests/test_api/test_routes/test_workspace_templates.py b/management_api_app/tests/test_api/test_routes/test_workspace_templates.py --- a/management_api_app/tests/test_api/test_routes/test_workspace_templates.py +++ b/management_api_app/tests/test_api/test_routes/test_workspace_templates.py @@ -17,7 +17,7 @@ "name": "my-tre-workspace", "version": "0.0.1", "description": "workspace template for great product", - "properties": [{"blah": "blah"}], + "parameters": [{"blah": "blah"}], "resourceType": "workspace", "current": True } diff --git a/management_api_app/tests/test_db/test_repositories/test_workspace_templates_repository.py b/management_api_app/tests/test_db/test_repositories/test_workspace_templates_repository.py --- a/management_api_app/tests/test_db/test_repositories/test_workspace_templates_repository.py +++ b/management_api_app/tests/test_db/test_repositories/test_workspace_templates_repository.py @@ -15,7 +15,7 @@ def get_sample_workspace_template(name: str, version: str = "1.0") -> ResourceTe description="some description", version=version, resourceType=ResourceType.Workspace, - properties=[], + parameters=[], current=False ) @@ -147,7 +147,7 @@ def test_create_item(cosmos_mock, uuid_mock, create_mock): description="some description", version="0.0.1", resourceType=ResourceType.Workspace, - properties=[], + parameters=[], current=False ) returned_template = template_repo.create_workspace_template_item(payload) @@ -157,7 +157,7 @@ def test_create_item(cosmos_mock, uuid_mock, create_mock): description="some description", version="0.0.1", resourceType=ResourceType.Workspace, - properties=[], + parameters=[], current=False ) create_mock.assert_called_once_with(expected_resouce_template)
[BUG] ResourceTemplates have properties - should be parameters

Rename properties to parameters
2021-06-10T09:14:21
microsoft/AzureTRE
371
microsoft__AzureTRE-371
[ "360" ]
3cf26a175d49b220a47a0e615ee91c24ed4577ef
diff --git a/management_api_app/resources/strings.py b/management_api_app/resources/strings.py --- a/management_api_app/resources/strings.py +++ b/management_api_app/resources/strings.py @@ -51,7 +51,7 @@ # Service bus SERVICE_BUS_GENERAL_ERROR_MESSAGE = "Service bus failure" -DEPLOYMENT_STATUS_MESSAGE_FORMAT_INCORRECT = "Service bus message is not formatted correctly." +DEPLOYMENT_STATUS_MESSAGE_FORMAT_INCORRECT = "Service bus message is not formatted correctly" DEPLOYMENT_STATUS_ID_NOT_FOUND = "Service bus message refers to resource id = {} which does not exist" # Workspace creation validation diff --git a/management_api_app/service_bus/deployment_status_update.py b/management_api_app/service_bus/deployment_status_update.py --- a/management_api_app/service_bus/deployment_status_update.py +++ b/management_api_app/service_bus/deployment_status_update.py @@ -48,8 +48,8 @@ async def receive_message(): try: message = json.loads(str(msg)) result = (yield parse_obj_as(DeploymentStatusUpdateMessage, message)) - except (json.JSONDecodeError, ValidationError): - logging.error(strings.DEPLOYMENT_STATUS_MESSAGE_FORMAT_INCORRECT) + except (json.JSONDecodeError, ValidationError) as e: + logging.error(f"{strings.DEPLOYMENT_STATUS_MESSAGE_FORMAT_INCORRECT}: {e}") if result: logging.info(f"Received deployment status update message with correlation ID {msg.correlation_id}: {message}")
diff --git a/management_api_app/tests/test_service_bus/test_deployment_status_update.py b/management_api_app/tests/test_service_bus/test_deployment_status_update.py --- a/management_api_app/tests/test_service_bus/test_deployment_status_update.py +++ b/management_api_app/tests/test_service_bus/test_deployment_status_update.py @@ -2,7 +2,6 @@ import pytest import uuid -from azure.servicebus import ServiceBusMessage from mock import AsyncMock, patch from db.errors import EntityDoesNotExist @@ -12,17 +11,28 @@ from resources import strings from service_bus.deployment_status_update import receive_message_and_update_deployment + pytestmark = pytest.mark.asyncio -invalid_service_bus_message = ServiceBusMessage(body='{\'bad\': \'bad\'}', correlation_id="123") +test_data = [ + 'bad', + '{"good": "json", "bad": "message"}' +] -valid_service_bus_message_body = { +test_sb_message = { "id": "59b5c8e7-5c42-4fcb-a7fd-294cfc27aa76", "status": Status.Deployed, "message": "test message" } -valid_service_bus_message = ServiceBusMessage(body=json.dumps(valid_service_bus_message_body), correlation_id="456") + +class ServiceBusReceivedMessageMock: + def __init__(self, message: dict): + self.message = json.dumps(message) + self.correlation_id = "test_correlation_id" + + def __str__(self): + return self.message def create_sample_workspace_object(workspace_id): @@ -36,17 +46,21 @@ def create_sample_workspace_object(workspace_id): ) [email protected]("payload", test_data) @patch('logging.error') @patch('service_bus.deployment_status_update.ServiceBusClient') @patch('fastapi.FastAPI') -async def test_receiving_bad_json_logs_error(app, sb_client, logging_mock): - sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[invalid_service_bus_message]) +async def test_receiving_bad_json_logs_error(app, sb_client, logging_mock, payload): + service_bus_received_message_mock = ServiceBusReceivedMessageMock(payload) + + sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock]) sb_client().get_queue_receiver().complete_message = AsyncMock() await receive_message_and_update_deployment(app) - logging_mock.assert_called_once_with(strings.DEPLOYMENT_STATUS_MESSAGE_FORMAT_INCORRECT) - sb_client().get_queue_receiver().complete_message.assert_called_once_with(invalid_service_bus_message) + error_message = logging_mock.call_args.args[0] + assert error_message.startswith(strings.DEPLOYMENT_STATUS_MESSAGE_FORMAT_INCORRECT) + sb_client().get_queue_receiver().complete_message.assert_called_once_with(service_bus_received_message_mock) @patch('service_bus.deployment_status_update.WorkspaceRepository') @@ -54,16 +68,18 @@ async def test_receiving_bad_json_logs_error(app, sb_client, logging_mock): @patch('service_bus.deployment_status_update.ServiceBusClient') @patch('fastapi.FastAPI') async def test_receiving_good_message(app, sb_client, logging_mock, repo): - sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[valid_service_bus_message]) + service_bus_received_message_mock = ServiceBusReceivedMessageMock(test_sb_message) + + sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock]) sb_client().get_queue_receiver().complete_message = AsyncMock() - expected_workspace = create_sample_workspace_object(valid_service_bus_message_body["id"]) + expected_workspace = create_sample_workspace_object(test_sb_message["id"]) repo().get_workspace_by_workspace_id.return_value = expected_workspace await 
receive_message_and_update_deployment(app) - repo().get_workspace_by_workspace_id.assert_called_once_with(uuid.UUID(valid_service_bus_message_body["id"])) + repo().get_workspace_by_workspace_id.assert_called_once_with(uuid.UUID(test_sb_message["id"])) repo().update_workspace.assert_called_once_with(expected_workspace) logging_mock.assert_not_called() - sb_client().get_queue_receiver().complete_message.assert_called_once_with(valid_service_bus_message) + sb_client().get_queue_receiver().complete_message.assert_called_once_with(service_bus_received_message_mock) @patch('service_bus.deployment_status_update.WorkspaceRepository') @@ -71,14 +87,16 @@ async def test_receiving_good_message(app, sb_client, logging_mock, repo): @patch('service_bus.deployment_status_update.ServiceBusClient') @patch('fastapi.FastAPI') async def test_when_updating_non_existent_workspace_error_is_logged(app, sb_client, logging_mock, repo): - sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[valid_service_bus_message]) + service_bus_received_message_mock = ServiceBusReceivedMessageMock(test_sb_message) + + sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock]) sb_client().get_queue_receiver().complete_message = AsyncMock() repo().get_workspace_by_workspace_id.side_effect = EntityDoesNotExist await receive_message_and_update_deployment(app) - expected_error_message = strings.DEPLOYMENT_STATUS_ID_NOT_FOUND.format(valid_service_bus_message_body["id"]) + expected_error_message = strings.DEPLOYMENT_STATUS_ID_NOT_FOUND.format(test_sb_message["id"]) logging_mock.assert_called_once_with(expected_error_message) - sb_client().get_queue_receiver().complete_message.assert_called_once_with(valid_service_bus_message) + sb_client().get_queue_receiver().complete_message.assert_called_once_with(service_bus_received_message_mock) @patch('service_bus.deployment_status_update.WorkspaceRepository') @@ -86,7 +104,9 @@ async def test_when_updating_non_existent_workspace_error_is_logged(app, sb_clie @patch('service_bus.deployment_status_update.ServiceBusClient') @patch('fastapi.FastAPI') async def test_when_updating_and_state_store_exception(app, sb_client, logging_mock, repo): - sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[valid_service_bus_message]) + service_bus_received_message_mock = ServiceBusReceivedMessageMock(test_sb_message) + + sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock]) sb_client().get_queue_receiver().complete_message = AsyncMock() repo().get_workspace_by_workspace_id.side_effect = Exception
[BUG] Management API unit tests raise errors

Management API unit tests raise errors, but the tests pass. The errors need to be removed and the output needs to be clean.
2021-06-24T12:23:29
microsoft/AzureTRE
395
microsoft__AzureTRE-395
[ "387" ]
3700cc38ab682c3aebf68a813438997479cf8882
diff --git a/processor_function/shared/logging.py b/processor_function/shared/logging.py --- a/processor_function/shared/logging.py +++ b/processor_function/shared/logging.py @@ -18,10 +18,10 @@ def initialize_logging(logging_level: int, correlation_id: str) -> logging.Logge """ logger = logging.getLogger() logger.addHandler(logging.StreamHandler()) # For logging into console - app_insights_instrumentation_key = os.getenv("APP_INSIGHTS_INSTRUMENTATION_KEY") + app_insights_connection_string = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING") try: - logger.addHandler(AzureLogHandler(connection_string=f"InstrumentationKey={app_insights_instrumentation_key}")) + logger.addHandler(AzureLogHandler(connection_string=app_insights_connection_string)) except ValueError as e: logger.error(f"Failed to set Application Insights logger handler: {e}")
Application Insights configuration for processor function broken

The `createNewWorkspace` function monitoring blade indicates that Application Insights integration is not enabled.

![image](https://user-images.githubusercontent.com/166592/123793166-b7edfa80-d8e1-11eb-8906-b4175f4348e0.png)

At first glance, the config seems to be there in [processor_function/function.tf](https://github.com/microsoft/AzureTRE/blob/develop/templates/core/terraform/processor_function/function.tf#L24)

```plaintext
APP_INSIGHTS_INSTRUMENTATION_KEY = var.app_insights_instrumentation_key
```

however, the setting key name is incorrect. Manually enabling the integration in the portal adds additional config keys:

`APPLICATIONINSIGHTS_CONNECTION_STRING`
`APPINSIGHTS_INSTRUMENTATIONKEY`

Need to add the above keys with values to the function app config. [Reference](https://docs.microsoft.com/en-us/azure/azure-monitor/app/resource-manager-function-app)
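The patch above reads the new setting in Python; a condensed sketch of that handler setup, assuming `APPLICATIONINSIGHTS_CONNECTION_STRING` is set on the function app:

```python
import logging
import os

from opencensus.ext.azure.log_exporter import AzureLogHandler

# Condensed from the patch above: pass the full connection string instead of
# rebuilding it from an instrumentation key.
logger = logging.getLogger()
connection_string = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING")
try:
    logger.addHandler(AzureLogHandler(connection_string=connection_string))
except ValueError as e:
    logger.error(f"Failed to set Application Insights logger handler: {e}")
```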
2021-06-30T09:46:12
microsoft/AzureTRE
397
microsoft__AzureTRE-397
[ "389" ]
3700cc38ab682c3aebf68a813438997479cf8882
diff --git a/management_api_app/main.py b/management_api_app/main.py --- a/management_api_app/main.py +++ b/management_api_app/main.py @@ -4,7 +4,6 @@ from fastapi import FastAPI from fastapi.exceptions import RequestValidationError from fastapi_utils.tasks import repeat_every -from opencensus.ext.azure.log_exporter import AzureLogHandler from starlette.exceptions import HTTPException from starlette.middleware.errors import ServerErrorMiddleware @@ -14,6 +13,7 @@ from api.errors.generic_error import generic_error_handler from core import config from core.events import create_start_app_handler, create_stop_app_handler +from services.logging import disable_unwanted_loggers, initialize_logging from service_bus.deployment_status_update import receive_message_and_update_deployment @@ -40,46 +40,6 @@ def get_application() -> FastAPI: return application -def initialize_logging(logging_level: int): - """ - Adds the Application Insights handler for the root logger and sets the given logging level. - - :param logging_level: The logging level to set e.g., logging.WARNING. - """ - logger = logging.getLogger() - - logging.getLogger("azure.core.pipeline.policies.http_logging_policy").disabled = True - - logging.getLogger("azure.eventhub._eventprocessor.event_processor").disabled = True - - logging.getLogger("azure.identity.aio._credentials.managed_identity").disabled = True - logging.getLogger("azure.identity.aio._credentials.environment").disabled = True - logging.getLogger("azure.identity.aio._internal.get_token_mixin").disabled = True - logging.getLogger("azure.identity.aio._internal.decorators").disabled = True - logging.getLogger("azure.identity.aio._credentials.chained").disabled = True - logging.getLogger("azure.identity").disabled = True - - logging.getLogger("msal.token_cache").disabled = True - - logging.getLogger("uamqp").disabled = True - logging.getLogger("uamqp.authentication.cbs_auth_async").disabled = True - logging.getLogger("uamqp.async_ops.client_async").disabled = True - logging.getLogger("uamqp.async_ops.connection_async").disabled = True - logging.getLogger("uamqp.async_ops").disabled = True - logging.getLogger("uamqp.authentication").disabled = True - logging.getLogger("uamqp.c_uamqp").disabled = True - logging.getLogger("uamqp.connection").disabled = True - logging.getLogger("uamqp.receiver").disabled = True - - try: - logger.addHandler(AzureLogHandler(connection_string=f"InstrumentationKey={config.APP_INSIGHTS_INSTRUMENTATION_KEY}")) - except ValueError: - logger.error("Application Insights instrumentation key missing or invalid") - - logging.basicConfig(level=logging_level) - logger.setLevel(logging_level) - - app = get_application() @@ -90,6 +50,8 @@ async def initialize_logging_on_startup(): else: initialize_logging(logging.INFO) + disable_unwanted_loggers() + @app.on_event("startup") @repeat_every(seconds=20, wait_first=True, logger=logging.getLogger()) diff --git a/management_api_app/services/logging.py b/management_api_app/services/logging.py new file mode 100644 --- /dev/null +++ b/management_api_app/services/logging.py @@ -0,0 +1,71 @@ +import logging +import os + +from opencensus.ext.azure.log_exporter import AzureLogHandler +from opencensus.trace import config_integration +from opencensus.trace.samplers import AlwaysOnSampler +from opencensus.trace.tracer import Tracer + + +UNWANTED_LOGGERS = [ + "azure.core.pipeline.policies.http_logging_policy", + "azure.eventhub._eventprocessor.event_processor", + "azure.identity.aio._credentials.managed_identity", + 
"azure.identity.aio._credentials.environment", + "azure.identity.aio._internal.get_token_mixin", + "azure.identity.aio._internal.decorators", + "azure.identity.aio._credentials.chained", + "azure.identity", + "msal.token_cache", + "uamqp", + "uamqp.authentication.cbs_auth_async", + "uamqp.async_ops.client_async", + "uamqp.async_ops.connection_async", + "uamqp.async_ops", + "uamqp.authentication", + "uamqp.c_uamqp", + "uamqp.connection", + "uamqp.receiver" +] + + +def disable_unwanted_loggers(): + """ + Disables the unwanted loggers. + """ + for logger_name in UNWANTED_LOGGERS: + logging.getLogger(logger_name).disabled = True + + +def initialize_logging(logging_level: int, correlation_id: str = None) -> logging.LoggerAdapter: + """ + Adds the Application Insights handler for the root logger and sets the given logging level. + Creates and returns a logger adapter that integrates the correlation ID, if given, to the log messages. + + :param logging_level: The logging level to set e.g., logging.WARNING. + :param correlation_id: Optional. The correlation ID that is passed on to the operation_Id in App Insights. + :returns: A newly created logger adapter. + """ + logger = logging.getLogger() + logger.addHandler(logging.StreamHandler()) # For logging into console + app_insights_connection_string = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING") + + try: + logger.addHandler(AzureLogHandler(connection_string=app_insights_connection_string)) + except ValueError as e: + logger.error(f"Failed to set Application Insights logger handler: {e}") + + config_integration.trace_integrations(['logging']) + logging.basicConfig(level=logging_level, format='%(asctime)s traceId=%(traceId)s spanId=%(spanId)s %(message)s') + Tracer(sampler=AlwaysOnSampler()) + logger.setLevel(logging_level) + + extra = None + + if correlation_id: + extra = {'traceId': correlation_id} + + adapter = logging.LoggerAdapter(logger, extra) + adapter.debug(f"Logger adapter initialized with extra: {extra}") + + return adapter
Application Insights configuration for API missing extension configuration

When viewing the Application Insights blade in the portal for the API web app, an indication is given that the integration is not fully enabled.

![image](https://user-images.githubusercontent.com/166592/123794341-03ed6f00-d8e3-11eb-9e09-32a2e5dec51b.png)

The instrumentation key is set, which is why some data is sent to the logs, but the full integration is not enabled. Manually enabling the integration from the portal adds additional configuration keys and values to the web app config.

Add the missing config keys and values to enable the full integration:

`APPLICATIONINSIGHTS_CONNECTION_STRING`
`ApplicationInsightsAgent_EXTENSION_VERSION ~3`
`XDT_MicrosoftApplicationInsights_BaseExtensions`
et al. [Reference](https://docs.microsoft.com/en-us/azure/azure-monitor/app/resource-manager-web-app)
2021-06-30T12:42:08
microsoft/AzureTRE
524
microsoft__AzureTRE-524
[ "515" ]
ddfca3758d9fa55fbd8942ae5c281863eed140a0
diff --git a/processor_function/vm_porter/runner.py b/processor_function/vm_porter/runner.py --- a/processor_function/vm_porter/runner.py +++ b/processor_function/vm_porter/runner.py @@ -33,7 +33,7 @@ async def receive_message(env_vars, service_bus_client): """ async with service_bus_client: q_name = env_vars["resource_request_queue"] - renewer = AutoLockRenewer() + renewer = AutoLockRenewer(max_lock_renewal_duration=1800) receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer) async with receiver:
[BUG] Service bus message times out on deployment of workspace template

**Describe the bug**

When deploying a template that takes > 10 minutes, although the deployment is successful, the status is not updated.

**Steps to reproduce**

1. Register and deploy the `azureml_devtestlabs` workspace
2. Log on to the VMSS resource processor using bastion
3. View the docker logs, wait until deployment is complete, and see something similar to:

`LinkDetach("ErrorCodes.LinkDetachForced: The link 'G3:5725658:sender-link-bd7b69d4-9ad4-4b9b-b9f6-2e311be400a3' is force detached. Code: publisher(link3135). Details: AmqpMessagePublisher.IdleTimerExpired: Idle timeout: 00:10:00.")`
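The fix in the patch above extends the message lock renewal window past porter's long deployments; a minimal sketch of that receiver setup, assuming the async `azure-servicebus` client the repo uses elsewhere:

```python
from azure.servicebus.aio import AutoLockRenewer, ServiceBusClient

async def receive(service_bus_client: ServiceBusClient, queue_name: str):
    # Renew the peek-lock for up to 30 minutes so deployments that take
    # longer than the broker's 10-minute idle timeout can still complete.
    renewer = AutoLockRenewer(max_lock_renewal_duration=1800)
    receiver = service_bus_client.get_queue_receiver(queue_name=queue_name, auto_lock_renewer=renewer)
    async with receiver:
        ...  # process messages as in the runner above
```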
2021-07-30T10:12:18
microsoft/AzureTRE
593
microsoft__AzureTRE-593
[ "345" ]
1d3ccd4bb62c57df1f5ce4fb88572d9f69e1b9d0
diff --git a/management_api_app/api/dependencies/database.py b/management_api_app/api/dependencies/database.py --- a/management_api_app/api/dependencies/database.py +++ b/management_api_app/api/dependencies/database.py @@ -2,6 +2,8 @@ from typing import Callable, Type from azure.cosmos import CosmosClient +from azure.identity import DefaultAzureCredential +from azure.mgmt.cosmosdb import CosmosDBManagementClient from fastapi import Depends, FastAPI, HTTPException from starlette.requests import Request from starlette.status import HTTP_503_SERVICE_UNAVAILABLE @@ -16,18 +18,30 @@ def connect_to_db() -> CosmosClient: logging.debug(f"Connecting to {config.STATE_STORE_ENDPOINT}") try: + primary_master_key = get_store_key() if config.DEBUG: # ignore TLS(setup is pain) when on dev container and connecting to cosmosdb on windows host. - cosmos_client = CosmosClient(config.STATE_STORE_ENDPOINT, config.STATE_STORE_KEY, - connection_verify=False) + cosmos_client = CosmosClient(config.STATE_STORE_ENDPOINT, primary_master_key, connection_verify=False) else: - cosmos_client = CosmosClient(config.STATE_STORE_ENDPOINT, config.STATE_STORE_KEY) + cosmos_client = CosmosClient(config.STATE_STORE_ENDPOINT, primary_master_key) logging.debug("Connection established") return cosmos_client except Exception as e: logging.debug(f"Connection to state store could not be established: {e}") +def get_store_key() -> str: + if config.STATE_STORE_KEY: + primary_master_key = config.STATE_STORE_KEY + else: + credential = DefaultAzureCredential(managed_identity_client_id=config.MANAGED_IDENTITY_CLIENT_ID, exclude_shared_token_cache_credential=True) + cosmosdb_client = CosmosDBManagementClient(credential, subscription_id=config.SUBSCRIPTION_ID) + database_keys = cosmosdb_client.database_accounts.list_keys(resource_group_name=config.RESOURCE_GROUP_NAME, account_name=config.COSMOSDB_ACCOUNT_NAME) + primary_master_key = database_keys.primary_master_key + + return primary_master_key + + def get_db_client(app: FastAPI) -> CosmosClient: if not app.state.cosmos_client: app.state.cosmos_client = connect_to_db() diff --git a/management_api_app/core/config.py b/management_api_app/core/config.py --- a/management_api_app/core/config.py +++ b/management_api_app/core/config.py @@ -16,9 +16,13 @@ # State store configuration STATE_STORE_ENDPOINT: str = config("STATE_STORE_ENDPOINT", default="") # Cosmos DB endpoint STATE_STORE_KEY: str = config("STATE_STORE_KEY", default="") # Cosmos DB access key +COSMOSDB_ACCOUNT_NAME: str = config("COSMOSDB_ACCOUNT_NAME", default="") # Cosmos DB account name STATE_STORE_DATABASE = "AzureTRE" STATE_STORE_RESOURCES_CONTAINER = "Resources" STATE_STORE_RESOURCE_TEMPLATES_CONTAINER = "ResourceTemplates" +SUBSCRIPTION_ID: str = config("SUBSCRIPTION_ID", default="") +RESOURCE_GROUP_NAME: str = config("RESOURCE_GROUP_NAME", default="") + # Service bus configuration SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE: str = config("SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE", default="") diff --git a/management_api_app/services/health_checker.py b/management_api_app/services/health_checker.py --- a/management_api_app/services/health_checker.py +++ b/management_api_app/services/health_checker.py @@ -1,7 +1,8 @@ from azure.core import exceptions from azure.cosmos import CosmosClient -from core.config import STATE_STORE_ENDPOINT, STATE_STORE_KEY +from api.dependencies.database import get_store_key +from core import config from models.schemas.status import StatusEnum from resources import strings @@ -10,7 +11,8 @@ def 
create_state_store_status() -> (StatusEnum, str): status = StatusEnum.ok message = "" try: - client = CosmosClient(STATE_STORE_ENDPOINT, STATE_STORE_KEY) # noqa: F841 - flake 8 client is not used + primary_master_key = get_store_key() + client = CosmosClient(config.STATE_STORE_ENDPOINT, primary_master_key) # noqa: F841 - flake 8 client is not used except exceptions.ServiceRequestError: status = StatusEnum.not_ok message = strings.STATE_STORE_ENDPOINT_NOT_RESPONDING
diff --git a/management_api_app/tests_ma/test_services/test_health_checker.py b/management_api_app/tests_ma/test_services/test_health_checker.py --- a/management_api_app/tests_ma/test_services/test_health_checker.py +++ b/management_api_app/tests_ma/test_services/test_health_checker.py @@ -7,7 +7,9 @@ @patch("services.health_checker.CosmosClient") -def test_get_state_store_status_responding(cosmos_client_mock) -> None: +@patch("services.health_checker.get_store_key") +def test_get_state_store_status_responding(cosmos_client_mock, get_store_key_mock) -> None: + get_store_key_mock.return_value = None cosmos_client_mock.return_value = None status, message = health_checker.create_state_store_status() @@ -17,7 +19,9 @@ def test_get_state_store_status_responding(cosmos_client_mock) -> None: @patch("services.health_checker.CosmosClient") -def test_get_state_store_status_not_responding(cosmos_client_mock) -> None: +@patch("services.health_checker.get_store_key") +def test_get_state_store_status_not_responding(cosmos_client_mock, get_store_key_mock) -> None: + get_store_key_mock.return_value = None cosmos_client_mock.return_value = None cosmos_client_mock.side_effect = ServiceRequestError(message="some message") @@ -28,7 +32,9 @@ def test_get_state_store_status_not_responding(cosmos_client_mock) -> None: @patch("services.health_checker.CosmosClient") -def test_get_state_store_status_other_exception(cosmos_client_mock) -> None: +@patch("services.health_checker.get_store_key") +def test_get_state_store_status_other_exception(cosmos_client_mock, get_store_key_mock) -> None: + get_store_key_mock.return_value = None cosmos_client_mock.return_value = None cosmos_client_mock.side_effect = Exception()
[Task] API to use Managed Identity to authenticate against Cosmos DB

**Is your feature request related to a problem?**

Currently the API uses an API key `STATE_STORE_KEY` to authenticate against Cosmos DB. The API uses Managed Identity to authenticate against Azure Service Bus.

**Describe the solution you'd like**

Use the same simplified access pattern to authenticate from the API to Azure resources.

Note: it looks like the Cosmos data plane doesn't really support managed identity (in Python at least), but there might be a way to do this:
https://github.com/MicrosoftDocs/azure-docs/issues/29762
https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/tutorial-linux-vm-access-cosmos-db
This is the best I came up with:

```python
from azure.cosmos import CosmosClient
from azure.identity import DefaultAzureCredential
from azure.mgmt.cosmosdb import CosmosDBManagementClient

subscription_id = "..."
resource_group = "..."
account_name = "..."

credential = DefaultAzureCredential()
dbmgmt = CosmosDBManagementClient(credential, subscription_id, "https://management.azure.com")
keys = dbmgmt.database_accounts.list_keys(resource_group, account_name)
client = CosmosClient(url="...", credential=keys.primary_master_key)
```
2021-08-11T07:04:07
microsoft/AzureTRE
633
microsoft__AzureTRE-633
[ "627" ]
42e3984e2314ca903eab6348eb7697dbafd18714
diff --git a/resource_processor/shared/logging.py b/resource_processor/shared/logging.py --- a/resource_processor/shared/logging.py +++ b/resource_processor/shared/logging.py @@ -40,6 +40,7 @@ def initialize_logging(logging_level: int, correlation_id: str) -> logging.Logge """ Adds the Application Insights handler for the root logger and sets the given logging level. Creates and returns a logger adapter that integrates the correlation ID, if given, to the log messages. + Note: This should be called only once, otherwise duplicate log entries could be produced. :param logging_level: The logging level to set e.g., logging.WARNING. :param correlation_id: Optional. The correlation ID that is passed on to the operation_Id in App Insights. @@ -68,3 +69,21 @@ def initialize_logging(logging_level: int, correlation_id: str) -> logging.Logge adapter.debug(f"Logger adapter initialized with extra: {extra}") return adapter + + +def get_message_id_logger(correlation_id: str) -> logging.LoggerAdapter: + """ + Gets a logger that includes message id for easy correlation between log entries. + :param correlation_id: Optional. The correlation ID that is passed on to the operation_Id in App Insights. + :returns: A modified logger adapter (from the original initiated one). + """ + logger = logging.getLogger() + extra = None + + if correlation_id: + extra = {'traceId': correlation_id} + + adapter = logging.LoggerAdapter(logger, extra) + adapter.debug(f"Logger adapter now includes extra: {extra}") + + return adapter diff --git a/resource_processor/vmss_porter/runner.py b/resource_processor/vmss_porter/runner.py --- a/resource_processor/vmss_porter/runner.py +++ b/resource_processor/vmss_porter/runner.py @@ -5,7 +5,7 @@ import asyncio import logging -from shared.logging import disable_unwanted_loggers, initialize_logging # pylint: disable=import-error # noqa +from shared.logging import disable_unwanted_loggers, initialize_logging, get_message_id_logger # pylint: disable=import-error # noqa from resources import strings # pylint: disable=import-error # noqa from contextlib import asynccontextmanager from azure.servicebus import ServiceBusMessage @@ -199,7 +199,7 @@ async def runner(env_vars): try: async for message in receive_message_gen: logger_adapter.info(f"Message received for id={message['id']}") - message_logger_adapter = initialize_logging(logging.INFO, message['id']) + message_logger_adapter = get_message_id_logger(message['id']) # logger includes message id in every entry. result = await deploy_porter_bundle(message, service_bus_client, env_vars, message_logger_adapter) await receive_message_gen.asend(result) except StopAsyncIteration: # the async generator when finished signals end with this exception.
[BUG] Resource processor outputs duplicate log items

**Describe the bug**

The VMSS resource processor outputs many duplicate log items (some twice, some 3 times, some 4, etc.)

**Steps to reproduce**

1. Make a query in App Insights and look for rolename=runner.py
2. Perform actions that require the RP to do something - create a workspace, for instance
3. See that some of the log entries appear multiple times
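The root cause is that every `initialize_logging` call added another handler to the root logger, so each record was emitted once per handler; the patch above instead reuses the already-initialized root logger through a `LoggerAdapter`. A minimal sketch of that reuse:

```python
import logging

# Reuse the root logger (already set up once at startup) and only wrap it
# in an adapter that injects the message id; no new handlers are added.
def get_message_id_logger(correlation_id: str) -> logging.LoggerAdapter:
    logger = logging.getLogger()
    extra = {"traceId": correlation_id} if correlation_id else None
    return logging.LoggerAdapter(logger, extra)
```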
2021-08-15T09:53:41
microsoft/AzureTRE
670
microsoft__AzureTRE-670
[ "585" ]
7769287771e3a7aa1f701bc8707cf30132141015
diff --git a/resource_processor/vmss_porter/runner.py b/resource_processor/vmss_porter/runner.py --- a/resource_processor/vmss_porter/runner.py +++ b/resource_processor/vmss_porter/runner.py @@ -99,7 +99,7 @@ async def build_porter_command(msg_body, env_vars): porter_keys = await filter_parameters_not_needed_by_porter(msg_body, env_vars) for parameter in porter_keys: - porter_parameters = porter_parameters + f" --param {parameter}={msg_body['parameters'][parameter]}" + porter_parameters = porter_parameters + f" --param {parameter}=\"{msg_body['parameters'][parameter]}\"" installation_id = msg_body['parameters']['tre_id'] + "-" + msg_body['parameters']['workspace_id'] @@ -109,7 +109,7 @@ async def build_porter_command(msg_body, env_vars): porter_parameters = porter_parameters + f" --param arm_use_msi={env_vars['arm_use_msi']}" command_line = [f"{azure_login_command(env_vars)} && az acr login --name {env_vars['registry_server'].replace('.azurecr.io','')} && porter " - f"{msg_body['action']} {installation_id} " + f"{msg_body['action']} \"{installation_id}\" " f" --reference {env_vars['registry_server']}/{msg_body['name']}:v{msg_body['version']}" f" {porter_parameters} --cred ./vmss_porter/azure.json --allow-docker-host-access" f" && porter show {installation_id}"] @@ -174,7 +174,7 @@ async def deploy_porter_bundle(msg_body, sb_client, env_vars, message_logger_ada porter_command = await build_porter_command(msg_body, env_vars) returncode, _, err = await run_porter(porter_command, env_vars) if returncode != 0: - error_message = "Error context message = " + " ".join(err.split('\n')) + error_message = "Error context message = " + " ".join(err.split('\n')) + " ; Command executed: ".join(porter_command) resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_FAILED, error_message) await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"])) message_logger_adapter.info(f"{installation_id}: Deployment job configuration failed error = {error_message}")
[BUG] Failed workspace deployment: only one positional argument may be specified, the installation name, but multiple were received

**Describe the bug**

Workspace deployment failed with:

```
only one positional argument may be specified, the installation name, but multiple were received [mrtre17-9c1d X secure project]
```

**Steps to reproduce**

Deployed a workspace with the following values:

```
"azure_location":"westeurope",
"workspace_id":"9c1d",
"tre_id":"mrtre17",
"address_space":"192.168.10.0/24",
"display_name":"Project X",
"description":"Very secure project"
```
This seems to be caused by parameter values not being enclosed in quotation marks in the porter command. I saw the same and believe it's about the description having multiple words - one word works just fine.
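The patch wraps each value in escaped double quotes; for illustration, an alternative sketch that also survives quotes inside the value itself would be `shlex.quote` (not what the patch does, shown only as a hedged option):

```python
import shlex

description = 'Very secure project'
# shlex.quote produces a shell-safe token even if the value
# contains spaces or quote characters.
param = f"--param description={shlex.quote(description)}"
# -> --param description='Very secure project'
```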
2021-08-18T17:46:15
microsoft/AzureTRE
854
microsoft__AzureTRE-854
[ "794" ]
69b4dfaaf92e3cd6599c689a9f398981019fdd34
diff --git a/resource_processor/vmss_porter/runner.py b/resource_processor/vmss_porter/runner.py --- a/resource_processor/vmss_porter/runner.py +++ b/resource_processor/vmss_porter/runner.py @@ -139,7 +139,7 @@ async def build_porter_command(msg_body, env_vars): command_line = [f"{azure_login_command(env_vars)} && {azure_acr_login_command(env_vars)} && porter " f"{msg_body['action']} \"{installation_id}\" " f" --reference {env_vars['registry_server']}/{msg_body['name']}:v{msg_body['version']}" - f" {porter_parameters} --cred ./vmss_porter/azure.json --allow-docker-host-access" + f" {porter_parameters} --cred ./vmss_porter/azure.json --allow-docker-host-access --force" f" && porter show {installation_id}"] return command_line
[Task] Modify InnerEye Deeplearning service so it can be registered with API

Story: #694

**Description**

Modify the InnerEye Deeplearning service so the template can be registered with publish_register_bundle.sh
2021-09-15T07:30:35
microsoft/AzureTRE
951
microsoft__AzureTRE-951
[ "859" ]
9bc39d486366b9bb7f20548b8e93f0d65075433d
diff --git a/api_app/_version.py b/api_app/_version.py new file mode 100644 --- /dev/null +++ b/api_app/_version.py @@ -0,0 +1 @@ +__version__ = "0.1.0" diff --git a/api_app/core/config.py b/api_app/core/config.py --- a/api_app/core/config.py +++ b/api_app/core/config.py @@ -1,5 +1,5 @@ from starlette.config import Config - +from _version import __version__ config = Config(".env") @@ -7,7 +7,7 @@ API_PREFIX = "/api" PROJECT_NAME: str = config("PROJECT_NAME", default="Azure TRE API") DEBUG: bool = config("DEBUG", cast=bool, default=False) -VERSION = "0.1.0" +VERSION = __version__ API_DESCRIPTION = "Welcome to the Azure TRE API - for more information about templates and workspaces see the [Azure TRE documentation](https://github.com/microsoft/AzureTRE/blob/main/docs/concepts.md)" # Resource Info
diff --git a/e2e_tests/test_workspace_service_creation.py b/e2e_tests/test_workspace_service_creation.py --- a/e2e_tests/test_workspace_service_creation.py +++ b/e2e_tests/test_workspace_service_creation.py @@ -13,8 +13,7 @@ workspace_service_templates = [ (strings.AZUREML_SERVICE), (strings.DEVTESTLABS_SERVICE), - (strings.GUACAMOLE_SERVICE), - (strings.INNEREYE) + (strings.GUACAMOLE_SERVICE) ]
Version the API images (tag)
Should the docker image tag match the API version? Using semantic versioning, the build number could be the actual GitHub build action number.

- Changes to the API must trigger a version update, e.g. 1.1 -> 1.2
- Fixes to business logic (hence a new docker image) would be okay with only updating the build number
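The patch above single-sources the version in `_version.py`; a hypothetical sketch of deriving an image tag from it in CI (the env var is GitHub Actions' run number; the tag format is an assumption, not part of the patch):

```python
import os

from _version import __version__  # single-sourced by the patch above

# Hypothetical tag scheme: semantic API version plus the CI build number.
build_number = os.getenv("GITHUB_RUN_NUMBER", "0")
image_tag = f"{__version__}-{build_number}"  # e.g. "0.1.0-42"
```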
2021-09-28T18:08:26
microsoft/AzureTRE
965
microsoft__AzureTRE-965
[ "964" ]
2464765ffb2291dbe5810de89debfcb780bd7f0e
diff --git a/api_app/models/schemas/user_resource_template.py b/api_app/models/schemas/user_resource_template.py --- a/api_app/models/schemas/user_resource_template.py +++ b/api_app/models/schemas/user_resource_template.py @@ -45,8 +45,8 @@ class Config: "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/user_resource.json", "type": "object", - "title": "My User Resource Template Custom Parameters", - "description": "These parameters are specific to my user resource template", + "title": "My User Resource Template", + "description": "These is a test user resource template schema", "required": [], "properties": {} } diff --git a/api_app/models/schemas/workspace_service_template.py b/api_app/models/schemas/workspace_service_template.py --- a/api_app/models/schemas/workspace_service_template.py +++ b/api_app/models/schemas/workspace_service_template.py @@ -46,8 +46,8 @@ class Config: "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace_service.json", "type": "object", - "title": "My Workspace Service Template Custom Parameters", - "description": "These parameters are specific to my workspace service template", + "title": "My Workspace Service Template", + "description": "These is a test workspace service resource template schema", "required": [], "properties": {} } diff --git a/api_app/models/schemas/workspace_template.py b/api_app/models/schemas/workspace_template.py --- a/api_app/models/schemas/workspace_template.py +++ b/api_app/models/schemas/workspace_template.py @@ -44,8 +44,8 @@ class Config: "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace.json", "type": "object", - "title": "My Workspace Template Custom Parameters", - "description": "These parameters are specific to my workspace template", + "title": "My Workspace Template", + "description": "This is a test workspace template schema", "required": [ "vm_size", "no_of_vms"
diff --git a/api_app/tests_ma/conftest.py b/api_app/tests_ma/conftest.py --- a/api_app/tests_ma/conftest.py +++ b/api_app/tests_ma/conftest.py @@ -17,8 +17,8 @@ def input_workspace_template(): "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace.json", "type": "object", - "title": "My Workspace Template Custom Parameters", - "description": "These parameters are specific to my workspace template", + "title": "My Workspace Template", + "description": "This is a test workspace template schema.", "required": [], "properties": {} }) @@ -34,8 +34,8 @@ def input_workspace_service_template(): "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace_service.json", "type": "object", - "title": "My Workspace Service Template Custom Parameters", - "description": "These parameters are specific to my workspace service template", + "title": "My Workspace Service Template", + "description": "This is a test workspace service template schema.", "required": [], "properties": {} }) @@ -51,8 +51,8 @@ def input_user_resource_template(): "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/user_resource.json", "type": "object", - "title": "My User Resource Template Custom Parameters", - "description": "These parameters are specific to my user resource template", + "title": "My User Resource Template", + "description": "These is a test user resource template schema", "required": [], "properties": {} })
Rename custom_parameters.json to template_schema.json (inc in python code)
2021-09-30T13:29:00
microsoft/AzureTRE
1039
microsoft__AzureTRE-1039
[ "1038" ]
3244daca603bbdecd8760b4b1b94793e08aa811c
diff --git a/api_app/_version.py b/api_app/_version.py --- a/api_app/_version.py +++ b/api_app/_version.py @@ -1 +1 @@ -__version__ = "0.1.1" +__version__ = "0.1.3" diff --git a/api_app/main.py b/api_app/main.py --- a/api_app/main.py +++ b/api_app/main.py @@ -1,7 +1,8 @@ import logging +import os import uvicorn -from fastapi import FastAPI +from fastapi import FastAPI, Request from fastapi.exceptions import RequestValidationError from fastapi_utils.tasks import repeat_every from starlette.exceptions import HTTPException @@ -17,6 +18,16 @@ from services.logging import disable_unwanted_loggers, initialize_logging from service_bus.deployment_status_update import receive_message_and_update_deployment +# Opencensus Azure imports +from opencensus.ext.azure.trace_exporter import AzureExporter +from opencensus.trace.attributes_helper import COMMON_ATTRIBUTES +from opencensus.trace.samplers import ProbabilitySampler +from opencensus.trace.span import SpanKind +from opencensus.trace.tracer import Tracer + +HTTP_URL = COMMON_ATTRIBUTES['HTTP_URL'] +HTTP_STATUS_CODE = COMMON_ATTRIBUTES['HTTP_STATUS_CODE'] + def get_application() -> FastAPI: application = FastAPI( @@ -64,5 +75,19 @@ async def update_deployment_status() -> None: await receive_message_and_update_deployment(app) [email protected]("http") +async def add_process_time_header(request: Request, call_next): + tracer = Tracer(exporter=AzureExporter(connection_string=f'InstrumentationKey={os.getenv("APPINSIGHTS_INSTRUMENTATIONKEY")}'), sampler=ProbabilitySampler(1.0)) + with tracer.span("main") as span: + span.span_kind = SpanKind.SERVER + + response = await call_next(request) + + tracer.add_attribute_to_current_span(attribute_key=HTTP_STATUS_CODE, attribute_value=response.status_code) + tracer.add_attribute_to_current_span(attribute_key=HTTP_URL, attribute_value=str(request.url)) + + return response + + if __name__ == "__main__": uvicorn.run(app, host="0.0.0.0", port=8000)
API app not reporting requests to AppInsights

**Description**
Ensure opencensus reports http requests to app insights.
2021-10-13T13:22:34
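For reference, a minimal, self-contained sketch of the tracing-middleware pattern the patch above introduces (assumptions: the `opencensus-ext-azure` package is installed and an `APPINSIGHTS_INSTRUMENTATIONKEY` environment variable is set; this is illustrative, not the project's exact file):

```python
import os

from fastapi import FastAPI, Request
from opencensus.ext.azure.trace_exporter import AzureExporter
from opencensus.trace.attributes_helper import COMMON_ATTRIBUTES
from opencensus.trace.samplers import ProbabilitySampler
from opencensus.trace.span import SpanKind
from opencensus.trace.tracer import Tracer

app = FastAPI()


@app.middleware("http")
async def trace_requests(request: Request, call_next):
    # One tracer per request; the exporter ships the span to Application Insights.
    tracer = Tracer(
        exporter=AzureExporter(
            connection_string=f'InstrumentationKey={os.getenv("APPINSIGHTS_INSTRUMENTATIONKEY")}'
        ),
        sampler=ProbabilitySampler(1.0),  # sample every request
    )
    with tracer.span("main") as span:
        span.span_kind = SpanKind.SERVER
        response = await call_next(request)
        # Record the URL and status code so the span shows up as request telemetry.
        tracer.add_attribute_to_current_span(COMMON_ATTRIBUTES["HTTP_URL"], str(request.url))
        tracer.add_attribute_to_current_span(COMMON_ATTRIBUTES["HTTP_STATUS_CODE"], response.status_code)
    return response
```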
microsoft/AzureTRE
1077
microsoft__AzureTRE-1077
[ "1076" ]
0300d32ef29616dc89a34c0362cf35c91f2a95b2
diff --git a/scripts/workspace-app-reg.py b/scripts/workspace-app-reg.py
deleted file mode 100644
--- a/scripts/workspace-app-reg.py
+++ /dev/null
@@ -1,163 +0,0 @@
-#!/usr/local/bin/python3
-
-import click
-import sys
-import uuid
-
-from azure.identity import AzureCliCredential
-from msgraph.core import GraphClient
-
-
-class GraphError(Exception):
-    def __init__(self, message: str, error: dict):
-        if error:
-            self.code: str = error['error']['code']
-            self.message: str = f"{message}: {error['error']['message']}"
-            self.innerError: dict = error['error']['innerError']
-        else:
-            self.message: str = message
-
-
-class CliGraphClient(GraphClient):
-    def __init__(self):
-        super().__init__(credential=AzureCliCredential(), scopes=['https://graph.microsoft.com/.default'])
-
-    def get(self, url: str, **kwargs):
-        resp = super().get(url, **kwargs)
-
-        if not resp.ok:
-            raise GraphError(f"Error calling GET {url}", resp.json())
-
-        json = resp.json()
-
-        if 'value' in json:
-            return json['value']
-
-        return json
-
-    def me(self):
-        return self.get("/me")
-
-    def default_domain(self):
-        domains = self.get("/domains")
-        for d in domains:
-            if d['isDefault']:
-                return d['id']
-
-    def get_existing_app(self, name: str) -> dict:
-        apps = self.get(f"/applications?$filter=displayName eq '{name}'")
-
-        if len(apps) > 1:
-            raise GraphError(f"There is more than one application with the name \"{name}\" already.", None)
-
-        if len(apps) == 1:
-            return apps[0]
-
-        return None
-
-    def create_app(self, app: dict) -> dict:
-        resp = self.post("/applications", json=app, headers={'Content-Type': 'application/json'})
-
-        if not resp.ok:
-            raise GraphError("Error creating application", resp.json())
-
-        return resp.json()
-
-    def update_app(self, app_object_id: str, app: dict) -> dict:
-        resp = self.patch(f"/applications/{app_object_id}", json=app, headers={'Content-Type': 'application/json'})
-        if not resp.ok:
-            raise GraphError("Error updating application", resp.json())
-        # Now get the updated app details
-        resp = super().get(f"/applications/{app_object_id}")
-        if not resp.ok:
-            raise GraphError("Error getting updating application", resp.json())
-        return resp.json()
-
-    def ensure_sp(self, appId: str, roleAssignmentRequired: bool):
-        sp = {"appId": appId, "appRoleAssignmentRequired": roleAssignmentRequired, "tags": ['WindowsAzureActiveDirectoryIntegratedApp']}
-        sps = self.get(f"/servicePrincipals?$filter=appid eq '{appId}'")
-        if len(sps) == 0:
-            resp = self.post("/servicePrincipals", json=sp, headers={'Content-Type': 'application/json'})
-            if not resp.ok:
-                raise GraphError("Error creating service principal", resp.json())
-        else:
-            resp = self.patch(f"/servicePrincipals/{sps[0]['id']}", json=sp, headers={'Content-Type': 'application/json'})
-            if not resp.ok:
-                raise GraphError("Error updating service principal", resp.json())
-
-
-def double_check(domain: str, myname: str) -> bool:
-    should_continue = input(f"You are about to create app registrations in the Azure AD Tenant \"{domain}\", signed in as \"{myname}\"\nDo you want to continue? (y/N) ")
-    return True if should_continue.lower() == "y" or should_continue.lower() == "yes" else False
-
-
-def get_role_id(app: dict, role: str) -> str:
-    if app:
-        ids = [r['id'] for r in app['appRoles'] if r['value'] == role]
-        if len(ids) == 1:
-            return ids[0]
-
-    return str(uuid.uuid4())
-
-
[email protected]()
[email protected]('-n', '--tre-name', required=True)
[email protected]('-w', '--workspace-name', required=True)
[email protected]('-f', '--force', is_flag=True, default=False)
-def main(tre_name, workspace_name, force):
-    graph = CliGraphClient()
-
-    try:
-
-        if not force and not double_check(graph.default_domain(), graph.me()['displayName']):
-            sys.exit(0)
-
-        app_name = f"{tre_name} Workspace - {workspace_name}"
-        existing_app = graph.get_existing_app(app_name)
-
-        # Define the App Roles
-        appRoles = [
-            {
-                "id": get_role_id(existing_app, 'WorkspaceResearcher'),
-                "allowedMemberTypes": ["User"],
-                "description": f"Provides access to the {tre_name} workspace {workspace_name}.",
-                "displayName": "Researchers",
-                "isEnabled": True,
-                "origin": "Application",
-                "value": "WorkspaceResearcher"
-            },
-            {
-                "id": get_role_id(existing_app, 'WorkspaceOwner'),
-                "allowedMemberTypes": ["User"],
-                "description": f"Provides ownership access to the {tre_name} workspace {workspace_name}.",
-                "displayName": "Owners",
-                "isEnabled": True,
-                "origin": "Application",
-                "value": "WorkspaceOwner"
-            }
-        ]
-
-        # Define the API application
-        workspaceApp = {
-            "displayName": app_name,
-            "appRoles": appRoles,
-            "signInAudience": "AzureADMyOrg"
-        }
-
-        if existing_app:
-            app = graph.update_app(existing_app['id'], workspaceApp)
-            print(f"Updated application \"{app['displayName']}\" (appid={app['appId']})")
-        else:
-            app = graph.create_app(workspaceApp)
-            print(f"Created application \"{app['displayName']}\" (appid={app['appId']})")
-
-        if app:
-            graph.ensure_sp(app['appId'], True)
-
-    except GraphError as graph_error:
-        print(graph_error.message)
-        sys.exit(1)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/e2e_tests/.env.tmpl b/e2e_tests/.env.tmpl --- a/e2e_tests/.env.tmpl +++ b/e2e_tests/.env.tmpl @@ -6,7 +6,7 @@ CLIENT_ID=<client_id for the app that allows direct username and password login) RESOURCE=<appid of the API app> USERNAME=<username for custom e2e tester user> PASSWORD=<password for custom e2e tester user> -SCOPE=<Workspace.Read and Workspace.Write scopes as specified in app registration>" +SCOPE=<user_impersonation scope as specified in app registration>" AUTH_TENANT_ID=<auth tenant id> TEST_WORKSPACE_APP_ID=<workspace app registration client id> ACR_NAME=<Azure container registry where the bundles are present>
Amend TRE app registration creation script to work for workspaces - requires appropriate scope

- [ ] Add flag to create workspace app reg
- [ ] Add workspace app reg scopes
- [ ] Update core app reg scope to be a single user_impersonation scope
2021-11-23T23:32:39
microsoft/AzureTRE
1653
microsoft__AzureTRE-1653
[ "1652" ]
db5cc7cf571868d592d987dae553ce97aa2602bf
diff --git a/api_app/_version.py b/api_app/_version.py --- a/api_app/_version.py +++ b/api_app/_version.py @@ -1 +1 @@ -__version__ = "0.2.10" +__version__ = "0.2.11" diff --git a/api_app/api/routes/health.py b/api_app/api/routes/health.py --- a/api_app/api/routes/health.py +++ b/api_app/api/routes/health.py @@ -3,6 +3,7 @@ from resources import strings from services.health_checker import create_resource_processor_status, create_state_store_status, create_service_bus_status from fastapi import HTTPException, status +import logging router = APIRouter() @@ -17,5 +18,8 @@ async def health_check() -> HealthCheck: ServiceStatus(service=strings.RESOURCE_PROCESSOR, status=rp_status, message=rp_message)] health_check_result = HealthCheck(services=services) if cosmos_status == StatusEnum.not_ok or sb_status == StatusEnum.not_ok or rp_status == StatusEnum.not_ok: + logging.error(f'Cosmos Status: {cosmos_status}, message: {cosmos_message}') + logging.error(f'Service Bus Status: {sb_status}, message: {sb_message}') + logging.error(f'Resource Processor Status: {rp_status}, message: {rp_message}') raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=health_check_result.json()) return health_check_result
Health check endpoint should log all the service status as it queries

Currently the `/health` endpoint queries Cosmos / Service Bus / the RP - and returns the statuses. If any are not ok, the response is a 503.

There is currently no way to query that endpoint when the gateway has blocked access - so we at least need it to log the results so we can track back and see what service was down, when.
2022-04-06T09:18:22
microsoft/AzureTRE
1656
microsoft__AzureTRE-1656
[ "1655" ]
0bba17fcb0f733aa41f4341bce89cdf76c4f33b7
diff --git a/resource_processor/vmss_porter/runner.py b/resource_processor/vmss_porter/runner.py --- a/resource_processor/vmss_porter/runner.py +++ b/resource_processor/vmss_porter/runner.py @@ -52,10 +52,11 @@ async def receive_message(service_bus_client): while True: try: logger_adapter.info("Looking for new session...") - async with service_bus_client.get_queue_receiver(queue_name=q_name, session_id=NEXT_AVAILABLE_SESSION) as receiver: + # max_wait_time=1 -> don't hold the session open after processing of the message has finished + async with service_bus_client.get_queue_receiver(queue_name=q_name, max_wait_time=1, session_id=NEXT_AVAILABLE_SESSION) as receiver: logger_adapter.info("Got a session containing messages") async with AutoLockRenewer() as renewer: - # allow a message to be auto lock renewed for up to an hour + # allow a session to be auto lock renewed for up to an hour - if it's processing a message renewer.register(receiver, receiver.session, max_lock_renewal_duration=3600) async for msg in receiver: @@ -75,23 +76,23 @@ async def receive_message(service_bus_client): else: logging.error('Message processing failed!') - logger_adapter.info(f"Message with id = {message['id']} processed as {result} and marked complete.") + logger_adapter.info(f"Message for resource_id={message['id']}, operation_id={message['operationId']} processed as {result} and marked complete.") await receiver.complete_message(msg) + logger_adapter.info("Closing session") + await renewer.close() + except OperationTimeoutError: # Timeout occurred whilst connecting to a session - this is expected and indicates no non-empty sessions are available - logger_adapter.info("No sessions for this process. Sleeping 30s then will look again...") + logger_adapter.info("No sessions for this process. Will look again...") except ServiceBusConnectionError: # Occasionally there will be a transient / network-level error in connecting to SB. - logger_adapter.info("Unknown Service Bus connection error. Sleeping and will retry...") + logger_adapter.info("Unknown Service Bus connection error. Will retry...") except Exception: # Catch all other exceptions, log them via .exception to get the stack trace, sleep, and reconnect - logger_adapter.exception("Unknown exception. Sleeping and will retry...") - - finally: - await asyncio.sleep(30) + logger_adapter.exception("Unknown exception. Will retry...") async def run_porter(command):
Service Bus Sessions never terminate

After a processor receives a message on a session, it hangs onto that session indefinitely, blocking the thread and meaning other messages cannot be processed. We need to terminate the session after each message has been processed / errored out.
2022-04-06T11:30:24
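A condensed sketch of the session-handling loop the patch above arrives at (assumptions: the async `azure-servicebus` SDK and a session-enabled queue named `q_name`; error handling and message processing are stubbed):

```python
from azure.servicebus import NEXT_AVAILABLE_SESSION
from azure.servicebus.aio import AutoLockRenewer, ServiceBusClient
from azure.servicebus.exceptions import OperationTimeoutError, ServiceBusConnectionError


async def receive_one_session(service_bus_client: ServiceBusClient, q_name: str):
    try:
        # max_wait_time=1 releases the session once no more messages arrive,
        # instead of holding it (and this worker) open indefinitely.
        async with service_bus_client.get_queue_receiver(
            queue_name=q_name, max_wait_time=1, session_id=NEXT_AVAILABLE_SESSION
        ) as receiver:
            async with AutoLockRenewer() as renewer:
                # Keep the session lock alive while a message is being processed.
                renewer.register(receiver, receiver.session, max_lock_renewal_duration=3600)
                async for msg in receiver:
                    ...  # process the message here
                    await receiver.complete_message(msg)
                await renewer.close()
    except OperationTimeoutError:
        pass  # expected: no non-empty session was available; look again
    except ServiceBusConnectionError:
        pass  # transient network error; retry on the next iteration
```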
microsoft/AzureTRE
1685
microsoft__AzureTRE-1685
[ "1677" ]
02b13f5ccf9c94a4ddeaa11931887cfe04764c4c
diff --git a/api_app/_version.py b/api_app/_version.py --- a/api_app/_version.py +++ b/api_app/_version.py @@ -1 +1 @@ -__version__ = "0.2.12" +__version__ = "0.2.13" diff --git a/api_app/db/repositories/resource_templates.py b/api_app/db/repositories/resource_templates.py --- a/api_app/db/repositories/resource_templates.py +++ b/api_app/db/repositories/resource_templates.py @@ -101,6 +101,9 @@ def create_template(self, template_input: ResourceTemplateInCreate, resource_typ "customActions": template_input.customActions } + if "pipeline" in template_input.json_schema: + template["pipeline"] = template_input.json_schema["pipeline"] + if resource_type == ResourceType.UserResource: template["parentWorkspaceService"] = parent_service_name template = parse_obj_as(UserResourceTemplate, template) diff --git a/api_app/models/domain/resource_template.py b/api_app/models/domain/resource_template.py --- a/api_app/models/domain/resource_template.py +++ b/api_app/models/domain/resource_template.py @@ -43,6 +43,7 @@ class ResourceTemplate(AzureTREModel): properties: Dict[str, Property] = Field(title="Template properties") actions: List[CustomAction] = Field(default=[], title="Template actions") customActions: List[CustomAction] = Field(default=[], title="Template custom actions") + pipeline: Optional[dict] = Field(default=None, title="Template pipeline to define updates to other resources") # setting this to false means if extra, unexpected fields are supplied, the request is invalidated additionalProperties: bool = Field(default=False, title="Prevent unspecified properties being applied")
diff --git a/api_app/tests_ma/test_db/test_repositories/test_resource_templates_repository.py b/api_app/tests_ma/test_db/test_repositories/test_resource_templates_repository.py --- a/api_app/tests_ma/test_db/test_repositories/test_resource_templates_repository.py +++ b/api_app/tests_ma/test_db/test_repositories/test_resource_templates_repository.py @@ -1,5 +1,6 @@ import pytest from mock import patch +from models.domain.user_resource_template import UserResourceTemplate from db.repositories.resource_templates import ResourceTemplateRepository from db.errors import EntityDoesNotExist @@ -145,3 +146,34 @@ def test_create_item_created_with_the_expected_type(uuid_mock, save_item_mock, r ) save_item_mock.assert_called_once_with(expected_resource_template) assert expected_resource_template == returned_template + + +@patch('db.repositories.resource_templates.ResourceTemplateRepository.save_item') +@patch('uuid.uuid4') +def test_create_item_with_pipeline_succeeds(uuid_mock, save_item_mock, resource_template_repo, input_user_resource_template): + uuid_mock.return_value = "1234" + expected_type = ResourceType.UserResource + # add the pipeline block + pipeline = { + "upgrade": [], + "install": [], + "uninstall": [] + } + input_user_resource_template.json_schema["pipeline"] = pipeline + returned_template = resource_template_repo.create_template(input_user_resource_template, expected_type) + expected_resource_template = UserResourceTemplate( + id="1234", + name=input_user_resource_template.name, + title=input_user_resource_template.json_schema["title"], + description=input_user_resource_template.json_schema["description"], + version=input_user_resource_template.version, + resourceType=expected_type, + properties=input_user_resource_template.json_schema["properties"], + customActions=input_user_resource_template.customActions, + required=input_user_resource_template.json_schema["required"], + current=input_user_resource_template.current, + pipeline=pipeline, + parentWorkspaceService="" + ) + save_item_mock.assert_called_once_with(expected_resource_template) + assert expected_resource_template == returned_template
Create basic, no-op `pipeline: {}` block in new `dev-vm` resource

This task is to enable future work on the `pipeline: {}` without interfering with existing functionality.

- [x] Copy the `guacamole-azure-linuxvm` to `guacamole-dev-vm`
- [x] Add a basic 3-step `pipeline: {}` to the schema to update the `display_name` of the firewall shared service, run main, then update the firewall name again.
- [x] Make sure the `pipeline: {}` block is ignored/removed during schema validation.
- [x] Start a doc to describe what the `pipeline:{}` is for. This will be updated as features are added in subsequent tasks.
2022-04-11T16:50:44
microsoft/AzureTRE
1686
microsoft__AzureTRE-1686
[ "1549" ]
70a7b0462dd75d824aa446f88eb3b8e3d7cbc27c
diff --git a/api_app/_version.py b/api_app/_version.py --- a/api_app/_version.py +++ b/api_app/_version.py @@ -1 +1 @@ -__version__ = "0.2.13" +__version__ = "0.2.14" diff --git a/api_app/api/routes/api.py b/api_app/api/routes/api.py --- a/api_app/api/routes/api.py +++ b/api_app/api/routes/api.py @@ -113,6 +113,14 @@ async def get_openapi_json(workspace_id: str, request: Request, workspace_repo=D } openapi_definitions[workspace_id]['components']['securitySchemes']['oauth2']['flows']['authorizationCode']['scopes'] = workspace_scopes + # Add an example into every workspace_id path parameter so users don't have to cut and paste them in. + for route in openapi_definitions[workspace_id]['paths'].values(): + for verb in route.values(): + # We now have a list of parameters for each route + for parameter in verb['parameters']: + if (parameter['name'] == 'workspace_id'): + parameter['schema']['example'] = workspace_id + return openapi_definitions[workspace_id]
WS id can be taken from url

**Describe the bug**
When using the 'second' swagger api (https://<azure_tre_fqdn>/api/workspaces/<workspace_id>/docs), the one intended for the ws owner, the url itself contains the ws id, yet the id is still required for each api call. Reducing this parameter, if possible, would improve usability.

**Steps to reproduce**
![image](https://user-images.githubusercontent.com/13205761/159153482-a625994d-f70c-4236-b9ef-b3756575ca49.png)
2022-04-11T23:34:54
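The essence of the fix above, restated as a standalone snippet (a sketch: `openapi_definition` stands in for the per-workspace definition dict the route builds; the `.get` guard is an addition for safety, not in the diff):

```python
def add_workspace_id_examples(openapi_definition: dict, workspace_id: str) -> None:
    # Pre-fill every workspace_id path parameter so Swagger UI users don't
    # have to copy and paste the id from the URL into each call.
    for route in openapi_definition["paths"].values():
        for verb in route.values():
            for parameter in verb.get("parameters", []):
                if parameter["name"] == "workspace_id":
                    parameter["schema"]["example"] = workspace_id
```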
microsoft/AzureTRE
1754
microsoft__AzureTRE-1754
[ "1753" ]
24574065722cb7ef22039d55e945d1f55e3a7f2b
diff --git a/api_app/_version.py b/api_app/_version.py --- a/api_app/_version.py +++ b/api_app/_version.py @@ -1 +1 @@ -__version__ = "0.2.28" +__version__ = "0.3.0"
Release version 0.3

## Description
As a TRE developer
I want to release current code base as version 0.3
So that people can use a more stable version going forward

## Acceptance criteria
- [ ] All core apps are bumped to 0.3
- [ ] All bundles are bumped to 0.3
- [ ] A tag is created
- [ ] A release is created
2022-04-28T10:38:05
microsoft/AzureTRE
1757
microsoft__AzureTRE-1757
[ "1759" ]
c1ce0a0eee3f9d27d5efecf32002c1f171295a40
diff --git a/scripts/db_migrations.py b/scripts/db_migrations.py --- a/scripts/db_migrations.py +++ b/scripts/db_migrations.py @@ -16,11 +16,14 @@ class TRECosmosDBMigrations: def __init__(self): + if (self.can_connect_to_cosmos()): + url = os.environ['STATE_STORE_ENDPOINT'] + key = self.get_store_key() + self.client = CosmosClient(url=url, credential=key) + self.database = self.client.get_database_client(STATE_STORE_DATABASE) - url = os.environ['STATE_STORE_ENDPOINT'] - key = self.get_store_key() - self.client = CosmosClient(url=url, credential=key) - self.database = self.client.get_database_client(STATE_STORE_DATABASE) + def can_connect_to_cosmos(self) -> bool: + return os.getenv('ENABLE_LOCAL_DEBUGGING', 'False').lower() in ('true', 1, 't') if 'ENABLE_LOCAL_DEBUGGING' in os.environ else False def get_store_key(self) -> str: if 'STATE_STORE_KEY' in os.environ: @@ -106,24 +109,27 @@ def moveAuthInformationToProperties(self, resources_container_name): def main(): migrations = TRECosmosDBMigrations() - # PR 1030 - migrations.renameCosmosDBFields("Resources", 'resourceTemplateName', 'templateName') - migrations.renameCosmosDBFields("Resources", 'resourceTemplateVersion', 'templateVersion') - migrations.renameCosmosDBFields("Resources", 'resourceTemplateParameters', 'properties') - - # PR 1031 - migrations.renameCosmosDBFields("Resources", 'workspaceType', 'templateName') - migrations.renameCosmosDBFields("Resources", 'workspaceServiceType', 'templateName') - migrations.renameCosmosDBFields("Resources", 'userResourceType', 'templateName') - - # Operations History - migrations.moveDeploymentsToOperations("Resources", "Operations") - - # Shared services (PR #1717) - migrations.deleteDuplicatedSharedServices("Resources") - - # Authentication needs to be in properties so we can update them. (PR #1726) - migrations.moveAuthInformationToProperties("Resources") + if not migrations.can_connect_to_cosmos(): + print('You cannot migrate the cosmos database without setting ENABLE_LOCAL_DEBUGGING to true.') + else: + # PR 1030 + migrations.renameCosmosDBFields("Resources", 'resourceTemplateName', 'templateName') + migrations.renameCosmosDBFields("Resources", 'resourceTemplateVersion', 'templateVersion') + migrations.renameCosmosDBFields("Resources", 'resourceTemplateParameters', 'properties') + + # PR 1031 + migrations.renameCosmosDBFields("Resources", 'workspaceType', 'templateName') + migrations.renameCosmosDBFields("Resources", 'workspaceServiceType', 'templateName') + migrations.renameCosmosDBFields("Resources", 'userResourceType', 'templateName') + + # Operations History + migrations.moveDeploymentsToOperations("Resources", "Operations") + + # Shared services (PR #1717) + migrations.deleteDuplicatedSharedServices("Resources") + + # Authentication needs to be in properties so we can update them. (PR #1726) + migrations.moveAuthInformationToProperties("Resources") if __name__ == "__main__":
make all/tre-deploy fails due to db-migrate

**Describe the bug**
Running make `all` (or `tre-deploy` for that matter) fails because it runs `db-migrate`, which needs direct access to cosmos.

**Steps to reproduce**
1. Make sure you don't have `ENABLE_LOCAL_DEBUGGING=true` in your .env files
2. Deploy a new environment by running `make all` or `make tre-deploy`
3. See that the activity fails due to lack of access to cosmos
2022-04-28T22:58:55
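One detail worth flagging in the guard added above: the membership test `...lower() in ('true', 1, 't')` compares a lowered string against the integer `1`, which can never match. A more conventional boolean-flag parse (a sketch of the general pattern, not the project's code) looks like:

```python
import os


def env_flag(name: str, default: bool = False) -> bool:
    """Interpret an environment variable such as ENABLE_LOCAL_DEBUGGING as a boolean."""
    value = os.environ.get(name)
    if value is None:
        return default
    # Compare strings to strings so values like "1", "True" and "yes" all work.
    return value.strip().lower() in ("1", "true", "t", "yes", "on")
```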
microsoft/AzureTRE
1764
microsoft__AzureTRE-1764
[ "1736" ]
8a2f7998829142c2ad00780b30fd3cabb79f6658
diff --git a/api_app/_version.py b/api_app/_version.py --- a/api_app/_version.py +++ b/api_app/_version.py @@ -1 +1 @@ -__version__ = "0.2.23" +__version__ = "0.2.24" diff --git a/api_app/models/domain/resource_template.py b/api_app/models/domain/resource_template.py --- a/api_app/models/domain/resource_template.py +++ b/api_app/models/domain/resource_template.py @@ -23,6 +23,7 @@ class Property(AzureTREModel): pattern: Optional[str] = Field(None, title="Pattern") updateable: Optional[bool] = Field(None, title="Indicates that the field can be updated") readOnly: Optional[bool] = Field(None, title="Indicates the field is read-only") + items: Optional[dict] = None # items can contain sub-properties class CustomAction(AzureTREModel): diff --git a/resource_processor/resources/commands.py b/resource_processor/resources/commands.py --- a/resource_processor/resources/commands.py +++ b/resource_processor/resources/commands.py @@ -1,6 +1,8 @@ import asyncio import json import logging +import base64 + from resources.helpers import get_installation_id from shared.logging import shell_output_logger @@ -50,6 +52,13 @@ async def build_porter_command(config, logger, msg_body, custom_action=False): # only append if we have a value, porter will complain anyway about missing parameters if parameter_value is not None: + if isinstance(parameter_value, dict) or isinstance(parameter_value, list): + # base64 encode complex types to pass in safely + val = json.dumps(parameter_value) + val_bytes = val.encode("ascii") + val_base64_bytes = base64.b64encode(val_bytes) + parameter_value = val_base64_bytes.decode("ascii") + porter_parameters = porter_parameters + f" --param {parameter_name}=\"{parameter_value}\"" installation_id = get_installation_id(msg_body)
diff --git a/api_app/tests_ma/test_db/test_repositories/test_resource_repository.py b/api_app/tests_ma/test_db/test_repositories/test_resource_repository.py
--- a/api_app/tests_ma/test_db/test_repositories/test_resource_repository.py
+++ b/api_app/tests_ma/test_db/test_repositories/test_resource_repository.py
@@ -89,6 +89,45 @@ def sample_resource_template() -> ResourceTemplate:
         actions=[]).dict(exclude_none=True)
 
 
+def sample_nested_template() -> ResourceTemplate:
+    return ResourceTemplate(
+        id="123",
+        name="template1",
+        description="description",
+        version="0.1.0",
+        resourceType=ResourceType.Workspace,
+        current=True,
+        required=[],
+        properties={
+            'rules': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'required': [],
+                    'properties': {
+                        'protocol': {
+                            'type': 'object',
+                            'required': ['port'],
+                            'items': {
+                                'type': 'object',
+                                'properties': {
+                                    'port': {
+                                        'type': 'string'
+                                    },
+                                    'method': {
+                                        'type': 'string'
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        },
+        customActions=[]
+    ).dict(exclude_none=True)
+
+
 @patch("db.repositories.resources.ResourceRepository._get_enriched_template")
 @patch("db.repositories.resources.ResourceRepository._validate_resource_parameters", return_value=None)
 def test_validate_input_against_template_returns_template_version_if_template_is_valid(_, enriched_template_mock, resource_repo, workspace_input):
@@ -141,6 +180,42 @@ def test_validate_input_against_template_raises_value_error_if_payload_is_invali
     resource_repo.validate_input_against_template("template1", workspace_input, ResourceType.Workspace)
 
 
+@patch("db.repositories.resources.ResourceRepository._get_enriched_template")
+def test_validate_input_against_nested_template_missing_nested_prop(enriched_template_mock, resource_repo):
+    enriched_template_mock.return_value = sample_nested_template()
+    # missing port
+    nested_input = WorkspaceInCreate(templateName="template1")
+    nested_input.properties['rules'] = [
+        {
+            'protocol': {
+                'method': 'post'
+            }
+        }
+    ]
+
+    with pytest.raises(ValidationError):
+        resource_repo.validate_input_against_template("template1", nested_input, ResourceType.Workspace)
+
+
+@patch("db.repositories.resources.ResourceRepository._get_enriched_template")
+def test_validate_input_against_nested_template_valid(enriched_template_mock, resource_repo):
+    enriched_template_mock.return_value = sample_nested_template()
+
+    # has required props, nested
+    nested_input = WorkspaceInCreate(templateName="template1")
+    nested_input.properties['rules'] = [
+        {
+            'protocol': {
+                'method': 'post',
+                'port': '1234'
+            }
+        }
+    ]
+
+    resp_template = resource_repo.validate_input_against_template("template1", nested_input, ResourceType.Workspace)
+    assert resp_template is not None
+
+
 @patch("db.repositories.resources.ResourceTemplateRepository.get_current_template")
 def test_get_enriched_template_returns_the_enriched_template(get_current_mock, resource_repo):
     workspace_template = ResourceTemplate(id="abc", name="template1", description="", version="", resourceType=ResourceType.Workspace, current=True, required=[], properties={}, customActions=[])
diff --git a/e2e_tests/test_shared_services.py b/e2e_tests/test_shared_services.py
--- a/e2e_tests/test_shared_services.py
+++ b/e2e_tests/test_shared_services.py
@@ -1,7 +1,11 @@
 import pytest
 import logging
 
-from helpers import disable_and_delete_resource, post_resource, get_shared_service_id_by_name
+from helpers import (
+    disable_and_delete_resource,
+    post_resource,
+    get_shared_service_id_by_name,
+)
 from resources import strings
 
 
@@ -15,14 +19,74 @@ async def test_patch_firewall(admin_token, verify):
     patch_payload = {
         "properties": {
             "display_name": "TEST",
+            "rule_collections": [
+                {
+                    "name": "e2e-rule-collection-1",
+                    "action": "Allow",
+                    "rules": [
+                        {
+                            "name": "e2e test rule 1",
+                            "description": "desc here",
+                            "protocols": [{"port": "5555", "type": "Http"}],
+                            "target_fqdns": [
+                                "one.two.three.microsoft.com",
+                                "two.three.microsoft.com"
+                            ],
+                            "source_addresses": ["172.196.0.0"]
+                        }
+                    ]
+                },
+                {
+                    "name": "e2e-rule-collection-2",
+                    "action": "Allow",
+                    "rules": [
+                        {
+                            "name": "e2e test rule 1",
+                            "description": "desc here",
+                            "protocols": [{"port": "5556", "type": "Http"}],
+                            "target_fqdns": [
+                                "one.two.microsoft.com",
+                                "two.microsoft.com"
+                            ],
+                            "source_addresses": ["172.196.0.1"]
+                        }
+                    ]
+                },
+                {
+                    "name": "e2e-rule-collection-3",
+                    "action": "Allow",
+                    "priority": 501,
+                    "rules": [
+                        {
+                            "name": "e2e test rule 1",
+                            "description": "desc here",
+                            "protocols": [{"port": "5557", "type": "Http"}],
+                            "target_fqdns": [
+                                "one.two.three.microsoft.com.uk"
+                            ],
+                            "source_addresses": ["172.196.0.2"]
+                        }
+                    ]
+                }
+            ],
         },
         "templateName": template_name,
     }
 
-    shared_service_firewall = await get_shared_service_id_by_name(template_name, verify, admin_token)
+    shared_service_firewall = await get_shared_service_id_by_name(
+        template_name, verify, admin_token
+    )
     shared_service_path = f'/shared-services/{shared_service_firewall["id"]}'
 
-    await post_resource(patch_payload, f'/api{shared_service_path}', 'shared_service', admin_token, None, verify, method="PATCH")
+    await post_resource(
+        patch_payload,
+        f"/api{shared_service_path}",
+        "shared_service",
+        admin_token,
+        None,
+        verify,
+        method="PATCH",
+    )
 
 
 shared_service_templates_to_create = [
@@ -36,20 +100,35 @@ async def test_create_shared_service(template_name, admin_token, verify) -> None:
     # Check that the shared service hasn't already been created
-    shared_service = await get_shared_service_id_by_name(template_name, verify, admin_token)
+    shared_service = await get_shared_service_id_by_name(
+        template_name, verify, admin_token
+    )
     if shared_service:
         id = shared_service["id"]
-        LOGGER.info(f"Shared service {template_name} already exists (id {id}), deleting it first...")
-        await disable_and_delete_resource(f'/api/shared-services/{id}', 'shared_service', admin_token, None, verify)
+        LOGGER.info(
+            f"Shared service {template_name} already exists (id {id}), deleting it first..."
+        )
+        await disable_and_delete_resource(
+            f"/api/shared-services/{id}", "shared_service", admin_token, None, verify
+        )
 
     post_payload = {
         "templateName": template_name,
         "properties": {
            "display_name": f"Shared service {template_name}",
-            "description": f"{template_name} deployed via e2e tests"
-        }
+            "description": f"{template_name} deployed via e2e tests",
+        },
    }
 
-    shared_service_path, _ = await post_resource(post_payload, '/api/shared-services', 'shared_service', admin_token, None, verify)
+    shared_service_path, _ = await post_resource(
+        post_payload,
+        "/api/shared-services",
+        "shared_service",
+        admin_token,
+        None,
+        verify,
+    )
 
-    await disable_and_delete_resource(f'/api{shared_service_path}', 'shared_service', admin_token, None, verify)
+    await disable_and_delete_resource(
+        f"/api{shared_service_path}", "shared_service", admin_token, None, verify
+    )
Change firewall bundle to read rules from resource properties

In order for the firewall to be updated by the API / other resources as part of pipelines, it must read the 'configurable' properties (parts of the firewall we want to allow a user/process to update) from the resource properties in cosmos.
2022-05-03T16:16:03
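The round trip implied by the patch above, sketched end to end (the decode side is an assumption about how a receiving bundle would unpack the value; it is not shown in the diff):

```python
import base64
import json


def encode_parameter(value):
    # Dicts and lists are JSON-serialised and base64-encoded so they survive
    # shell quoting when passed as --param name="value"; scalars pass through.
    if isinstance(value, (dict, list)):
        return base64.b64encode(json.dumps(value).encode("ascii")).decode("ascii")
    return value


def decode_parameter(encoded: str):
    # Hypothetical receiving side: reverse the encoding to recover the structure.
    return json.loads(base64.b64decode(encoded).decode("ascii"))


assert decode_parameter(encode_parameter({"rules": [1, 2]})) == {"rules": [1, 2]}
```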
LibraryOfCongress/concordia
230
LibraryOfCongress__concordia-230
[ "229" ]
9d64aa9fbf677c03ed7031dfcdd0efdcc3bf24e7
diff --git a/concordia/settings_template.py b/concordia/settings_template.py --- a/concordia/settings_template.py +++ b/concordia/settings_template.py @@ -103,15 +103,17 @@ MIDDLEWARE = [ "django_prometheus_metrics.middleware.PrometheusBeforeMiddleware", "django.middleware.security.SecurityMiddleware", + # WhiteNoise serves static files efficiently: + "whitenoise.middleware.WhiteNoiseMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.contrib.messages.middleware.MessageMiddleware", "django.middleware.clickjacking.XFrameOptionsMiddleware", + "maintenance_mode.middleware.MaintenanceModeMiddleware", # Machina "machina.apps.forum_permission.middleware.ForumPermissionMiddleware", - "maintenance_mode.middleware.MaintenanceModeMiddleware", ] TEMPLATES = [ @@ -277,15 +279,16 @@ "REGION": os.getenv("AWS_REGION"), } +STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage" +WHITENOISE_ROOT = STATIC_ROOT + PASSWORD_RESET_TIMEOUT_DAYS = 1 ACCOUNT_ACTIVATION_DAYS = 1 REGISTRATION_OPEN = True # set to false to temporarily disable registrations -MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage' +MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage" -MESSAGE_TAGS = { - messages.ERROR: 'danger', -} +MESSAGE_TAGS = {messages.ERROR: "danger"} SENTRY_DSN = os.environ.get("SENTRY_DSN", "") SENTRY_PUBLIC_DSN = os.environ.get("SENTRY_PUBLIC_DSN", "")
Change Docker entrypoint to use gunicorn

* [x] Run WSGI server using gunicorn
* [ ] Do some basic testing with boom / siege / wrk to set reasonable defaults for concurrency
2018-09-20T13:47:23
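For context on the patch above: WhiteNoise's documentation asks for its middleware directly after `SecurityMiddleware`, which is what the change does. A minimal illustrative Django settings fragment (a sketch, not the project's full configuration):

```python
# Illustrative Django settings fragment.
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    # WhiteNoise sits right after SecurityMiddleware so static file requests
    # are answered before the rest of the middleware stack runs.
    "whitenoise.middleware.WhiteNoiseMiddleware",
    # ... the remaining middleware ...
]

# Hashed + compressed static files for cache-friendly serving under gunicorn.
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
```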
LibraryOfCongress/concordia
240
LibraryOfCongress__concordia-240
[ "208" ]
556c3353cb983a217407494c42e1cb2fe5171fda
diff --git a/concordia/context_processors.py b/concordia/context_processors.py --- a/concordia/context_processors.py +++ b/concordia/context_processors.py @@ -6,7 +6,10 @@ def system_configuration(request): Expose some system configuration to the default template context """ - return {"SENTRY_PUBLIC_DSN": getattr(settings, "SENTRY_PUBLIC_DSN", None)} + return { + "SENTRY_PUBLIC_DSN": getattr(settings, "SENTRY_PUBLIC_DSN", None), + "CONCORDIA_ENVIRONMENT": settings.CONCORDIA_ENVIRONMENT, + } def site_navigation(request):
Enable Adobe DTM for Analytics (closes #160)

This embeds the code but Adobe’s instructions violate web performance guidelines and we should review this carefully to see how much Adobe is affecting site performance.
By review carefully, you mean we should evaluate the performance after I click approve and merge? :)

@rstorey Yes – as in we should make sure we have a way to know how long our pages are taking to load for real visitors

The original instructions didn't mention this but we need to make this conditional based on the environment. Do you want to make it a setting or just have an if check on the CONCORDIA_ENVIRONMENT setting?

Let's use the CONCORDIA_ENVIRONMENT setting. I also have an ENV_NAME setting introduced in the deployment branch which is used to determine which secrets to retrieve.

I'll check in with Leah about the placement of the script. That seems odd.

@elainekamlley apparently it's Adobe's requirement since they want to be able to run earlier. I think it'll be covered by monitoring it and we can assess strategies for mitigation if it becomes one of the primary slow points.
2018-09-20T15:17:07
LibraryOfCongress/concordia
307
LibraryOfCongress__concordia-307
[ "300" ]
af6432e23a787bc32c7acb47519bcae19f8db4f1
diff --git a/concordia/views.py b/concordia/views.py --- a/concordia/views.py +++ b/concordia/views.py @@ -266,7 +266,7 @@ def get_queryset(self): slug=self.kwargs["slug"], ) - asset_qs = self.item.asset_set.all() + asset_qs = self.item.asset_set.all().order_by("sequence") asset_qs = asset_qs.select_related( "item__project__campaign", "item__project", "item" )
Images in an item should be sorted sequentially not alphabetically

**What behavior did you observe? Please describe the bug**
Items with more than 10 or more than 100 images don't display in the correct order. They seem to be alphabetically sorted. Images are displayed 1, 10, 100, etc. instead of 1, 2, 3.

**How can we reproduce the bug?**
Steps to reproduce the behavior:
1. Go to any item that has more than 10 or more than 100 items.
2. Click on the Next arrow after the first page.
3. Look at the image numbers displayed under each image.

**What is the expected behavior?**
Images should be displayed sequentially starting from 1. Image 100 should come after 99, Image 10 after 9, etc.
2018-09-28T17:15:53
LibraryOfCongress/concordia
308
LibraryOfCongress__concordia-308
[ "291" ]
c830da593cbeb0ddd7582e867e62157497763ca2
diff --git a/concordia/urls.py b/concordia/urls.py --- a/concordia/urls.py +++ b/concordia/urls.py @@ -140,7 +140,7 @@ ) urlpatterns = [ - re_path(r"^$", TemplateView.as_view(template_name="home.html"), name="homepage"), + re_path(r"^$", views.HomeView.as_view(), name="homepage"), path(r"healthz", views.healthz, name="health-check"), path("about/", views.static_page, name="about"), re_path(r"^contact/$", views.ContactUsView.as_view(), name="contact"), diff --git a/concordia/views.py b/concordia/views.py --- a/concordia/views.py +++ b/concordia/views.py @@ -145,6 +145,13 @@ def form_valid(self, form): return super().form_valid(form) +class HomeView(ListView): + template_name = "home.html" + + queryset = Campaign.objects.published().order_by("title") + context_object_name = "campaigns" + + class CampaignListView(ListView): template_name = "transcriptions/campaign_list.html" paginate_by = 10
Link campaigns to project pages on the homepage

In the campaign section:
- [x] Remove the get started button
- [x] link image to go to the projects page for that campaign
2018-09-28T18:48:00
LibraryOfCongress/concordia
354
LibraryOfCongress__concordia-354
[ "174" ]
40c3f95d7355363862bdabbe7cbd56e86957e23b
diff --git a/concordia/models.py b/concordia/models.py --- a/concordia/models.py +++ b/concordia/models.py @@ -84,7 +84,7 @@ def get_absolute_url(self): return reverse("transcriptions:campaign", args=(self.slug,)) -class Project(models.Model): +class Project(MetricsModelMixin("project"), models.Model): objects = PublicationManager() campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE) @@ -112,7 +112,7 @@ def get_absolute_url(self): ) -class Item(models.Model): +class Item(MetricsModelMixin("item"), models.Model): objects = PublicationManager() project = models.ForeignKey( @@ -155,7 +155,7 @@ def get_absolute_url(self): ) -class Asset(models.Model): +class Asset(MetricsModelMixin("asset"), models.Model): item = models.ForeignKey(Item, on_delete=models.CASCADE) title = models.CharField(max_length=100) @@ -199,7 +199,7 @@ def get_absolute_url(self): ) -class Tag(models.Model): +class Tag(MetricsModelMixin("tag"), models.Model): TAG_VALIDATOR = RegexValidator(r"^[- _'\w]{1,50}$") value = models.CharField(max_length=50, validators=[TAG_VALIDATOR]) @@ -207,7 +207,9 @@ def __str__(self): return self.value -class UserAssetTagCollection(models.Model): +class UserAssetTagCollection( + MetricsModelMixin("user_asset_tag_collection"), models.Model +): asset = models.ForeignKey(Asset, on_delete=models.CASCADE) user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE) @@ -220,7 +222,7 @@ def __str__(self): return "{} - {}".format(self.asset, self.user) -class Transcription(models.Model): +class Transcription(MetricsModelMixin("transcription"), models.Model): asset = models.ForeignKey(Asset, on_delete=models.CASCADE) user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
MetricsModelMixin is missing from several models
2018-10-09T14:25:50
LibraryOfCongress/concordia
370
LibraryOfCongress__concordia-370
[ "369" ]
3f556708c479362d5f8c81963033b059aceea167
diff --git a/importer/tasks.py b/importer/tasks.py --- a/importer/tasks.py +++ b/importer/tasks.py @@ -16,7 +16,7 @@ from django.utils.timezone import now from requests.exceptions import HTTPError -from concordia.models import Asset, MediaType +from concordia.models import Asset, Item, MediaType from concordia.storage import ASSET_STORAGE from importer.models import ImportItem, ImportItemAsset, ImportJob @@ -259,10 +259,9 @@ def create_item_import_task(self, import_job_pk, item_url): resp.raise_for_status() item_data = resp.json() - item, item_created = import_job.project.item_set.get_or_create( + item, item_created = Item.objects.get_or_create( item_id=get_item_id_from_item_url(item_data["item"]["id"]), - item_url=item_url, - project=import_job.project, + defaults={"item_url": item_url, "project": import_job.project}, ) import_item, import_item_created = import_job.items.get_or_create( @@ -272,7 +271,10 @@ def create_item_import_task(self, import_job_pk, item_url): if not item_created: logger.warning("Not reprocessing existing item %s", item) import_item.status = "Not reprocessing existing item %s" % item - import_item.completed = now() + import_item.completed = import_item.last_started = now() + import_item.task_id = self.request.id + import_item.full_clean() + import_item.save() return import_item.item.metadata.update(item_data)
Change import duplicate item check to be global rather than per project

Following on from discussion between @vvh and @acdha
2018-10-11T18:34:09
LibraryOfCongress/concordia
373
LibraryOfCongress__concordia-373
[ "271" ]
2a5db1302be8eb2c79624f0fd32976a3d4928992
diff --git a/concordia/admin.py b/concordia/admin.py --- a/concordia/admin.py +++ b/concordia/admin.py @@ -12,6 +12,7 @@ from django.utils.html import format_html from django.views.decorators.cache import never_cache +from exporter import views as exporter_views from importer.tasks import import_items_into_project_from_url from importer.utils.excel import slurp_excel @@ -26,6 +27,7 @@ Transcription, UserAssetTagCollection, ) +from .views import ReportCampaignView def publish_action(modeladmin, request, queryset): @@ -201,6 +203,32 @@ class CampaignAdmin(admin.ModelAdmin, CustomListDisplayFieldsMixin): actions = (publish_action, unpublish_action) + def get_urls(self): + urls = super().get_urls() + + app_label = self.model._meta.app_label + model_name = self.model._meta.model_name + + custom_urls = [ + path( + "exportCSV/<path:campaign_slug>", + exporter_views.ExportCampaignToCSV.as_view(), + name=f"{app_label}_{model_name}_export-csv", + ), + path( + "exportBagIt/<path:campaign_slug>", + exporter_views.ExportCampaignToBagit.as_view(), + name=f"{app_label}_{model_name}_export-bagit", + ), + path( + "report/<path:campaign_slug>", + ReportCampaignView.as_view(), + name=f"{app_label}_{model_name}_report", + ), + ] + + return custom_urls + urls + @admin.register(Resource) class ResourceAdmin(admin.ModelAdmin, CustomListDisplayFieldsMixin): diff --git a/concordia/urls.py b/concordia/urls.py --- a/concordia/urls.py +++ b/concordia/urls.py @@ -26,13 +26,13 @@ views.ConcordiaAlternateAssetView.as_view(), name="alternate-asset", ), - re_path( - r"exportCSV/([^/]+)/$", + path( + "exportCSV/<slug:campaign_slug>/", exporter_views.ExportCampaignToCSV.as_view(), name="export-csv", ), - re_path( - r"exportBagIt/([^/]+)/$", + path( + "exportBagIt/<slug:campaign_slug>/", exporter_views.ExportCampaignToBagit.as_view(), name="export-bagit", ), diff --git a/exporter/views.py b/exporter/views.py --- a/exporter/views.py +++ b/exporter/views.py @@ -4,10 +4,11 @@ import bagit from django.conf import settings -from django.http import HttpResponse -from django.views.generic import TemplateView from django.contrib.auth.decorators import login_required +from django.http import HttpResponse from django.utils.decorators import method_decorator +from django.views.generic import TemplateView + from concordia.models import Asset, Campaign, Transcription, UserAssetTagCollection from concordia.storage import ASSET_STORAGE @@ -22,7 +23,7 @@ class ExportCampaignToCSV(TemplateView): @method_decorator(login_required) def get(self, request, *args, **kwargs): - campaign = Campaign.objects.get(slug=self.args[0]) + campaign = Campaign.objects.get(slug=self.kwargs["campaign_slug"]) asset_list = Asset.objects.filter(item__project__campaign=campaign).order_by( "title", "sequence" ) @@ -75,7 +76,7 @@ class ExportCampaignToBagit(TemplateView): @method_decorator(login_required) def get(self, request, *args, **kwargs): - campaign = Campaign.objects.get(slug=self.args[0]) + campaign = Campaign.objects.get(slug=self.kwargs["campaign_slug"]) asset_list = Asset.objects.filter(item__project__campaign=campaign).order_by( "title", "sequence" )
diff --git a/concordia/tests/utils.py b/concordia/tests/utils.py --- a/concordia/tests/utils.py +++ b/concordia/tests/utils.py @@ -85,12 +85,18 @@ def create_asset( slug="test-asset", media_type=MediaType.IMAGE, media_url="1.jpg", + published=True, **kwargs, ): if item is None: item = create_item() asset = item.asset_set.create( - title=title, slug=slug, media_type=media_type, media_url=media_url, **kwargs + title=title, + slug=slug, + media_type=media_type, + published=published, + media_url=media_url, + **kwargs, ) asset.full_clean() asset.save()
Move admin functions to Django admin

**Is your feature request related to a problem? Please describe.**
The site can't be cached because of the presence of admin buttons in the end-user view.

**Describe the solution you'd like**
The following admin functions need to be implemented in the Django admin:

Move admin functions from campaigns public UI page to Django admin
- [x] export CSV
- [x] export BagIt
- [x] publish
- [x] unpublish
- [x] delete
- [x] Report

Project admin functions move to django admin
- [x] publish
- [x] unpublish
- [x] delete

Assets admin function:
- [x] hiding individual assets
2018-10-11T20:45:51
LibraryOfCongress/concordia
377
LibraryOfCongress__concordia-377
[ "158" ]
095c8d119c5d2e5f11eda019b55806d5dd9473a1
diff --git a/concordia/forms.py b/concordia/forms.py --- a/concordia/forms.py +++ b/concordia/forms.py @@ -79,19 +79,9 @@ class AssetFilteringForm(forms.Form): widget=forms.Select(attrs={"class": "form-control"}), ) - def __init__(self, asset_qs, *args, **kwargs): + def __init__(self, status_counts, *args, **kwargs): super().__init__(*args, **kwargs) - # We want to get a list of all of the available asset states in this - # item's assets and will return that with the preferred display labels - # including the asset count to be displayed in the filter UI - asset_state_qs = asset_qs.values_list("transcription_status") - asset_state_qs = asset_state_qs.annotate( - Count("transcription_status") - ).order_by() - - status_counts = dict(asset_state_qs) - asset_statuses = { status: "%s (%d)" % (TranscriptionStatus.CHOICE_MAP[status], count) for status, count in status_counts.items() diff --git a/concordia/views.py b/concordia/views.py --- a/concordia/views.py +++ b/concordia/views.py @@ -233,7 +233,17 @@ def get_queryset(self): def apply_asset_filters(self, asset_qs): """Use optional GET parameters to filter the asset list""" - self.filter_form = form = self.form_class(asset_qs, self.request.GET) + # We want to get a list of all of the available asset states in this + # item's assets and will return that with the preferred display labels + # including the asset count to be displayed in the filter UI + asset_state_qs = asset_qs.values_list("transcription_status") + asset_state_qs = asset_state_qs.annotate( + Count("transcription_status") + ).order_by() + + self.transcription_status_counts = status_counts = dict(asset_state_qs) + + self.filter_form = form = self.form_class(status_counts, self.request.GET) if form.is_valid(): asset_qs = asset_qs.filter( **{k: v for k, v in form.cleaned_data.items() if v} @@ -244,12 +254,33 @@ def apply_asset_filters(self, asset_qs): def get_context_data(self, **kwargs): res = super().get_context_data(**kwargs) + # We'll collect some extra stats for the progress bar. We can reuse the values + # which are calculated for the transcription status filters but that displays + # items as open for edit whether or not anyone has started transcribing them. + # For the progress bar, we'll only count the records which have at least one + # transcription, no matter how far along it is: + + contributors = Transcription.objects.filter(asset__item=self.item).aggregate( + Count("user", distinct=True), Count("asset", distinct=True) + ) + + asset_count = len(self.object_list) + if asset_count: + in_progress_percent = round( + 100 * (contributors["asset__count"] / asset_count) + ) + else: + in_progress_percent = 0 + res.update( { "campaign": self.item.project.campaign, "project": self.item.project, "item": self.item, "filter_form": self.filter_form, + "transcription_status_counts": self.transcription_status_counts, + "contributor_count": contributors["user__count"], + "in_progress_percent": in_progress_percent, } ) return res
Progress percentage to show level of completion on an item

As a user, I want to know how complete an item is so that I can decide if I want to click it and transcribe.

Acceptance criteria:
- [x] A function that does the math to generate a percentage based on how many items were completed and items that are open for editing
- [x] UI of percentage based on wireframe https://github.com/LibraryOfCongress/concordia/issues/41
2018-10-12T14:44:36
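Distilled, the progress computation the patch above adds boils down to two distinct counts over `Transcription` (a sketch using the names from the diff; `item` is assumed to be an `Item` instance):

```python
from django.db.models import Count

# Distinct users tell us how many people contributed; distinct assets tell us
# how many pages have at least one transcription, however incomplete.
contributors = Transcription.objects.filter(asset__item=item).aggregate(
    Count("user", distinct=True), Count("asset", distinct=True)
)

asset_count = item.asset_set.count()
in_progress_percent = (
    round(100 * contributors["asset__count"] / asset_count) if asset_count else 0
)
```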
LibraryOfCongress/concordia
379
LibraryOfCongress__concordia-379
[ "283" ]
889ad595d7a8235266185f6365188c19d46c4bb9
diff --git a/concordia/urls.py b/concordia/urls.py
--- a/concordia/urls.py
+++ b/concordia/urls.py
@@ -20,11 +20,6 @@
 [
         path("", views.CampaignListView.as_view(), name="campaigns"),
         path("<slug:slug>/", views.CampaignDetailView.as_view(), name="campaign"),
-        re_path(
-            r"^alternateasset/$",
-            views.ConcordiaAlternateAssetView.as_view(),
-            name="alternate-asset",
-        ),
         path(
             "exportCSV/<slug:campaign_slug>/",
             exporter_views.ExportCampaignToCSV.as_view(),
@@ -50,6 +45,11 @@
             views.ConcordiaProjectView.as_view(),
             name="project-detail",
         ),
+        path(
+            "<slug:campaign_slug>/<slug:project_slug>/next-transcribable-asset/",
+            views.redirect_to_next_transcribable_asset,
+            name="redirect-to-next-transcribable-asset",
+        ),
         path(
             "<slug:campaign_slug>/<slug:project_slug>/<slug:item_id>/",
             views.ItemDetailView.as_view(),
diff --git a/concordia/views.py b/concordia/views.py
--- a/concordia/views.py
+++ b/concordia/views.py
@@ -35,6 +35,7 @@
 )
 from concordia.models import (
     Asset,
+    AssetTranscriptionReservation,
     Campaign,
     Item,
     Project,
@@ -345,6 +346,12 @@ def get_context_data(self, **kwargs):
         if next_asset:
             ctx["next_asset_url"] = next_asset.get_absolute_url()
 
+        ctx["asset_navigation"] = (
+            item.asset_set.published()
+            .order_by("sequence")
+            .values_list("sequence", "slug")
+        )
+
         tag_groups = UserAssetTagCollection.objects.filter(asset__slug=asset.slug)
         tags = []
 
@@ -474,48 +481,6 @@ def review_transcription(request, *, pk):
     return JsonResponse({"id": transcription.pk}, status=200)
 
 
-class ConcordiaAlternateAssetView(View):
-    """
-    Class to handle when user opts to work on an alternate asset because another user is already working
-    on the original page
-    """
-
-    def post(self, *args, **kwargs):
-        """
-        handle the POST request from the AJAX call in the template when user opts to work on alternate page
-        :param request:
-        :param args:
-        :param kwargs:
-        :return: alternate url the client will use to redirect to
-        """
-
-        if self.request.is_ajax():
-            json_dict = json.loads(self.request.body)
-            campaign_slug = json_dict["campaign"]
-            asset_slug = json_dict["asset"]
-        else:
-            campaign_slug = self.request.POST.get("campaign", None)
-            asset_slug = self.request.POST.get("asset", None)
-
-        if campaign_slug and asset_slug:
-            response = requests.get(
-                "%s://%s/ws/campaign_asset_random/%s/%s"
-                % (
-                    self.request.scheme,
-                    self.request.get_host(),
-                    campaign_slug,
-                    asset_slug,
-                ),
-                cookies=self.request.COOKIES,
-            )
-            random_asset_json_val = json.loads(response.content.decode("utf-8"))
-
-            return HttpResponse(
-                "/campaigns/%s/asset/%s/"
-                % (campaign_slug, random_asset_json_val["slug"])
-            )
-
-
 class ContactUsView(FormView):
     template_name = "contact.html"
     form_class = ContactUsForm
@@ -682,8 +647,6 @@ def reserve_asset_transcription(request, *, asset_pk):
     # We're relying on the database to meet our integrity requirements and since
     # this is called periodically we want to be fairly fast until we switch to
     # something like Redis.
-    #
-    #
     with connection.cursor() as cursor:
         cursor.execute(
@@ -705,3 +668,40 @@
     return HttpResponse(status=409)
 
     return HttpResponse(status=204)
+
+
+@atomic
+def redirect_to_next_transcribable_asset(request, *, campaign_slug, project_slug):
+    project = get_object_or_404(
+        Project.objects.published(), campaign__slug=campaign_slug, slug=project_slug
+    )
+
+    if not request.user.is_authenticated:
+        user = get_anonymous_user()
+    else:
+        user = request.user
+
+    potential_assets = Asset.objects.select_for_update(skip_locked=True, of=("self",))
+    potential_assets = potential_assets.filter(
+        item__project=project, transcription_status=TranscriptionStatus.EDIT
+    )
+    potential_assets = potential_assets.filter(assettranscriptionreservation=None)
+
+    for potential_asset in potential_assets:
+        res = AssetTranscriptionReservation(user=user, asset=potential_asset)
+        res.full_clean()
+        res.save()
+        return redirect(
+            "transcriptions:asset-detail",
+            project.campaign.slug,
+            project.slug,
+            potential_asset.item.item_id,
+            potential_asset.slug,
+        )
+    else:
+        messages.info(
+            request, "There are no remaining pages to be transcribed in this project!"
+        )
+        return redirect(
+            "transcriptions:project-detail", project.campaign.slug, project.slug
+        )
Moving users to the next action after finish/approve transcription
This will be part of #337 as per @jbresner's wireframe
2018-10-12T17:47:57
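The claim-next-row pattern used in `redirect_to_next_transcribable_asset` above, reduced to its core (a sketch using names from the diff; it assumes a database that supports `SELECT ... FOR UPDATE SKIP LOCKED`, such as PostgreSQL, and that `project` and `user` are already resolved):

```python
from django.db.transaction import atomic

with atomic():
    # skip_locked lets concurrent workers pass over rows another transaction
    # has already locked instead of blocking, so two users never claim the
    # same asset at the same time.
    asset = (
        Asset.objects.select_for_update(skip_locked=True, of=("self",))
        .filter(
            item__project=project,
            transcription_status=TranscriptionStatus.EDIT,
            assettranscriptionreservation=None,
        )
        .first()
    )
    if asset is not None:
        # Locking and reserving happen in the same transaction, so the
        # reservation is visible before the row lock is released.
        AssetTranscriptionReservation.objects.create(user=user, asset=asset)
```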
LibraryOfCongress/concordia
396
LibraryOfCongress__concordia-396
[ "285" ]
287c48bcd3ac3bbb9b2f352808da4e5eed0eee0e
diff --git a/concordia/forms.py b/concordia/forms.py --- a/concordia/forms.py +++ b/concordia/forms.py @@ -42,27 +42,16 @@ def clean_email(self): class ContactUsForm(forms.Form): referrer = forms.CharField(label="Referring Page", widget=forms.HiddenInput()) - email = forms.EmailField(label="Your email", required=True) - subject = forms.CharField(label="Subject", required=False) + email = forms.EmailField(label="Your email:", required=True) + subject = forms.CharField(label="Subject:", required=False) - category = forms.CharField( - label="Category", - required=True, - widget=forms.Select( - choices=( - ("General", "General"), - ("Campaign", "Question about campaign"), - ("Problem", "Something is not working"), - ) - ), - ) link = forms.URLField( - label="Link to the page you need support with", required=False + label="Have a specific page you need help with? Add the link below:", required=False ) story = forms.CharField( - label="Why are you contacting us", required=True, widget=forms.Textarea + label="Let us know how we can help:", required=True, widget=forms.Textarea )
Refine Contact Us page

**What behavior did you observe? Please describe the bug**
- The language is very direct. Need to make it more user friendly.
- Add content to get to History Hub
- Begin with options to self-service in Help Center or History Hub

**What is the expected behavior?**
As a user, I want to contact a community manager with a pressing question via the Contact Us form so that I can get a swift reply.

It should also include:
- Friendlier language for the different fields
- Contact Us form would autopopulate my email and tell CM what page they are referring to
- I should be able to categorize my question in the Contact Us form to best route the question.
- Some information telling me about History Hub

**Got screenshots? This helps us identify the issue**
![contact](https://user-images.githubusercontent.com/7362915/46110939-4114f780-c1b3-11e8-9504-3e9358f6cfcb.PNG)

**Additional context**
Add any other context about the problem here.
@elainekamlley you can send a pull request against https://github.com/LibraryOfCongress/concordia/blob/21bc9c11817cd95683f818e38cae7c6bac646b56/concordia/forms.py#L94-L130 with label changes.
2018-10-15T20:46:19
LibraryOfCongress/concordia
400
LibraryOfCongress__concordia-400
[ "398" ]
18418f3a9dce4109714ec37d931c2fb7caa72769
diff --git a/concordia/views.py b/concordia/views.py --- a/concordia/views.py +++ b/concordia/views.py @@ -23,7 +23,7 @@ from django.utils.timezone import now from django.views.decorators.cache import never_cache from django.views.decorators.http import require_POST -from django.views.generic import DetailView, FormView, ListView, TemplateView, View +from django.views.generic import DetailView, FormView, ListView, TemplateView from django_registration.backends.activation.views import RegistrationView from concordia.forms import ( @@ -102,13 +102,19 @@ def static_page(request, base_name=None): html = md.convert(f.read()) page_title = md.Meta.get("title") - if page_title: page_title = "\n".join(i.strip() for i in page_title) else: page_title = base_name.replace("-", " ").replace("/", " — ").title() - ctx = {"body": html, "title": page_title} + breadcrumbs = [] + path_components = request.path.strip("/").split("/") + for i, segment in enumerate(path_components, start=1): + breadcrumbs.append( + ("/%s/" % "/".join(path_components[0:i]), segment.replace("-", " ").title()) + ) + + ctx = {"body": html, "title": page_title, "breadcrumbs": breadcrumbs} return render(request, "static-page.html", ctx) @@ -321,8 +327,9 @@ def get_context_data(self, **kwargs): transcription = asset.transcription_set.order_by("-pk").first() ctx["transcription"] = transcription - # We'll handle the case where an item with no transcriptions should be shown as status=edit here - # so the logic doesn't need to be repeated in templates: + # We'll handle the case where an item with no transcriptions should be + # shown as status=edit here so the logic doesn't need to be repeated in + # templates: if transcription: transcription_status = transcription.status.lower() else:
Add tooltips to breadcrumbs

Currently we truncate the breadcrumb text to avoid wrapping, but it would be nice to present the full title as a tooltip on the link in case someone wants to see the complete value.
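For reference, the breadcrumb tuples this would hang off of come from the loop added to `static_page()` in the patch above; each `(url, title)` pair gives a template both the truncated link text and the full value for an HTML `title` attribute. A standalone sketch of the same loop:

```
def build_breadcrumbs(path):
    # Mirrors the loop added to static_page(): each path segment becomes a
    # (url, title) pair, so a template can truncate the title for display
    # while exposing the full value as a tooltip.
    breadcrumbs = []
    path_components = path.strip("/").split("/")
    for i, segment in enumerate(path_components, start=1):
        breadcrumbs.append(
            ("/%s/" % "/".join(path_components[0:i]), segment.replace("-", " ").title())
        )
    return breadcrumbs


print(build_breadcrumbs("/help-center/how-to-transcribe/"))
# [('/help-center/', 'Help Center'),
#  ('/help-center/how-to-transcribe/', 'How To Transcribe')]
```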
2018-10-16T18:39:41
LibraryOfCongress/concordia
401
LibraryOfCongress__concordia-401
[ "398" ]
18418f3a9dce4109714ec37d931c2fb7caa72769
diff --git a/concordia/views.py b/concordia/views.py --- a/concordia/views.py +++ b/concordia/views.py @@ -23,7 +23,7 @@ from django.utils.timezone import now from django.views.decorators.cache import never_cache from django.views.decorators.http import require_POST -from django.views.generic import DetailView, FormView, ListView, TemplateView, View +from django.views.generic import DetailView, FormView, ListView, TemplateView from django_registration.backends.activation.views import RegistrationView from concordia.forms import ( @@ -108,7 +108,14 @@ def static_page(request, base_name=None): else: page_title = base_name.replace("-", " ").replace("/", " — ").title() - ctx = {"body": html, "title": page_title} + breadcrumbs = [] + path_components = request.path.strip("/").split("/") + for i, segment in enumerate(path_components, start=1): + breadcrumbs.append( + ("/%s/" % "/".join(path_components[0:i]), segment.replace("-", " ").title()) + ) + + ctx = {"body": html, "title": page_title, "breadcrumbs": breadcrumbs} return render(request, "static-page.html", ctx) @@ -321,8 +328,9 @@ def get_context_data(self, **kwargs): transcription = asset.transcription_set.order_by("-pk").first() ctx["transcription"] = transcription - # We'll handle the case where an item with no transcriptions should be shown as status=edit here - # so the logic doesn't need to be repeated in templates: + # We'll handle the case where an item with no transcriptions should be + # shown as status=edit here so the logic doesn't need to be repeated in + # templates: if transcription: transcription_status = transcription.status.lower() else:
Add tooltips to breadcrumbs

Currently we truncate the breadcrumb text to avoid wrapping, but it would be nice to present the full title as a tooltip on the link in case someone wants to see the complete value.
2018-10-16T19:08:05
LibraryOfCongress/concordia
436
LibraryOfCongress__concordia-436
[ "389" ]
6b393b29559506a112490121781aa42e7af64cc7
diff --git a/concordia/urls.py b/concordia/urls.py --- a/concordia/urls.py +++ b/concordia/urls.py @@ -69,6 +69,7 @@ path("help-center/how-to-tag/", views.static_page, name="how-to-tag"), path("for-educators/", views.static_page, name="for-educators"), path("latest/", views.static_page, name="latest"), + path("questions/", views.static_page, name="questions"), path("contact/", views.ContactUsView.as_view(), name="contact"), path("campaigns/", include(tx_urlpatterns, namespace="transcriptions")), path(
Questions page to various help links

As a user, when I click the `Questions?` button in the transcription view, I am taken to a page where I am given choices based on what I need help with. I can see three boxes that link out to:
- Discussion forum: I want to talk about what I am finding
- Help Center: I want to know how to do something
- Contact Us: I have a specific question for a community manager

This is a self-guided routing page for users to determine how they would like to get help.

Acceptance criteria:
- create a new static page titled `questions.md`
- reuse the styling of the guide cards in the Help Center and link out to: History Hub, Help Center, the contact us page

![mymcards](https://user-images.githubusercontent.com/7362915/46969720-fa653f80-d084-11e8-8f2c-d89329169a77.PNG)
2018-10-19T15:15:19
LibraryOfCongress/concordia
451
LibraryOfCongress__concordia-451
[ "443" ]
80b5d3d35edf8628c539fce3cabd2d54bfe8ff42
diff --git a/concordia/views.py b/concordia/views.py --- a/concordia/views.py +++ b/concordia/views.py @@ -330,23 +330,32 @@ def apply_asset_filters(self, asset_qs): def get_context_data(self, **kwargs): res = super().get_context_data(**kwargs) + asset_count = self.item.asset_set.published().count() + # We'll collect some extra stats for the progress bar. We can reuse the values # which are calculated for the transcription status filters but that displays # items as open for edit whether or not anyone has started transcribing them. # For the progress bar, we'll only count the records which have at least one - # transcription, no matter how far along it is: + # transcription, no matter how far along it is, so we need to make a separate + # query to get the number of transcriptions along with unique users: - contributors = Transcription.objects.filter(asset__item=self.item).aggregate( - Count("user", distinct=True), Count("asset", distinct=True) + trans_counts = Transcription.objects.filter(asset__item=self.item).aggregate( + user=Count("user", distinct=True), asset=Count("asset", distinct=True) ) - asset_count = len(self.object_list) if asset_count: - in_progress_percent = round( - 100 * (contributors["asset__count"] / asset_count) + edit_percent = round(100 * trans_counts["asset"] / asset_count) + status_counts = self.transcription_status_counts + submitted_percent = round( + 100 * status_counts.get("submitted", 0) / asset_count + ) + completed_percent = round( + 100 * status_counts.get("completed", 0) / asset_count ) else: - in_progress_percent = 0 + edit_percent = 0 + submitted_percent = 0 + completed_percent = 0 res.update( { @@ -355,8 +364,11 @@ def get_context_data(self, **kwargs): "item": self.item, "filter_form": self.filter_form, "transcription_status_counts": self.transcription_status_counts, - "contributor_count": contributors["user__count"], - "in_progress_percent": in_progress_percent, + "contributor_count": trans_counts["user"], + "total_asset_count": asset_count, + "edit_percent": edit_percent, + "submitted_percent": submitted_percent, + "completed_percent": completed_percent, } ) return res
Progress bar and item filtering

**How can we reproduce the bug?**
Steps to reproduce the behavior:
1. Go to an item that has some assets open for edit, some submitted for review.
2. Observe the progress bar when filtering by all images.
3. Change the filter to open for edit and click go.
4. Change the filter to submitted for review and click go.

**What behavior did you observe? Please describe the bug**
1. The progress bar changed when the filter status changed.
2. The progress bar seemed to reflect the progress of the filtered assets.

**What is the expected behavior?**
1. The progress bar should reflect the status of the item, not the status of the filtered assets being displayed.

**Got screenshots? This helps us identify the issue**
![screen shot 2018-10-19 at 1 30 06 pm](https://user-images.githubusercontent.com/5270415/47234169-f138e300-d3a2-11e8-83b6-286b0edefe6b.png)
![screen shot 2018-10-19 at 1 29 57 pm](https://user-images.githubusercontent.com/5270415/47234179-f85ff100-d3a2-11e8-8ee6-1ff9982c21b5.png)
Ah, yes, this is using the paginator's count as the max value
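A sketch of the corrected arithmetic, using the names introduced in the patch above. The point is that the denominator is the item's full published asset count rather than `len(self.object_list)`, which only covers the currently filtered queryset:

```
def progress_percentages(total_asset_count, status_counts, transcribed_asset_count):
    # Denominator is the item-wide count, so the bar no longer moves when
    # the user filters the displayed asset list.
    if not total_asset_count:
        return {"edit": 0, "submitted": 0, "completed": 0}
    return {
        "edit": round(100 * transcribed_asset_count / total_asset_count),
        "submitted": round(100 * status_counts.get("submitted", 0) / total_asset_count),
        "completed": round(100 * status_counts.get("completed", 0) / total_asset_count),
    }
```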
2018-10-19T20:14:51
LibraryOfCongress/concordia
463
LibraryOfCongress__concordia-463
[ "457" ]
26793c744452eee4667e96be85eb3f29a80cd1dd
diff --git a/concordia/settings_template.py b/concordia/settings_template.py --- a/concordia/settings_template.py +++ b/concordia/settings_template.py @@ -78,6 +78,7 @@ "raven.contrib.django.raven_compat", "maintenance_mode", "bootstrap4", + "bittersweet", "concordia.apps.ConcordiaAppConfig", "exporter", "importer",
Pagination and filtering don't work together

**What behavior did you observe? Please describe the bug**
The filter became unset and went to all images.

**How can we reproduce the bug?**
Steps to reproduce the behavior:
1. Go to an item that has several assets in open and submitted states.
2. Use the filter to only view submitted for review assets.
3. Scroll down and click page 2.

**What is the expected behavior?**
When I click page 2, the filter should be maintained.
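The underlying fix is to carry the active filter parameters into every pagination link instead of emitting bare `?page=N` URLs; the patch pulls in the `bittersweet` app for its template helpers. A plain-Python sketch of the idea (the function name here is illustrative, not the library's API):

```
from urllib.parse import urlencode


def page_url(current_get_params, page_number):
    # Rebuild the querystring from the current GET parameters so that a
    # filter like transcription_status=submitted survives a page change.
    params = dict(current_get_params)
    params["page"] = page_number
    return "?%s" % urlencode(params)


print(page_url({"transcription_status": "submitted"}, 2))
# ?transcription_status=submitted&page=2
```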
2018-10-22T15:50:29
LibraryOfCongress/concordia
484
LibraryOfCongress__concordia-484
[ "462" ]
a073dd02b6cf6d865cc1121b6dc4bd314e341472
diff --git a/concordia/urls.py b/concordia/urls.py --- a/concordia/urls.py +++ b/concordia/urls.py @@ -93,6 +93,7 @@ ), path("assets/<int:asset_pk>/tags/submit/", views.submit_tags, name="submit-tags"), path("account/ajax-status/", views.ajax_session_status, name="ajax-session-status"), + path("account/ajax-messages/", views.ajax_messages, name="ajax-messages"), path( "account/register/", views.ConcordiaRegistrationView.as_view(), diff --git a/concordia/views.py b/concordia/views.py --- a/concordia/views.py +++ b/concordia/views.py @@ -150,7 +150,7 @@ def static_page(request, base_name=None): return render(request, "static-page.html", ctx) -@never_cache +@cache_control(private=True, no_transform=True, max_age=settings.DEFAULT_PAGE_TTL) @csrf_exempt def ajax_session_status(request): """ @@ -176,19 +176,29 @@ def ajax_session_status(request): } ) - # TODO: we should determine whether there's enough performance impact - # that it would be better to make this view cacheable and move messages - # into a separate view: - messages = [ - {"level": MESSAGE_LEVEL_NAMES[i.level], "message": i.message} - for i in get_messages(request) - ] - - res = {"username": user.username, "links": links, "messages": messages} + res = {"username": user.username, "links": links} return JsonResponse(res) +@never_cache +@login_required +@csrf_exempt +def ajax_messages(request): + """ + Returns any messages queued for the current user + """ + + return JsonResponse( + { + "messages": [ + {"level": MESSAGE_LEVEL_NAMES[i.level], "message": i.message} + for i in get_messages(request) + ] + } + ) + + @method_decorator(never_cache, name="dispatch") class ConcordiaRegistrationView(RegistrationView): form_class = UserRegistrationForm
diff --git a/concordia/tests/test_view.py b/concordia/tests/test_view.py --- a/concordia/tests/test_view.py +++ b/concordia/tests/test_view.py @@ -290,11 +290,21 @@ def test_ajax_session_status(self): self.assertIn("links", data) self.assertIn("username", data) - self.assertIn("messages", data) self.assertEqual(data["username"], self.user.username) - # The inclusion of messages means that this view cannot currently be cached: + self.assertIn("private", resp["Cache-Control"]) + + def test_ajax_messages(self): + self.login_user() + + resp = self.client.get(reverse("ajax-messages")) + data = self.assertValidJSON(resp) + + self.assertIn("messages", data) + + # This view cannot be cached because the messages would be displayed + # multiple times: self.assertIn("no-cache", resp["Cache-Control"])
Split messages into a separate AJAX call This avoids the performance impact of not being able to make our session status code cacheable: https://github.com/LibraryOfCongress/concordia/blob/3d66d95b9566609efe555a898af34e33d373e1af/concordia/views.py#L179-L181
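The reason the messages had to move is that Django's messages framework marks messages as read while they are iterated, so a cached response body would both lose them for the first user and replay stale copies to others. A sketch of the consuming read (this uses the framework's built-in `level_tag` rather than the custom `MESSAGE_LEVEL_NAMES` map in the patch):

```
from django.contrib.messages import get_messages


def drain_messages(request):
    # Iterating get_messages() marks each message as read; a second call
    # for the same request yields nothing. That is why this payload must
    # stay behind never_cache while session status can be cached per-user.
    return [
        {"level": message.level_tag, "message": message.message}
        for message in get_messages(request)
    ]
```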
2018-10-22T21:15:40
LibraryOfCongress/concordia
512
LibraryOfCongress__concordia-512
[ "509", "506" ]
04826064eff8a60bf8c110b4c325a62d079c0596
diff --git a/concordia/admin.py b/concordia/admin.py --- a/concordia/admin.py +++ b/concordia/admin.py @@ -5,8 +5,11 @@ from django.conf import settings from django.contrib import admin, messages from django.contrib.admin.views.decorators import staff_member_required +from django.contrib.auth.admin import UserAdmin from django.contrib.auth.decorators import permission_required +from django.contrib.auth.models import User from django.core.exceptions import ValidationError +from django.db.models import Count from django.shortcuts import get_object_or_404, render from django.template.defaultfilters import slugify, truncatechars from django.urls import path @@ -88,6 +91,24 @@ def unpublish_action(modeladmin, request, queryset): unpublish_action.short_description = "Unpublish selected" +class ConcordiaUserAdmin(UserAdmin): + list_display = UserAdmin.list_display + ("date_joined", "transcription_count") + + def get_queryset(self, request): + qs = super().get_queryset(request) + qs = qs.annotate(Count("transcription")) + return qs + + def transcription_count(self, obj): + return obj.transcription__count + + transcription_count.admin_order_field = "transcription__count" + + +admin.site.unregister(User) +admin.site.register(User, ConcordiaUserAdmin) + + @never_cache @staff_member_required @permission_required("concordia.add_campaign") @@ -441,7 +462,7 @@ class AssetAdmin(admin.ModelAdmin, CustomListDisplayFieldsMixin): "transcription_status", ) actions = (publish_action, unpublish_action) - + autocomplete_fields = ("item",) ordering = ("item__item_id", "sequence") def get_queryset(self, request): @@ -461,6 +482,11 @@ def truncated_media_url(self, obj): truncated_media_url.allow_tags = True truncated_media_url.short_description = "Media URL" + def get_readonly_fields(self, request, obj=None): + if obj: + return self.readonly_fields + ("item",) + return self.readonly_fields + def change_view(self, request, object_id, extra_context=None, **kwargs): if object_id: if extra_context is None: diff --git a/importer/admin.py b/importer/admin.py --- a/importer/admin.py +++ b/importer/admin.py @@ -83,6 +83,7 @@ def inner(obj): return value inner.short_description = field_name.replace("_", " ").title() + inner.admin_order_field = field_name return inner def __init__(self, *args, **kwargs):
As an admin, be able to sort users by recency of sign up and activity

User stories:
- As an admin, I would like to be able to sort the users list by recency of joining crowd.loc.gov
- As an admin, I would like to be able to sort users by activity

Make item on asset admin change form readonly

Currently this builds a giant select list, and we never want to make assets movable anyway.
2018-10-24T18:16:48
LibraryOfCongress/concordia
527
LibraryOfCongress__concordia-527
[ "519" ]
a922e0ab05f796688ffcaa3e9b69f82b306b9f85
diff --git a/concordia/forms.py b/concordia/forms.py --- a/concordia/forms.py +++ b/concordia/forms.py @@ -44,7 +44,7 @@ class ContactUsForm(forms.Form): ) email = forms.EmailField(label="Your email:", required=True) - subject = forms.CharField(label="Subject:", required=False) + subject = forms.CharField(label="Subject:", required=True) link = forms.URLField( label="Have a specific page you need help with? Add the link below:", diff --git a/concordia/settings_template.py b/concordia/settings_template.py --- a/concordia/settings_template.py +++ b/concordia/settings_template.py @@ -281,6 +281,9 @@ ROBOTS_USE_SITEMAP = False ROBOTS_USE_HOST = False +# django-bootstrap4 customization: +BOOTSTRAP4 = {"required_css_class": "form-group-required"} + # Transcription-related settings #: Number of seconds an asset reservation is valid for diff --git a/concordia/views.py b/concordia/views.py --- a/concordia/views.py +++ b/concordia/views.py @@ -718,9 +718,16 @@ def form_valid(self, form): html_template = loader.get_template("emails/contact_us_email.html") html_message = html_template.render(form.cleaned_data) + confirmation_template = loader.get_template( + "emails/contact_us_confirmation_email.txt" + ) + confirmation_message = confirmation_template.render(form.cleaned_data) + try: send_mail( - "Contact Us: %(subject)s" % form.cleaned_data, + "Contact {}: {}".format( + self.request.get_host(), form.cleaned_data["subject"] + ), message=text_message, html_message=html_message, from_email=settings.DEFAULT_FROM_EMAIL, @@ -741,6 +748,24 @@ def form_valid(self, form): "Your message could not be sent. Our support team has been notified.", ) + try: + send_mail( + "Contact {}: {}".format( + self.request.get_host(), form.cleaned_data["subject"] + ), + message=confirmation_message, + from_email=settings.DEFAULT_FROM_EMAIL, + recipient_list=[form.cleaned_data['email']], + ) + except SMTPException as exc: + logger.error( + "Unable to send contact message to %s: %s", + form.cleaned_data['email'], + exc, + exc_info=True, + extra={"data": form.cleaned_data}, + ) + return redirect("contact")
Improve Contact Us form

Based on the first 24 hours of user emails, we should implement some usability improvements to the contact us page to improve the public and community manager experience of creating and understanding emails.

As a community manager, I want crowdsourcing automatically indicated in the subject line when I respond to users' emails.
* [x] Change the subject line generated by the contact us form to “Contact crowd.loc.gov: [subject]” (currently "Contact us: [subject]")

As a community manager, I want every user email to have a subject line, user email address, and content.
* [x] Make ~~email address~~, subject, and ~~body~~ required fields.
* [x] Indicate to users which fields are required

As a user, I want to understand when I have submitted an email via the contact us form.
* [x] Change the “save” button to “send” or "send message"

As a user, I want clearer form instructions.
* [ ] Add help text for fields (see the sketch under the hints below)
As a community manager, I want to make sure I can view replies.
- Automatically populate replies with cc: crowd at loc dot gov

As a user, I want to understand what kind of response I can expect.
- Provide an automatic reply that indicates the expected time to receive a response (e.g. how many days) and where else to find help or resources in the meantime (History Hub, etc.)

"Change field text to instructions rather than just field name": this really should be adding help text in addition to the field name. You can see the `help_text` usage on other fields in https://github.com/LibraryOfCongress/concordia/blob/master/concordia/forms.py

"Automatically populate replies with cc: crowd at loc dot gov": aren't all replies being sent with that as the from address, as was the case with the test message I sent?

Re: automatic replies, we have a generic message to the user telling them that their message has been sent. What kind of response time are we comfortable promising?
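For the remaining unchecked item ("Add help text for fields"), the `help_text` suggestion in the hints translates to something like the following; the wording is illustrative only:

```
from django import forms


class ContactUsForm(forms.Form):
    # Sketch: a short label stays scannable while help_text carries the
    # instructions users were missing.
    link = forms.URLField(
        label="Link:",
        required=False,
        help_text="Have a specific page you need help with? Paste its address here.",
    )
```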
2018-10-26T15:13:37
LibraryOfCongress/concordia
535
LibraryOfCongress__concordia-535
[ "517" ]
43ce37aa2aa9d043457e7480ca200d9cee7cbb22
diff --git a/concordia/settings_template.py b/concordia/settings_template.py --- a/concordia/settings_template.py +++ b/concordia/settings_template.py @@ -41,7 +41,7 @@ os.path.join(SITE_ROOT_DIR, "static"), ] TEMPLATE_DEBUG = False -TIME_ZONE = "UTC" +TIME_ZONE = "America/New_York" USE_I18N = True USE_L10N = True USE_TZ = True
Set site time zone to US/Eastern

https://docs.djangoproject.com/en/2.1/ref/settings/#std:setting-TIME_ZONE

Use the Django `TIME_ZONE` setting to render user-facing timestamps in the US/Eastern time zone.
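For reference, with `USE_TZ = True` (already set in the surrounding settings, per the diff context) Django keeps stored datetimes in UTC and only converts for display, so this one-line change affects rendering rather than the database. A quick check from a configured Django shell:

```
from django.utils import timezone

now_utc = timezone.now()                 # stored/compared in UTC
now_local = timezone.localtime(now_utc)  # rendered as America/New_York
print(now_local.tzinfo)
```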
2018-10-29T19:07:48
LibraryOfCongress/concordia
567
LibraryOfCongress__concordia-567
[ "564" ]
106f7eafe02056208e2931d748db2864fdff908d
diff --git a/concordia/views.py b/concordia/views.py --- a/concordia/views.py +++ b/concordia/views.py @@ -822,6 +822,7 @@ def get(self, request, campaign_slug): asset_count=Count( "item__asset", filter=Q(item__published=True, item__asset__published=True), + distinct=True ) ) projects_qs = projects_qs.annotate(
"Images in this project" is incorrect on campaign report **What behavior did you observe? Please describe the bug** The number of Images in this project is incorrect. It doesn't match the actual number of published assets in the project. **How can we reproduce the bug?** Steps to reproduce the behavior: 1. Go to any campaign report. 2. In the django admin, filter the list of assets by projects in that campaign. 3. Check the control checkbox. **What is the expected behavior?** The "Images in this project" should accurately display the number of published assets per project.
2018-10-31T21:48:35
LibraryOfCongress/concordia
575
LibraryOfCongress__concordia-575
[ "536" ]
33f4baded2737cade6bb39a86348d1b70dd90615
diff --git a/concordia/settings_dev.py b/concordia/settings_dev.py --- a/concordia/settings_dev.py +++ b/concordia/settings_dev.py @@ -45,8 +45,6 @@ REGISTRATION_SALT = "django_registration" # doesn't need to be secret -ACCOUNT_ACTIVATION_DAYS = 1 # required for HMAC registration two-step-flow - INSTALLED_APPS += ["debug_toolbar"] MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] INTERNAL_IPS = ("127.0.0.1",) diff --git a/concordia/settings_prod.py b/concordia/settings_prod.py --- a/concordia/settings_prod.py +++ b/concordia/settings_prod.py @@ -82,6 +82,4 @@ REGISTRATION_SALT = "django_registration" # doesn't need to be secret -ACCOUNT_ACTIVATION_DAYS = 1 # required for HMAC registration two-step-flow - SESSION_ENGINE = "django.contrib.sessions.backends.cached_db" diff --git a/concordia/settings_template.py b/concordia/settings_template.py --- a/concordia/settings_template.py +++ b/concordia/settings_template.py @@ -212,8 +212,6 @@ # Django-specific settings above ################################################################################ -ACCOUNT_ACTIVATION_DAYS = 7 - MEDIA_URL = "/media/" MEDIA_ROOT = os.path.join(SITE_ROOT_DIR, "media") @@ -245,8 +243,8 @@ STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage" WHITENOISE_ROOT = os.path.join(SITE_ROOT_DIR, "static") -PASSWORD_RESET_TIMEOUT_DAYS = 1 -ACCOUNT_ACTIVATION_DAYS = 1 +PASSWORD_RESET_TIMEOUT_DAYS = 2 +ACCOUNT_ACTIVATION_DAYS = 2 REGISTRATION_OPEN = True # set to false to temporarily disable registrations MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
Template not found when account activation fails **What behavior did you observe? Please describe the bug** The log file contains error messages looking for an activation failure template that does not exist. **How can we reproduce the bug?** Steps to reproduce the behavior: 1. Register for an account 2. Wait past the amount of time when the activation link is valid. 3. Click the activation link. 4. Check the log file. **What is the expected behavior?** If an error occurs when activating someone's account, display a helpful specific error message. Log excerpt: ``` 19:29:00 [2018-10-28T19:29:00 ERROR django.request:118] Internal Server Error: /account/activate/ImVqZ2lsbHki:1gGpOB:B57BALOPAaJAQT7h0t1d4uZNIkA/ 19:29:00 Traceback (most recent call last): 19:29:00 File "/usr/local/lib/python3.6/dist-packages/django/core/handlers/exception.py", line 35, in inner 19:29:00 response = get_response(request) 19:29:00 File "/usr/local/lib/python3.6/dist-packages/django/core/handlers/base.py", line 158, in _get_response 19:29:00 response = self.process_exception_by_middleware(e, request) 19:29:00 File "/usr/local/lib/python3.6/dist-packages/django/core/handlers/base.py", line 156, in _get_response 19:29:00 response = response.render() 19:29:00 File "/usr/local/lib/python3.6/dist-packages/django/template/response.py", line 106, in render 19:29:00 self.content = self.rendered_content 19:29:00 File "/usr/local/lib/python3.6/dist-packages/django/template/response.py", line 81, in rendered_content 19:29:00 template = self.resolve_template(self.template_name) 19:29:00 File "/usr/local/lib/python3.6/dist-packages/django/template/response.py", line 63, in resolve_template 19:29:00 return select_template(template, using=self.using) 19:29:00 File "/usr/local/lib/python3.6/dist-packages/django/template/loader.py", line 47, in select_template 19:29:00 raise TemplateDoesNotExist(', '.join(template_name_list), chain=chain) 19:29:00 django.template.exceptions.TemplateDoesNotExist: django_registration/activation_failed.html ```
We should add that template and see whether any other steps on https://django-registration.readthedocs.io/en/3.0/quickstart.html were skipped. Based on https://github.com/ubernostrum/django-registration/blob/6617463731aa15a941bde9cef9fc235563c5c3d1/src/django_registration/backends/activation/urls.py#L34 I think https://github.com/LibraryOfCongress/concordia/blob/master/concordia/templates/registration/registration_closed.html might be in the wrong directory, too. Based on the convo in slack, we should also increase the activate time to 48 hours Looks like we can remove three redundant settings since the only time it's set to a value other than one day is overridden before it's used: ``` concordia/settings_template.py 215:ACCOUNT_ACTIVATION_DAYS = 7 249:ACCOUNT_ACTIVATION_DAYS = 1 concordia/settings_dev.py 48:ACCOUNT_ACTIVATION_DAYS = 1 # required for HMAC registration two-step-flow concordia/settings_prod.py 85:ACCOUNT_ACTIVATION_DAYS = 1 # required for HMAC registration two-step-flow ``` I think we've just heard from a volunteer with a related issue: It sounds from their description as though the 24 hour verification hasn't passed, so maybe there's another problem: "Hello, I just created an account and received an email telling me to click on a link to complete my account activation. Unfortunately, the link appears to be broken. It says "HTTP 500 Error The server encountered an unexpected condition which prevented it from fulfilling the request. Our staff have been notified about the failure." Does the password reset work? If not this might be worth a quick update This is the same missing template. If @rstorey wants I'll take a quick break from CSS to toss it together. Thanks - I'm stuck in rate limit world at the moment.
2018-11-01T21:46:10
LibraryOfCongress/concordia
581
LibraryOfCongress__concordia-581
[ "278" ]
a34e9c9bac87a3f9c34c3c75346598373958b658
diff --git a/concordia/admin.py b/concordia/admin.py --- a/concordia/admin.py +++ b/concordia/admin.py @@ -16,6 +16,7 @@ from django.utils.decorators import method_decorator from django.utils.html import format_html from django.views.decorators.cache import never_cache +from tabular_export.admin import export_to_csv_action, export_to_excel_action from exporter import views as exporter_views from importer.tasks import import_items_into_project_from_url @@ -103,6 +104,7 @@ def transcription_count(self, obj): return obj.transcription__count transcription_count.admin_order_field = "transcription__count" + actions = (export_to_excel_action, export_to_csv_action) admin.site.unregister(User)
Admin export of users who signed up for newsletter

**Is your feature request related to a problem? Please describe.**
Admins need a way to export (in CSV format) a list of users, sorted by join date, who opted in for the newsletter when they registered.

**Describe the solution you'd like**
For admin export, we should test whether this requires changes for Django 2.x, since it could be a simple option: https://github.com/LibraryOfCongress/django-tabular-export#admin-integration
Related to #155
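Beyond the two admin actions registered in the patch, the same library supports building exports in a view, which is roughly what a dedicated newsletter-signup export could look like. The opt-in field path below is hypothetical; `flatten_queryset` and `export_to_csv_response` are the helpers already used in exporter/views.py:

```
from django.contrib.auth.models import User
from tabular_export.core import export_to_csv_response, flatten_queryset


def export_newsletter_signups(request):
    # "profile__newsletter_opt_in" is a guessed field path; point it at
    # wherever the registration opt-in flag actually lives.
    users = User.objects.filter(profile__newsletter_opt_in=True).order_by("date_joined")
    headers, data = flatten_queryset(
        users, field_names=["username", "email", "date_joined"]
    )
    return export_to_csv_response("newsletter-signups.csv", headers, data)
```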
2018-11-02T15:16:42
LibraryOfCongress/concordia
597
LibraryOfCongress__concordia-597
[ "580" ]
3825111f9738c3c915e0912868bf7ef0b7aafec6
diff --git a/exporter/views.py b/exporter/views.py
--- a/exporter/views.py
+++ b/exporter/views.py
@@ -1,4 +1,5 @@
 import os
+import re
 import shutil
 
 import bagit
@@ -10,29 +11,44 @@
 from django.views.generic import TemplateView
 from tabular_export.core import export_to_csv_response, flatten_queryset
 
-from concordia.models import Asset, Campaign, Transcription
-from concordia.storage import ASSET_STORAGE
+from concordia.models import Asset, Transcription
+
+
+def get_latest_transcription_data(campaign_slug):
+    latest_trans_subquery = (
+        Transcription.objects.filter(asset=OuterRef("pk"))
+        .order_by("-pk")
+        .values("text")
+    )
+    assets = Asset.objects.annotate(
+        latest_transcription=Subquery(latest_trans_subquery[:1])
+    )
+    assets = assets.filter(item__project__campaign__slug=campaign_slug)
+    return assets
+
+
+def get_original_asset_id(download_url):
+    """
+    Extract the bit from the download url
+    that identifies this image uniquely on loc.gov
+    """
+    if download_url.startswith("http://tile.loc.gov/"):
+        pattern = r"/service:([A-Za-z0-9:]*)/"
+        asset_id = re.search(pattern, download_url)
+        assert asset_id
+        return asset_id.group(1).replace(":", "-")
+    else:
+        return download_url
 
 
 class ExportCampaignToCSV(TemplateView):
     """
     Exports the most recent transcription for each asset in a campaign
-
     """
 
     @method_decorator(login_required)
     def get(self, request, *args, **kwargs):
-        latest_trans_subquery = (
-            Transcription.objects.filter(asset=OuterRef("pk"))
-            .order_by("-pk")
-            .values("text")
-        )
-        assets = Asset.objects.annotate(
-            latest_transcription=Subquery(latest_trans_subquery[:1])
-        )
-        assets = assets.filter(
-            item__project__campaign__slug=self.kwargs["campaign_slug"]
-        )
+        assets = get_latest_transcription_data(self.kwargs["campaign_slug"])
 
         headers, data = flatten_queryset(
             assets,
@@ -64,68 +80,35 @@ def get(self, request, *args, **kwargs):
         )
 
 
+# FIXME: we should be able to export at the project and item level, too
 class ExportCampaignToBagit(TemplateView):
     """
-    Creates temp directory structure for source data. Copies source image
-    file from S3 or local storage into temp directory, builds export.csv
-    with meta, transcription, and tag data. Executes bagit.py to turn temp
-    directory into bagit strucutre. Builds and exports bagit structure as
-    zip. Removes all temporary directories and files.
-
+    Creates temp directory structure for source data.
+    Executes bagit.py to turn temp directory into bagit strucutre.
+    Builds and exports bagit structure as zip.
+    Removes all temporary directories and files.
     """
 
-    include_images = True
-    template_name = "transcriptions/campaign.html"
-
     @method_decorator(login_required)
     def get(self, request, *args, **kwargs):
-        campaign = Campaign.objects.get(slug=self.kwargs["campaign_slug"])
-        asset_list = Asset.objects.filter(item__project__campaign=campaign).order_by(
-            "title", "sequence"
-        )
+        campaign_slug = self.kwargs["campaign_slug"]
+        assets = get_latest_transcription_data(campaign_slug)
 
-        # FIXME: this code should be working in a separate path than the media root!
-        # FIXME: we should be able to export at the project and item level, too
-        export_base_dir = os.path.join(settings.MEDIA_ROOT, "exporter", campaign.slug)
-
-        for asset in asset_list:
-            src = os.path.join(
-                settings.MEDIA_ROOT,
-                asset.item.project.campaign.slug,
-                asset.item.project.slug,
-                asset.item.item_id,
-                asset.slug,
-                asset.media_url,
-            )
+        export_base_dir = os.path.join(settings.SITE_ROOT_DIR, "tmp", campaign_slug)
+
+        for asset in assets:
             dest_folder = os.path.join(
-                export_base_dir, asset.item.project.slug, asset.item.item_id, asset.slug
+                export_base_dir, asset.item.project.slug, asset.item.item_id
             )
             os.makedirs(dest_folder, exist_ok=True)
-            dest = os.path.join(dest_folder, asset.media_url)
-
-            if self.include_images:
-                with open(dest, mode="wb") as dest_file:
-                    with ASSET_STORAGE.open(src, mode="rb") as src_file:
-                        for chunk in src_file.chunks(1048576):
-                            dest_file.write(chunk)
-
-            # Get transcription data
-            # FIXME: if we're not including all transcriptions,
-            # we should pick the completed or latest versions!
-
-            try:
-                transcription = Transcription.objects.get(
-                    asset=asset, user=self.request.user
-                ).text
-            except Transcription.DoesNotExist:
-                transcription = ""
 
             # Build transcription output text file
-            tran_output_path = os.path.join(
-                dest_folder, "%s.txt" % os.path.basename(asset.media_url)
+            text_output_path = os.path.join(
+                dest_folder,
+                "%s.txt" % os.path.basename(get_original_asset_id(asset.download_url)),
             )
-            with open(tran_output_path, "w") as f:
-                f.write(transcription)
+            with open(text_output_path, "w") as f:
+                f.write(asset.latest_transcription or "")
 
         # Turn Structure into bagit format
         bagit.make_bag(export_base_dir, {"Contact-Name": request.user.username})
@@ -137,7 +120,9 @@ def get(self, request, *args, **kwargs):
         # Download zip
         with open("%s.zip" % export_base_dir, "rb") as zip_file:
             response = HttpResponse(zip_file, content_type="application/zip")
-        response["Content-Disposition"] = "attachment; filename=%s.zip" % campaign.slug
+        response["Content-Disposition"] = "attachment; filename=%s.zip" % campaign_slug
+
+        # Upload zip to S3 bucket
 
         # Clean up temp folders & zipfile once exported
         shutil.rmtree(export_base_dir)
diff --git a/exporter/tests/test_view.py b/exporter/tests/test_view.py
--- a/exporter/tests/test_view.py
+++ b/exporter/tests/test_view.py
@@ -1,10 +1,6 @@
 import io
-import os
 import zipfile
 
-from django.conf import settings
-from django.core.files.base import ContentFile
-from django.core.files.storage import default_storage
 from django.test import TestCase
 from django.urls import reverse
 
@@ -16,6 +12,11 @@
     create_project,
 )
 
+DOWNLOAD_URL = (
+    "http://tile.loc.gov/image-services/iiif/"
+    "service:mss:mal:003:0036300:002/full/pct:25/0/default.jpg"
+)
+
 
 class ViewTest_Exporter(TestCase):
     """
@@ -24,45 +25,13 @@ class ViewTest_Exporter(TestCase):
     Make sure the postgresql db is available. Run docker-compose up db
     """
 
-    def login_user(self):
-        """
-        Create a user and log the user in
-        """
-
-        self.user = User.objects.create(username="tester", email="[email protected]")
-        self.user.set_password("top_secret")
-        self.user.save()
+    def setUp(self):
+        user = User.objects.create(username="tester", email="[email protected]")
+        user.set_password("top_secret")
+        user.save()
 
         self.assertTrue(self.client.login(username="tester", password="top_secret"))
 
-    def test_csv_export(self):
-        """
-        Test GET route /campaigns/export/<slug-value>/ (campaign)
-        """
-        self.login_user()
-
-        asset = create_asset()
-
-        response = self.client.get(
-            reverse(
-                "transcriptions:export-csv", args=(asset.item.project.campaign.slug,)
-            )
-        )
-
-        self.assertEqual(response.status_code, 200)
-        self.assertEqual(
-            response.content.decode("utf-8"),
-            "Campaign,Title,Description,MediaUrl,Transcription,Tags\r\n"
-            "Test Campaign,Test Asset,,1.jpg,,\r\n",
-        )
-
-    def test_bagit_export(self):
-        """
-        Test the http GET on route /campaigns/exportBagit/<campaignname>/
-        """
-
-        self.login_user()
-
         campaign = create_campaign(published=True)
         project = create_project(campaign=campaign, published=True)
         item = create_item(project=project, published=True)
@@ -71,33 +40,56 @@
             item=item,
             title="TestAsset",
             description="Asset Description",
-            media_url="1.jpg",
+            download_url=DOWNLOAD_URL,
             media_type=MediaType.IMAGE,
             sequence=1,
         )
 
         # add a Transcription object
-        transcription1 = Transcription(asset=asset, user=self.user, text="Sample")
+        transcription1 = Transcription(asset=asset, user=user, text="Sample")
         transcription1.full_clean()
         transcription1.save()
 
-        item_dir = os.path.join(
-            settings.MEDIA_ROOT, campaign.slug, project.slug, item.item_id, asset.slug
+    def test_csv_export(self):
+        """
+        Test GET route /campaigns/exportCSV/<slug-value>/ (campaign)
+        """
+
+        campaign_slug = "test-campaign"
+
+        response = self.client.get(
+            reverse("transcriptions:export-csv", args=(campaign_slug,))
         )
 
-        asset_file = ContentFile(b"Not a real JPEG")
-        default_storage.save(
-            os.path.join(item_dir, f"{asset.sequence}.jpg"), asset_file
+        expected_response_content = (
+            "b'Campaign,Project,Item,ItemId,Asset,"
+            "AssetStatus,DownloadUrl,Transcription\\r\\n'"
+            "b'Test Campaign,Test Project,Test Item,"
+            "testitem0123456789,TestAsset,edit,"
+            "http://tile.loc.gov/image-services/"
+            "iiif/service:mss:mal:003:0036300:002/full"
+            "/pct:25/0/default.jpg,Sample\\r\\n'"
        )
 
+        self.assertEqual(response.status_code, 200)
+        response_content = "".join(map(str, response.streaming_content))
+        self.assertEqual(response_content, expected_response_content)
+
+    def test_bagit_export(self):
+        """
+        Test the http GET on route /campaigns/exportBagit/<campaignname>/
+        """
+
+        campaign_slug = "test-campaign"
+
         response = self.client.get(
-            reverse("transcriptions:export-bagit", args=(campaign.slug,))
+            reverse("transcriptions:export-bagit", args=(campaign_slug,))
         )
 
         self.assertEqual(response.status_code, 200)
         self.assertEquals(
             response.get("Content-Disposition"),
-            "attachment; filename=%s.zip" % campaign.slug,
+            "attachment; filename=%s.zip" % campaign_slug,
         )
 
         f = io.BytesIO(response.content)
@@ -106,6 +98,6 @@
         self.assertIn("bagit.txt", zipped_file.namelist())
         self.assertIn("bag-info.txt", zipped_file.namelist())
         self.assertIn(
-            "data/test-project/testitem0123456789/testasset/1.jpg",
+            "data/test-project/testitem0123456789/mss-mal-003-0036300-002.txt",
             zipped_file.namelist(),
         )
Refactor exporter - look at past tickets and start prelim tests
Do we want to have streaming bag creation as a goal for this? I'm thinking that might be more hassle than it's worth if we just use another S3 bucket to assemble images, but I think that also raises the question of whether we should bother with media at all vs. just the metadata.

For now, we just need the transcription data. No images yet. That'll keep file sizes manageable: we should probably just be storing IDs & URLs anyway.

One thing I remember from the last time I looked at the code was that it didn't seem well-defined how we handled things which weren't complete. Do we have a policy decision from @colefemeno about whether that means we need to exclude uncompleted items, expose the status, etc.?
2018-11-06T19:59:17
LibraryOfCongress/concordia
600
LibraryOfCongress__concordia-600
[ "392" ]
972478fb15ca563b0d99d5d291496f38780be70e
diff --git a/concordia/management/commands/ensure_initial_site_configuration.py b/concordia/management/commands/ensure_initial_site_configuration.py new file mode 100644 --- /dev/null +++ b/concordia/management/commands/ensure_initial_site_configuration.py @@ -0,0 +1,81 @@ +""" +Ensure that our basic site configuration has been applied + +This is intended for automated scenarios such as a fresh database server should +be configured on first run but a newly-launched container should not make any +changes. For convenience with Docker, the default values for each command-line +argument will be retrieved from the environment. + +Tasks: +1. Ensure that at least one admin user account exists. If not, a new one will be + created but it will have an unusable password to force use of the password + reset process. +2. Ensure that the Sites framework has the intended site name & domain +""" + +import os + +from django.contrib.auth.models import User +from django.contrib.sites.models import Site +from django.core.management.base import BaseCommand +from django.db.transaction import atomic + + +class Command(BaseCommand): + help = "Ensure that core site configuration has been applied" + + def add_arguments(self, parser): + parser.add_argument( + "--admin-username", + default=os.environ.get("CONCORDIA_ADMIN_USERNAME", "admin"), + help="Admin user's username (default=%(default)s)", + ) + parser.add_argument( + "--admin-email", + default=os.environ.get("CONCORDIA_ADMIN_EMAIL", "[email protected]"), + help="Admin user's email address (default=%(default)s)", + ) + parser.add_argument( + "--site-name", + default=os.environ.get("HOST_NAME", "example.com"), + help="Site name (default=%(default)s)", + ) + parser.add_argument( + "--site-domain", + default=os.environ.get("HOST_NAME", "example.com"), + help="Site domain (default=%(default)s)", + ) + + @atomic + def handle(self, *, admin_username, admin_email, site_name, site_domain, **options): + user, user_created = User.objects.get_or_create( + username=admin_username, defaults={"email": admin_email} + ) + user.is_staff = user.is_superuser = True + + if user.email != admin_email: + self.stdout.write( + f"Changing {admin_username} email from {user.email} to {admin_email}" + ) + user.email = admin_email + + if user_created: + user.set_unusable_password() + + user.full_clean() + user.save() + + if user_created: + self.stdout.write( + f"Created superuser {admin_username} account for {admin_email}." + " Use the password reset form to change the unusable password." + ) + + if site_domain != "example.com": + updated = Site.objects.filter(domain="example.com").update( + name=site_name, domain=site_domain + ) + if updated: + self.stdout.write( + f"Configured site with name {site_name} and domain {site_domain}" + )
Update deployment to include setting site name

Currently the site name defaults to `example.com`, causing a misnomer in our activation email. This can be configured manually in the admin pages, but as we continuously deploy in various environments it's probably best to update deployment to set the base URL as the site name.
We could add this as part of a setup management command which would get the canonical URL from the environment, create the super user if none exists, and create the user groups as well.
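Since the defaults in the resulting command come from the environment, a container entrypoint can simply run it on every boot; it can also be invoked from Python. A sketch of calling the command from the patch above (argument values are examples):

```
from django.core.management import call_command

# Idempotent: an existing admin user or already-configured site is left alone.
call_command(
    "ensure_initial_site_configuration",
    admin_username="admin",
    admin_email="[email protected]",
    site_name="crowd.loc.gov",
    site_domain="crowd.loc.gov",
)
```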
2018-11-07T19:21:37
LibraryOfCongress/concordia
604
LibraryOfCongress__concordia-604
[ "169" ]
83f1d6bae0b944a7e7fb8caac7d2d0d7c4a19988
diff --git a/concordia/settings_dev.py b/concordia/settings_dev.py
--- a/concordia/settings_dev.py
+++ b/concordia/settings_dev.py
@@ -19,14 +19,6 @@
 CELERY_BROKER_URL = "pyamqp://guest@localhost"
 CELERY_RESULT_BACKEND = "rpc://"
 
-S3_BUCKET_NAME = "concordia-staticpages"
-
-DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
-AWS_STORAGE_BUCKET_NAME = S3_BUCKET_NAME
-AWS_DEFAULT_ACL = None  # Don't set an ACL on the files, inherit the bucket ACLs
-
-MEDIA_URL = "https://%s.s3.amazonaws.com/" % S3_BUCKET_NAME
-
 EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
 EMAIL_FILE_PATH = "/tmp/concordia-messages"  # change this to a proper location
 DEFAULT_FROM_EMAIL = os.environ.get("DEFAULT_FROM_EMAIL", "")
diff --git a/concordia/settings_prod.py b/concordia/settings_prod.py
--- a/concordia/settings_prod.py
+++ b/concordia/settings_prod.py
@@ -50,6 +50,7 @@
 CELERY_RESULT_BACKEND = "rpc://"
 
 S3_BUCKET_NAME = os.getenv("S3_BUCKET_NAME")
+EXPORT_S3_BUCKET_NAME = os.getenv("EXPORT_S3_BUCKET_NAME")
 
 DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
 AWS_STORAGE_BUCKET_NAME = S3_BUCKET_NAME
diff --git a/exporter/views.py b/exporter/views.py
--- a/exporter/views.py
+++ b/exporter/views.py
@@ -1,12 +1,15 @@
 import os
 import re
 import shutil
+import tempfile
+from datetime import datetime
 
 import bagit
+import boto3
 from django.conf import settings
-from django.contrib.auth.decorators import login_required
-from django.db.models import Subquery, OuterRef
-from django.http import HttpResponse
+from django.contrib.admin.views.decorators import staff_member_required
+from django.db.models import OuterRef, Subquery
+from django.http import HttpResponse, HttpResponseRedirect
 from django.utils.decorators import method_decorator
 from django.views.generic import TemplateView
 from tabular_export.core import export_to_csv_response, flatten_queryset
 
 from concordia.models import Asset, Transcription
 
 
-def get_latest_transcription_data(campaign_slug):
+def get_latest_transcription_data(asset_qs):
     latest_trans_subquery = (
         Transcription.objects.filter(asset=OuterRef("pk"))
         .order_by("-pk")
         .values("text")
     )
-    assets = Asset.objects.annotate(
-        latest_transcription=Subquery(latest_trans_subquery[:1])
-    )
-    assets = assets.filter(item__project__campaign__slug=campaign_slug)
+
+    assets = asset_qs.annotate(latest_transcription=Subquery(latest_trans_subquery[:1]))
     return assets
 
 
@@ -33,10 +34,10 @@ def get_original_asset_id(download_url):
     that identifies this image uniquely on loc.gov
     """
     if download_url.startswith("http://tile.loc.gov/"):
-        pattern = r"/service:([A-Za-z0-9:]*)/"
+        pattern = r"/service:([A-Za-z0-9:\-]*)/"
         asset_id = re.search(pattern, download_url)
         assert asset_id
-        return asset_id.group(1).replace(":", "-")
+        return asset_id.group(1)
     else:
         return download_url
 
@@ -46,9 +47,12 @@ class ExportCampaignToCSV(TemplateView):
     Exports the most recent transcription for each asset in a campaign
     """
 
-    @method_decorator(login_required)
+    @method_decorator(staff_member_required)
     def get(self, request, *args, **kwargs):
-        assets = get_latest_transcription_data(self.kwargs["campaign_slug"])
+        asset_qs = Asset.objects.filter(
+            item__project__campaign__slug=self.kwargs["campaign_slug"]
+        )
+        assets = get_latest_transcription_data(asset_qs)
 
         headers, data = flatten_queryset(
             assets,
@@ -86,15 +90,25 @@ class ExportCampaignToBagit(TemplateView):
     """
     Creates temp directory structure for source data.
     Executes bagit.py to turn temp directory into bagit strucutre.
     Builds and exports bagit structure as zip.
+    Uploads zip to S3 if configured.
     Removes all temporary directories and files.
     """
 
-    @method_decorator(login_required)
+    @method_decorator(staff_member_required)
     def get(self, request, *args, **kwargs):
         campaign_slug = self.kwargs["campaign_slug"]
-        assets = get_latest_transcription_data(campaign_slug)
+        asset_qs = Asset.objects.filter(
+            item__project__campaign__slug=campaign_slug,
+            transcription_status="completed",
+        )
+
+        assets = get_latest_transcription_data(asset_qs)
+
+        with tempfile.TemporaryDirectory(prefix=campaign_slug) as export_base_dir:
+            return self.do_bagit_export(assets, export_base_dir, campaign_slug)
 
-        export_base_dir = os.path.join(settings.SITE_ROOT_DIR, "tmp", campaign_slug)
+    def do_bagit_export(self, assets, export_base_dir, campaign_slug):
+        os.makedirs(export_base_dir, exist_ok=True)
 
         for asset in assets:
             dest_folder = os.path.join(
@@ -111,21 +125,33 @@
             f.write(asset.latest_transcription or "")
 
         # Turn Structure into bagit format
-        bagit.make_bag(export_base_dir, {"Contact-Name": request.user.username})
+        bagit.make_bag(export_base_dir)
 
         # Build .zip file of bagit formatted Campaign Folder
         archive_name = export_base_dir
         shutil.make_archive(archive_name, "zip", export_base_dir)
 
-        # Download zip
-        with open("%s.zip" % export_base_dir, "rb") as zip_file:
-            response = HttpResponse(zip_file, content_type="application/zip")
-        response["Content-Disposition"] = "attachment; filename=%s.zip" % campaign_slug
-
-        # Upload zip to S3 bucket
+        s3_bucket = getattr(settings, "EXPORT_S3_BUCKET_NAME", None)
+        export_filename = "%s-%s.zip" % (
+            campaign_slug,
+            datetime.today().isoformat(timespec="minutes"),
+        )
 
-        # Clean up temp folders & zipfile once exported
-        shutil.rmtree(export_base_dir)
-        os.remove("%s.zip" % export_base_dir)
+        if s3_bucket:
+            s3 = boto3.resource("s3")
+            s3.Bucket(s3_bucket).upload_file(
+                "%s.zip" % export_base_dir, "%s" % export_filename
+            )
 
-        return response
+            return HttpResponseRedirect(
+                "https://%s.s3.amazonaws.com/%s" % (s3_bucket, export_filename)
+            )
+        else:
+            # Download zip from local storage
+            with open("%s.zip" % export_base_dir, "rb") as zip_file:
+                response = HttpResponse(zip_file, content_type="application/zip")
+                response["Content-Disposition"] = (
+                    "attachment; filename=%s" % export_filename
+                )
                return response
diff --git a/exporter/tests/test_view.py b/exporter/tests/test_view.py --- a/exporter/tests/test_view.py +++ b/exporter/tests/test_view.py @@ -1,5 +1,6 @@ import io import zipfile +from datetime import datetime from django.test import TestCase from django.urls import reverse @@ -26,7 +27,9 @@ class ViewTest_Exporter(TestCase): """ def setUp(self): - user = User.objects.create(username="tester", email="[email protected]") + user = User.objects.create( + username="tester", email="[email protected]", is_staff=True + ) user.set_password("top_secret") user.save() @@ -46,7 +49,13 @@ def setUp(self): ) # add a Transcription object - transcription1 = Transcription(asset=asset, user=user, text="Sample") + transcription1 = Transcription( + asset=asset, + user=user, + text="Sample", + submitted=datetime.now(), + accepted=datetime.now(), + ) transcription1.full_clean() transcription1.save() @@ -65,7 +74,7 @@ def test_csv_export(self): "b'Campaign,Project,Item,ItemId,Asset," "AssetStatus,DownloadUrl,Transcription\\r\\n'" "b'Test Campaign,Test Project,Test Item," - "testitem0123456789,TestAsset,edit," + "testitem0123456789,TestAsset,completed," "http://tile.loc.gov/image-services/" "iiif/service:mss:mal:003:0036300:002/full" "/pct:25/0/default.jpg,Sample\\r\\n'" @@ -87,9 +96,14 @@ def test_bagit_export(self): ) self.assertEqual(response.status_code, 200) + + export_filename = "%s-%s.zip" % ( + campaign_slug, + datetime.today().isoformat(timespec="minutes"), + ) self.assertEquals( response.get("Content-Disposition"), - "attachment; filename=%s.zip" % campaign_slug, + "attachment; filename=%s" % export_filename, ) f = io.BytesIO(response.content) @@ -98,6 +112,6 @@ def test_bagit_export(self): self.assertIn("bagit.txt", zipped_file.namelist()) self.assertIn("bag-info.txt", zipped_file.namelist()) self.assertIn( - "data/test-project/testitem0123456789/mss-mal-003-0036300-002.txt", + "data/test-project/testitem0123456789/mss:mal:003:0036300:002.txt", zipped_file.namelist(), )
Update Exporter config to include original image file name To make ETL simple, the export of the transcription data needs to match the original format of the image name and directory name.
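Concretely, this comes down to the widened regex in `get_original_asset_id()` (see the patch above), which keeps the colon-separated loc.gov identifier intact instead of rewriting it with dashes. Checking it against the sample URL from the tests:

```
import re

DOWNLOAD_URL = (
    "http://tile.loc.gov/image-services/iiif/"
    "service:mss:mal:003:0036300:002/full/pct:25/0/default.jpg"
)

match = re.search(r"/service:([A-Za-z0-9:\-]*)/", DOWNLOAD_URL)
assert match
print(match.group(1))  # mss:mal:003:0036300:002 -> written as mss:mal:003:0036300:002.txt
```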
2018-11-08T21:42:29
google/jax
41
google__jax-41
[ "33" ]
38c7a07248fbccf5f9a8bde5cd88972c747c52aa
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -692,7 +692,9 @@ def broadcasting_shape_rule(name, *avals): if len({len(shape) for shape in shapes}) != 1: msg = '{} got arrays of different rank: {}.' raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes))))) - result_shape = onp.max(shapes, axis=0) + min_shape = onp.min(shapes, axis=0) + max_shape = onp.max(shapes, axis=0) + result_shape = onp.where(min_shape == 0, 0, max_shape) if not onp.all((shapes == result_shape) | (shapes == 1)): msg = '{} got incompatible shapes for broadcasting: {}.' raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes))))) diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -113,7 +113,9 @@ def _broadcast_shapes(*shapes): return shapes[0] ndim = _max(len(shape) for shape in shapes) shapes = onp.array([(1,) * (ndim - len(shape)) + shape for shape in shapes]) - result_shape = onp.max(shapes, axis=0) + min_shape = onp.min(shapes, axis=0) + max_shape = onp.max(shapes, axis=0) + result_shape = onp.where(min_shape == 0, 0, max_shape) if not onp.all((shapes == result_shape) | (shapes == 1)): raise ValueError("Incompatible shapes for broadcasting: {}" .format(tuple(map(tuple, shapes))))
diff --git a/jax/test_util.py b/jax/test_util.py
--- a/jax/test_util.py
+++ b/jax/test_util.py
@@ -274,8 +274,11 @@ def rand_some_equal():
   rng = npr.RandomState(0)
 
   def post(x):
+    x_ravel = x.ravel()
+    if len(x_ravel) == 0:
+      return x
     flips = rng.rand(*onp.shape(x)) < 0.5
-    return onp.where(flips, x.ravel()[0], x)
+    return onp.where(flips, x_ravel[0], x)
 
   return partial(_rand_dtype, randn, scale=100., post=post)
 
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -33,9 +33,13 @@
 config.parse_flags_with_absl()
 FLAGS = config.FLAGS
 
-array_shapes = [(), (4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]
+nonempty_array_shapes = [(), (4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]
+empty_array_shapes = [(0,), (0, 4), (3, 0),]
 
-all_shapes = [jtu.NUMPY_SCALAR_SHAPE] + array_shapes
+scalar_shapes = [jtu.NUMPY_SCALAR_SHAPE]
+array_shapes = nonempty_array_shapes + empty_array_shapes
+nonempty_shapes = scalar_shapes + nonempty_array_shapes
+all_shapes = scalar_shapes + array_shapes
 
 float_dtypes = [onp.float32, onp.float64]
 complex_dtypes = [onp.complex64]
@@ -46,90 +50,91 @@
 numeric_dtypes = float_dtypes + complex_dtypes + int_dtypes
 
-OpRecord = collections.namedtuple("OpRecord", ["name", "nargs", "dtypes", "rng",
-                                               "diff_modes", "test_name"])
+OpRecord = collections.namedtuple(
+    "OpRecord",
+    ["name", "nargs", "dtypes", "shapes", "rng", "diff_modes", "test_name"])
 
-def op_record(name, nargs, dtypes, rng, diff_modes, test_name=None):
+def op_record(name, nargs, dtypes, shapes, rng, diff_modes, test_name=None):
   test_name = test_name or name
-  return OpRecord(name, nargs, dtypes, rng, diff_modes, test_name)
+  return OpRecord(name, nargs, dtypes, shapes, rng, diff_modes, test_name)
 
 JAX_ONE_TO_ONE_OP_RECORDS = [
-    op_record("abs", 1, default_dtypes, jtu.rand_default(), ["rev"]),
-    op_record("add", 2, default_dtypes, jtu.rand_default(), ["rev"]),
-    op_record("ceil", 1, float_dtypes, jtu.rand_default(), []),
-    op_record("conj", 1, numeric_dtypes, jtu.rand_default(), ["rev"]),
-    op_record("conjugate", 1, numeric_dtypes, jtu.rand_default(), ["rev"]),
-    op_record("equal", 2, default_dtypes, jtu.rand_some_equal(), []),
-    op_record("exp", 1, numeric_dtypes, jtu.rand_default(), ["rev"]),
-    op_record("floor", 1, float_dtypes, jtu.rand_default(), []),
-    op_record("greater", 2, default_dtypes, jtu.rand_some_equal(), []),
-    op_record("greater_equal", 2, default_dtypes, jtu.rand_some_equal(), []),
-    op_record("less", 2, default_dtypes, jtu.rand_some_equal(), []),
-    op_record("less_equal", 2, default_dtypes, jtu.rand_some_equal(), []),
-    op_record("log", 1, numeric_dtypes, jtu.rand_positive(), ["rev"]),
-    op_record("logical_and", 2, default_dtypes, jtu.rand_bool(), []),
-    op_record("logical_not", 1, default_dtypes, jtu.rand_bool(), []),
-    op_record("logical_or", 2, default_dtypes, jtu.rand_bool(), []),
-    op_record("logical_xor", 2, default_dtypes, jtu.rand_bool(), []),
-    op_record("maximum", 2, default_dtypes, jtu.rand_some_inf(), []),
-    op_record("minimum", 2, default_dtypes, jtu.rand_some_inf(), []),
-    op_record("multiply", 2, default_dtypes, jtu.rand_default(), ["rev"]),
-    op_record("negative", 1, default_dtypes, jtu.rand_default(), ["rev"]),
-    op_record("not_equal", 2, default_dtypes, jtu.rand_some_equal(), ["rev"]),
-    op_record("power", 2, float_dtypes, jtu.rand_positive(), ["rev"]),
-    op_record("subtract", 2, default_dtypes, jtu.rand_default(), ["rev"]),
-    op_record("tanh", 1, numeric_dtypes, jtu.rand_default(), ["rev"]),
-    op_record("sin", 1, default_dtypes, jtu.rand_default(), ["rev"]),
-    op_record("cos", 1, default_dtypes, jtu.rand_default(), ["rev"]),
+    op_record("abs", 1, default_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
+    op_record("add", 2, default_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
+    op_record("ceil", 1, float_dtypes, all_shapes, jtu.rand_default(), []),
+    op_record("conj", 1, numeric_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
+    op_record("conjugate", 1, numeric_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
+    op_record("equal", 2, default_dtypes, all_shapes, jtu.rand_some_equal(), []),
+    #op_record("exp", 1, numeric_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
+    op_record("floor", 1, float_dtypes, all_shapes, jtu.rand_default(), []),
+    op_record("greater", 2, default_dtypes, all_shapes, jtu.rand_some_equal(), []),
+    op_record("greater_equal", 2, default_dtypes, all_shapes, jtu.rand_some_equal(), []),
+    op_record("less", 2, default_dtypes, all_shapes, jtu.rand_some_equal(), []),
+    op_record("less_equal", 2, default_dtypes, all_shapes, jtu.rand_some_equal(), []),
+    op_record("log", 1, numeric_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
+    op_record("logical_and", 2, default_dtypes, all_shapes, jtu.rand_bool(), []),
+    op_record("logical_not", 1, default_dtypes, all_shapes, jtu.rand_bool(), []),
+    op_record("logical_or", 2, default_dtypes, all_shapes, jtu.rand_bool(), []),
+    op_record("logical_xor", 2, default_dtypes, all_shapes, jtu.rand_bool(), []),
+    op_record("maximum", 2, default_dtypes, all_shapes, jtu.rand_some_inf(), []),
+    op_record("minimum", 2, default_dtypes, all_shapes, jtu.rand_some_inf(), []),
+    op_record("multiply", 2, default_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
+    op_record("negative", 1, default_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
+    op_record("not_equal", 2, default_dtypes, all_shapes, jtu.rand_some_equal(), ["rev"]),
+    #op_record("power", 2, float_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
+    op_record("subtract", 2, default_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
+    op_record("tanh", 1, numeric_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
+    op_record("sin", 1, default_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
+    op_record("cos", 1, default_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
 ]
 
 JAX_COMPOUND_OP_RECORDS = [
-    op_record("cosh", 1, default_dtypes, jtu.rand_default(), ["rev"]),
-    op_record("divide", 2, default_dtypes, jtu.rand_nonzero(), ["rev"]),
-    op_record("expm1", 1, numeric_dtypes, jtu.rand_positive(), [],
              test_name="expm1_large"),
-    op_record("expm1", 1, numeric_dtypes, jtu.rand_small_positive(), []),
-    op_record("floor_divide", 2, default_dtypes, jtu.rand_nonzero(), ["rev"]),
-    op_record("isclose", 2, float_dtypes, jtu.rand_small_positive(), []),
-    op_record("log1p", 1, numeric_dtypes, jtu.rand_positive(), [],
              test_name="log1p_large"),
+    op_record("cosh", 1, default_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
+    op_record("divide", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), ["rev"]),
+    op_record("expm1", 1, numeric_dtypes, all_shapes, jtu.rand_positive(), [],
              test_name="expm1_large"),
+    op_record("expm1", 1, numeric_dtypes, all_shapes, jtu.rand_small_positive(), []),
+    op_record("floor_divide", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), ["rev"]),
+    op_record("isclose", 2, float_dtypes, all_shapes, jtu.rand_small_positive(), []),
+    op_record("log1p", 1, numeric_dtypes, all_shapes, jtu.rand_positive(), [],
              test_name="log1p_large"),
-    op_record("log1p", 1, numeric_dtypes, jtu.rand_small_positive(), []),
-    op_record("logaddexp", 2, float_dtypes, jtu.rand_default(), ["rev"]),
-    op_record("ravel", 1, default_dtypes, jtu.rand_default(), ["rev"]),
-    op_record("remainder", 2, default_dtypes, jtu.rand_nonzero(), []),
-    op_record("sinh", 1, default_dtypes, jtu.rand_default(), ["rev"]),
-    op_record("sqrt", 1, default_dtypes, jtu.rand_positive(), ["rev"]),
-    op_record("transpose", 1, default_dtypes, jtu.rand_default(), ["rev"]),
-    op_record("true_divide", 2, default_dtypes, jtu.rand_nonzero(), ["rev"]),
-    op_record("where", 3, (onp.float32, onp.int64), jtu.rand_some_zero(), []),
+    op_record("log1p", 1, numeric_dtypes, all_shapes, jtu.rand_small_positive(), []),
+    op_record("logaddexp", 2, float_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
+    op_record("ravel", 1, default_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
+    op_record("remainder", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),
+    op_record("sinh", 1, default_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
+    op_record("sqrt", 1, default_dtypes, all_shapes, jtu.rand_positive(), ["rev"]),
+    op_record("transpose", 1, default_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
+    op_record("true_divide", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), ["rev"]),
+    op_record("where", 3, (onp.float32, onp.int64), all_shapes, jtu.rand_some_zero(), []),
 ]
 
 JAX_BITWISE_OP_RECORDS = [
-    op_record("bitwise_and", 2, int_dtypes + unsigned_dtypes,
+    op_record("bitwise_and", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool(), []),
-    op_record("bitwise_not", 1, int_dtypes + unsigned_dtypes,
+    op_record("bitwise_not", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool(), []),
-    op_record("bitwise_or", 2, int_dtypes + unsigned_dtypes,
+    op_record("bitwise_or", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool(), []),
-    op_record("bitwise_xor", 2, int_dtypes + unsigned_dtypes,
+    op_record("bitwise_xor", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool(), []),
 ]
 
 JAX_REDUCER_RECORDS = [
-    op_record("all", 1, bool_dtypes, jtu.rand_default(), []),
-    op_record("any", 1, bool_dtypes, jtu.rand_default(), []),
-    op_record("max", 1, default_dtypes, jtu.rand_default(), []),
-    op_record("mean", 1, default_dtypes, jtu.rand_default(), []),
-    op_record("min", 1, default_dtypes, jtu.rand_default(), []),
-    op_record("prod", 1, default_dtypes, jtu.rand_small_positive(), []),
-    op_record("sum", 1, default_dtypes, jtu.rand_default(), []),
-    op_record("var", 1, default_dtypes, jtu.rand_default(), []),
+    op_record("all", 1, bool_dtypes, all_shapes, jtu.rand_default(), []),
+    op_record("any", 1, bool_dtypes, all_shapes, jtu.rand_default(), []),
+    op_record("max", 1, default_dtypes, nonempty_shapes, jtu.rand_default(), []),
+    #op_record("mean", 1, default_dtypes, all_shapes, jtu.rand_default(), []),
+    op_record("min", 1, default_dtypes, nonempty_shapes, jtu.rand_default(), []),
+    #op_record("prod", 1, default_dtypes, all_shapes, jtu.rand_small_positive(), []),
+    op_record("sum", 1, default_dtypes, all_shapes, jtu.rand_default(), []),
+    #op_record("var", 1, default_dtypes, all_shapes, jtu.rand_default(), []),
 ]
 
 JAX_ARGMINMAX_RECORDS = [
-    op_record("argmin", 1, default_dtypes, jtu.rand_some_equal(), []),
-    op_record("argmax", 1, default_dtypes, jtu.rand_some_equal(), []),
+    op_record("argmin", 1, default_dtypes, nonempty_shapes, jtu.rand_some_equal(), []),
+    op_record("argmax", 1, default_dtypes, nonempty_shapes, jtu.rand_some_equal(), []),
 ]
 
 CombosWithReplacement = itertools.combinations_with_replacement
@@ -150,6 +155,15 @@ def _dtypes_are_compatible_for_bitwise_ops(args):
          or (width(x) == 32 and width(y) == 32)
          or (width(x) == 32 and width(y) == 64 and is_signed(y)))
 
+def _shapes_are_broadcast_compatible(shapes):
+  accumulator = onp.zeros([])
+  for shape in shapes:
+    try:
+      accumulator = accumulator + onp.zeros(shape)
+    except ValueError:
+      return False
+  return True
+
 
 class LaxBackedNumpyTests(jtu.JaxTestCase):
   """Tests for LAX-backed Numpy implementation."""
 
@@ -164,7 +178,9 @@ def _GetArgsMaker(self, rng, shapes, dtypes):
        "onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name)}
       for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,
                                  JAX_COMPOUND_OP_RECORDS)
-      for shapes in CombosWithReplacement(all_shapes, rec.nargs)
+      for shapes in filter(
+          _shapes_are_broadcast_compatible,
+          CombosWithReplacement(rec.shapes, rec.nargs))
       for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs)))
   def testOp(self, onp_op, lnp_op, rng, shapes, dtypes):
     args_maker = self._GetArgsMaker(rng, shapes, dtypes)
@@ -177,10 +193,12 @@
        "rng": rec.rng, "shapes": shapes, "dtypes": dtypes,
        "onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name)}
       for rec in JAX_BITWISE_OP_RECORDS
-      for shapes in CombosWithReplacement(all_shapes, rec.nargs)
+      for shapes in filter(
+          _shapes_are_broadcast_compatible,
+          CombosWithReplacement(rec.shapes, rec.nargs))
       for dtypes in filter(
-        _dtypes_are_compatible_for_bitwise_ops,
-        CombosWithReplacement(rec.dtypes, rec.nargs))))
+          _dtypes_are_compatible_for_bitwise_ops,
+          CombosWithReplacement(rec.dtypes, rec.nargs))))
   def testBitwiseOp(self, onp_op, lnp_op, rng, shapes, dtypes):
     if not FLAGS.jax_enable_x64 and any(
         onp.iinfo(dtype).bits == 64 for dtype in dtypes):
@@ -197,7 +215,7 @@ def testBitwiseOp(self, onp_op, lnp_op, rng, shapes, dtypes):
        "onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
        "axis": axis, "keepdims": keepdims}
       for rec in JAX_REDUCER_RECORDS
-      for shape in all_shapes for dtype in rec.dtypes
+      for shape in rec.shapes for dtype in rec.dtypes
       for axis in range(-len(shape), len(shape))
       for keepdims in [False, True]))
   def testReducer(self, onp_op, lnp_op, rng, shape, dtype, axis, keepdims):
@@ -215,7 +233,7 @@ def testReducer(self, onp_op, lnp_op, rng, shape, dtype, axis, keepdims):
        "onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
        "axis": axis}
       for rec in JAX_ARGMINMAX_RECORDS
-      for shape in all_shapes for dtype in rec.dtypes
+      for shape in rec.shapes for dtype in rec.dtypes
       for axis in range(-len(shape), len(shape))))
   def testArgMinMax(self, onp_op, lnp_op, rng, shape, dtype, axis):
Broadcasting of size-0 dimensions not implemented Numpy supports broadcasts with size-0 dimensions against size-1 dimensions: ``` onp.ones([0,1]) + onp.ones([1,128]) ``` produces: ``` array([], shape=(0, 128), dtype=float64) ``` However ``` to_device = jax.jit(lambda x:x) to_device(np.ones([0,1])) + to_device(np.ones([1,128])) ValueError: Incompatible shapes for broadcasting: ((0, 1), (1, 128)) ``` The broadcasting rule computes the output shape as ``` result_shape = onp.max(shapes, axis=0) ``` but it probably needs to be something like this: ``` min_shape = onp.min(shapes, axis=0) max_shape = onp.max(shapes, axis=0) result_shape = onp.where(min_shape == 0, 0, max_shape) ```
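For reference, the suggested rule can be checked directly in plain numpy. This is only a sketch of the proposed shape computation (assuming, as in the snippet above, that all shapes have already been padded to a common rank), not jax's actual implementation:

```python
import numpy as onp

def proposed_result_shape(*shapes):
    # Sketch of the fix suggested above: a size-0 dimension wins over a
    # size-1 dimension, so the output gets size 0 there. Assumes all
    # shapes were already padded to the same rank.
    shapes = onp.array(shapes)
    min_shape = onp.min(shapes, axis=0)
    max_shape = onp.max(shapes, axis=0)
    return tuple(onp.where(min_shape == 0, 0, max_shape))

print(proposed_result_shape((0, 1), (1, 128)))  # (0, 128), matching numpy
```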
2018-12-10T14:02:58
google/jax
77
google__jax-77
[ "55" ]
0f9a69fb8136f3f3c254774a67f0f27a51a61f63
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -327,6 +327,30 @@ def transpose(x, axis=None): return lax.transpose(x, axis) +@_wraps(onp.rot90) +def rot90(m, k=1, axes=(0, 1)): + ax1, ax2 = axes + if ax1 % m.ndim == ax2 % m.ndim: + raise ValueError("Axes must be different") # same as numpy error + k = k % 4 + if k == 0: + return m + elif k == 2: + return flip(flip(m, ax1), ax2) + else: + perm = list(range(m.ndim)) + perm[ax1], perm[ax2] = perm[ax2], perm[ax1] + if k == 1: + return transpose(flip(m, ax2), perm) + else: + return flip(transpose(m, perm), ax2) + + +@_wraps(onp.flip) +def flip(m, axis): + return lax.rev(m, [axis]) + + @_wraps(onp.sinh) def sinh(x): x, = _promote_to_result_dtype(onp.sinh, x) @@ -454,7 +478,10 @@ def where(condition, x=None, y=None): if not onp.issubdtype(_dtype(condition), onp.bool_): condition = lax.ne(condition, zeros_like(condition)) condition, x, y = broadcast_arrays(condition, x, y) - return lax.select(condition, *_promote_dtypes(x, y)) + if not x.size: + return x + else: + return lax.select(condition, *_promote_dtypes(x, y)) def broadcast_arrays(*args):
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -171,34 +171,36 @@ class LaxBackedNumpyTests(jtu.JaxTestCase): def _GetArgsMaker(self, rng, shapes, dtypes): return lambda: [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)] - @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes, - dtypes), - "rng": rec.rng, "shapes": shapes, "dtypes": dtypes, - "onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name)} - for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS, - JAX_COMPOUND_OP_RECORDS) - for shapes in filter( - _shapes_are_broadcast_compatible, - CombosWithReplacement(rec.shapes, rec.nargs)) - for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))) + @parameterized.named_parameters(itertools.chain.from_iterable( + jtu.cases_from_list( + {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes, + dtypes), + "rng": rec.rng, "shapes": shapes, "dtypes": dtypes, + "onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name)} + for shapes in filter( + _shapes_are_broadcast_compatible, + CombosWithReplacement(rec.shapes, rec.nargs)) + for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs)) + for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS, JAX_COMPOUND_OP_RECORDS))) def testOp(self, onp_op, lnp_op, rng, shapes, dtypes): args_maker = self._GetArgsMaker(rng, shapes, dtypes) self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True) self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True) - @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes, - dtypes), - "rng": rec.rng, "shapes": shapes, "dtypes": dtypes, - "onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name)} - for rec in JAX_BITWISE_OP_RECORDS - for shapes in filter( - _shapes_are_broadcast_compatible, - CombosWithReplacement(rec.shapes, rec.nargs)) - for dtypes in filter( - _dtypes_are_compatible_for_bitwise_ops, - CombosWithReplacement(rec.dtypes, rec.nargs)))) + @parameterized.named_parameters(itertools.chain.from_iterable( + jtu.cases_from_list( + {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes, + dtypes), + "rng": rec.rng, "shapes": shapes, "dtypes": dtypes, + "onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name)} + for rec in JAX_BITWISE_OP_RECORDS + for shapes in filter( + _shapes_are_broadcast_compatible, + CombosWithReplacement(rec.shapes, rec.nargs)) + for dtypes in filter( + _dtypes_are_compatible_for_bitwise_ops, + CombosWithReplacement(rec.dtypes, rec.nargs))) + for rec in JAX_BITWISE_OP_RECORDS)) def testBitwiseOp(self, onp_op, lnp_op, rng, shapes, dtypes): if not FLAGS.jax_enable_x64 and any( onp.iinfo(dtype).bits == 64 for dtype in dtypes): @@ -622,6 +624,41 @@ def DISABLED_testTracingPrimitiveWithNoTranslationErrorMessage(self): cfoo = api.jit(foo) self.assertRaises(NotImplementedError, lambda: cfoo(onp.arange(3))) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_{}_axis={}".format( + jtu.format_shape_dtype_string(shape, dtype), axis), + "rng": rng, "shape": shape, "dtype": dtype, "axis": axis} + for shape in [(3,), (2, 3)] + for dtype in default_dtypes + for axis in range(len(shape)) + for rng in [jtu.rand_default()])) + def testFlip(self, shape, dtype, axis, rng): + args_maker = self._GetArgsMaker(rng, [shape], [dtype]) + lnp_op = lambda x: lnp.flip(x, axis) + onp_op = lambda x: 
onp.flip(x, axis) + self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True) + self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_{}_k={}_axes={}".format( + jtu.format_shape_dtype_string(shape, dtype), k, axes), + "rng": rng, "shape": shape, "dtype": dtype, "k": k, "axes": axes} + for shape, axes in [ + [(2, 3), (0, 1)], + [(2, 3), (1, 0)], + [(4, 3, 2), (0, 2)], + [(4, 3, 2), (2, 1)], + ] + for k in range(-3, 4) + for dtype in default_dtypes + for rng in [jtu.rand_default()])) + def testRot90(self, shape, dtype, k, axes, rng): + args_maker = self._GetArgsMaker(rng, [shape], [dtype]) + lnp_op = lambda x: lnp.rot90(x, k, axes) + onp_op = lambda x: onp.rot90(x, k, axes) + self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True) + self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True) + # TODO(mattjj): test infix operator overrides def DISABLED_testRavel(self): diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -137,27 +137,29 @@ def op_record(op, nargs, dtypes, rng, tol=1e-5): class LaxTest(jtu.JaxTestCase): """Numerical tests for LAX operations.""" - @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": jtu.format_test_name_suffix( - rec.op.__name__, shapes, itertools.repeat(dtype)), - "op": rec.op, "rng": rec.rng, "shapes": shapes, "dtype": dtype} - for rec in LAX_OPS - for shape_group in compatible_shapes - for shapes in CombosWithReplacement(shape_group, rec.nargs) - for dtype in rec.dtypes)) + @parameterized.named_parameters(itertools.chain.from_iterable( + jtu.cases_from_list( + {"testcase_name": jtu.format_test_name_suffix( + rec.op.__name__, shapes, itertools.repeat(dtype)), + "op": rec.op, "rng": rec.rng, "shapes": shapes, "dtype": dtype} + for shape_group in compatible_shapes + for shapes in CombosWithReplacement(shape_group, rec.nargs) + for dtype in rec.dtypes) + for rec in LAX_OPS)) def testOp(self, op, rng, shapes, dtype): args_maker = lambda: [rng(shape, dtype) for shape in shapes] self._CompileAndCheck(op, args_maker, check_dtypes=True) - @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": jtu.format_test_name_suffix( - rec.op.__name__, shapes, itertools.repeat(dtype)), - "op": rec.op, "rng": rec.rng, "shapes": shapes, "dtype": dtype, - "tol": rec.tol} - for rec in LAX_OPS - for shape_group in compatible_shapes - for shapes in CombosWithReplacement(shape_group, rec.nargs) - for dtype in rec.dtypes)) + @parameterized.named_parameters(itertools.chain.from_iterable( + jtu.cases_from_list( + {"testcase_name": jtu.format_test_name_suffix( + rec.op.__name__, shapes, itertools.repeat(dtype)), + "op": rec.op, "rng": rec.rng, "shapes": shapes, "dtype": dtype, + "tol": rec.tol} + for shape_group in compatible_shapes + for shapes in CombosWithReplacement(shape_group, rec.nargs) + for dtype in rec.dtypes) + for rec in LAX_OPS)) def testOpAgainstNumpy(self, op, rng, shapes, dtype, tol): args_maker = lambda: [rng(shape, dtype) for shape in shapes] numpy_op = getattr(lax_reference, op.__name__) @@ -1436,16 +1438,16 @@ def check_grads_bilinear(f, args, order, atol=None, rtol=None): class LaxAutodiffTest(jtu.JaxTestCase): - @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": jtu.format_test_name_suffix( - rec.op.__name__, shapes, itertools.repeat(dtype)), - "op": rec.op, "rng": rec.rng, "shapes": shapes, "dtype": dtype, - "order": rec.order} - for rec in 
LAX_GRAD_OPS - for shape_group in compatible_shapes - for shapes in CombosWithReplacement(shape_group, rec.nargs) - for dtype in rec.dtypes - )) + @parameterized.named_parameters(itertools.chain.from_iterable( + jtu.cases_from_list( + {"testcase_name": jtu.format_test_name_suffix( + rec.op.__name__, shapes, itertools.repeat(dtype)), + "op": rec.op, "rng": rec.rng, "shapes": shapes, "dtype": dtype, + "order": rec.order} + for shape_group in compatible_shapes + for shapes in CombosWithReplacement(shape_group, rec.nargs) + for dtype in rec.dtypes) + for rec in LAX_GRAD_OPS)) def testOpGrad(self, op, rng, shapes, dtype, order): if FLAGS.jax_test_dut and FLAGS.jax_test_dut.startswith("tpu"): if dtype is onp.complex64:
np.rot90 support There is no provision for rotation of tensors in `lax_numpy.py`.
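For context, `rot90` decomposes into flips and a transpose, which is what the patch above builds on top of `lax.rev` and `lax.transpose`. A minimal plain-numpy sketch of that decomposition for the 2-D case (illustrative only, not the jax code):

```python
import numpy as onp

def rot90_2d(m, k=1):
    # Rotation by 90 degrees, k times, expressed with flip + transpose only.
    k = k % 4
    if k == 0:
        return m
    if k == 2:
        return onp.flip(onp.flip(m, 0), 1)
    if k == 1:
        return onp.flip(m, 1).T  # transpose(flip(m, axis=1))
    return onp.flip(m.T, 1)      # flip(transpose(m), axis=1)

a = onp.arange(6).reshape(2, 3)
assert (rot90_2d(a, 1) == onp.rot90(a, 1)).all()
```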
2018-12-11T19:36:41
google/jax
79
google__jax-79
[ "78" ]
9cd60279791b78550f22f1a4e09ae414e63bba94
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -78,21 +78,18 @@ def jacrev(fun, x): def hessian(fun): return jacfwd(jacrev(fun)) -def vmap(fun, *args, **kwargs): - in_axes = kwargs.pop("in_axes", 0) - out_axes = kwargs.pop("out_axes", 0) - if kwargs: - msg = "vmap keyword args must be 'in_axes' and/or 'out_axes', got {}." - raise TypeError(msg.format(', '.join(kwargs))) - - if type(in_axes) is int: - in_axes = (in_axes,) * len(args) - if not isinstance(fun, lu.WrappedFun): - fun = lu.wrap_init(fun) - in_flat, in_trees = unzip2(map(tree_to_jaxtuples, args)) - flat_fun, out_tree = flatten_fun(fun, in_trees) - out_flat = batching.batch(flat_fun, in_flat, in_axes, out_axes) - return build_tree(out_tree(), out_flat) +def vmap(fun, in_axes=0, out_axes=0): + + def batched_fun(*args, **kwargs): + if not isinstance(fun, lu.WrappedFun): + f = lu.wrap_init(fun) + in_axes_ = (in_axes,) * len(args) if type(in_axes) is int else in_axes + in_flat, in_trees = unzip2(map(tree_to_jaxtuples, args)) + flat_fun, out_tree = flatten_fun(f, in_trees) + out_flat = batching.batch(flat_fun, in_flat, in_axes_, out_axes) + return build_tree(out_tree(), out_flat) + + return batched_fun def jvp(fun, primals, tangents): def flatten_arg(primal, tangent):
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -31,21 +31,16 @@ from jax.interpreters import partial_eval as pe from jax.util import partial, curry -import functools as fn - class BatchingTest(jtu.JaxTestCase): def testConstantFunction(self): - ans = vmap(lambda x: 3, onp.ones(4)) + ans = vmap(lambda x: 3)(onp.ones(4)) expected = 3 * onp.ones(4) self.assertAllClose(ans, expected, check_dtypes=False) def testNestedBatchingMatMat(self): - def matvec(A, b): - return vmap(np.vdot, A, b, in_axes=(0, None)) - - def matmat(A, B): - return vmap(matvec, A, B, in_axes=(None, 1), out_axes=1) + matvec = vmap(np.vdot, in_axes=(0, None)) + matmat = vmap(matvec, in_axes=(None, 1), out_axes=1) R = onp.random.RandomState(0).randn A = R(4, 3) @@ -94,7 +89,7 @@ def loss(params, data): target_batch = R(5, 4) batch = (input_batch, target_batch) - ans = vmap(partial(grad(loss), params), batch) + ans = vmap(partial(grad(loss), params))(batch) for ans_pair, param_pair in zip(ans, params): dW, db = ans_pair @@ -107,13 +102,13 @@ def testJacobians(self): def jacbwd(f, x): y, pullback = vjp(f, x) std_basis = onp.eye(onp.size(y)).reshape((-1,) + onp.shape(y)) - jac_flat, = vmap(pullback, std_basis, out_axes=onp.ndim(y)) + jac_flat, = vmap(pullback, out_axes=onp.ndim(y))(std_basis) return jac_flat.reshape(onp.shape(y) + onp.shape(x)) def jacfwd(f, x): pushfwd = lambda v: jvp(f, (x,), (v,)) std_basis = onp.eye(onp.size(x)).reshape((-1,) + onp.shape(x)) - y, jac_flat = vmap(pushfwd, std_basis, out_axes=(None, 0)) + y, jac_flat = vmap(pushfwd, out_axes=(None, 0))(std_basis) return jac_flat.reshape(onp.shape(y) + onp.shape(x)) R = onp.random.RandomState(0).randn @@ -133,7 +128,7 @@ def f(x): side.append(None) return x + x - g = jit(lambda x: vmap(f, x)) + g = jit(vmap(f)) self.assertAllClose(g(onp.ones(2)), 2 * onp.ones(2), check_dtypes=False) self.assertEqual(len(side), 1) self.assertAllClose(g(2 * onp.ones(2)), 4 * onp.ones(2), @@ -145,7 +140,7 @@ def testSliceLax(self): R = onp.random.RandomState(0).randn x = R(5, 10) - ans = vmap(fun, x) + ans = vmap(fun)(x) expected_ans = x[:, 2:4] self.assertAllClose(ans, expected_ans, check_dtypes=False) @@ -154,7 +149,7 @@ def testSliceNumpy(self): R = onp.random.RandomState(0).randn x = R(10, 5, 3, 7) - ans = vmap(fun, x) + ans = vmap(fun)(x) expected_ans = x[:, :, 2] self.assertAllClose(ans, expected_ans, check_dtypes=False) @@ -163,7 +158,7 @@ def testNpMaximum(self): R = onp.random.RandomState(0).randn x = R(10, 5, 3, 7) - ans = vmap(fun, x) + ans = vmap(fun)(x) expected_ans = onp.maximum(x, 0.0) self.assertAllClose(ans, expected_ans, check_dtypes=False) @@ -171,7 +166,7 @@ def testNpGtrThan(self): R = onp.random.RandomState(0).randn x = R(10, 5, 3, 7) - ans = vmap(lambda x: x > 1.0, x) + ans = vmap(lambda x: x > 1.0)(x) expected_ans = x > 1.0 self.assertAllClose(ans, expected_ans, check_dtypes=True) @@ -182,7 +177,7 @@ def testNpMaximumPerExampleGrad(self): fun = lambda W, x: np.sum(np.maximum(np.dot(x, W), 0.0) ** 2) - ans = vmap(fn.partial(grad(fun), W), x) + ans = vmap(partial(grad(fun), W))(x) W_t = np.transpose(W) for i in range(10): @@ -199,44 +194,44 @@ def testDotGeneral(self): x = R(10, 3, 4, 5) y = R(10, 3, 5, 6) - ans = vmap(lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]), x, y) + fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]) + ans = vmap(fun)(x, y) expected = lax.dot_general(x, y, [((3,), (2,)), ((0, 1), (0, 1))]) self.assertAllClose(ans, expected, 
check_dtypes=True) x = R(3, 4, 10, 5) y = R(3, 10, 5, 6) - ans = vmap(lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]), x, y, - in_axes=(2, 1)) + fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]) + ans = vmap(fun, in_axes=(2, 1))(x, y) fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]) expected = onp.stack([fun(x[..., i, :], y[:, i, ...]) for i in range(10)]) self.assertAllClose(ans, expected, check_dtypes=True) x = R(3, 4, 5, 10) y = R(3, 5, 6) - ans = vmap(lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]), x, y, - in_axes=(3, None)) + fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]) + ans = vmap(fun, in_axes=(3, None))(x, y) fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]) expected = onp.stack([fun(x[..., i], y) for i in range(10)]) self.assertAllClose(ans, expected, check_dtypes=True) x = R(3, 4, 5) y = R(3, 5, 10, 6) - ans = vmap(lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]), x, y, - in_axes=(None, 2)) + fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]) + ans = vmap(fun, in_axes=(None, 2))(x, y) fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))]) expected = onp.stack([fun(x, y[..., i, :]) for i in range(10)]) self.assertAllClose(ans, expected, check_dtypes=True) def testDot(self): # these tests are based on @shoyer's notebook studying gufuncs - curried_vmap = curry(vmap) def vecvec(a, b): dot = np.dot for ndim in range(1, max(a.ndim, b.ndim)): a_ax = 0 if a.ndim > ndim else None b_ax = 0 if b.ndim > ndim else None - dot = curried_vmap(dot, in_axes=(a_ax, b_ax)) + dot = vmap(dot, in_axes=(a_ax, b_ax)) return dot(a, b) assert vecvec(np.zeros((3,)), np.zeros((3,))).shape == ()
curry `vmap` We want to change the API to look more like `vmap(fun)` and `vmap(fun)(x, y)` instead of the current `vmap(fun, x, y)`. That makes it more consistent with the other main transformations (`jit` and `grad`, plus all the other autodiff ones) and seems to be more convenient given the experiences of @shoyer and @alexbw.
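To make the intended call shape concrete, here is a toy curried "vmap" written with a plain Python loop. It only illustrates the `vmap(fun, in_axes=...)(x, y)` calling convention requested here; it is not jax's implementation and has none of its performance properties:

```python
import numpy as onp

def toy_vmap(fun, in_axes=0, out_axes=0):
    def batched_fun(*args):
        axes = (in_axes,) * len(args) if isinstance(in_axes, int) else in_axes
        # Move each mapped axis to the front; None means "don't map".
        moved = [a if ax is None else onp.moveaxis(a, ax, 0)
                 for a, ax in zip(args, axes)]
        n = next(m.shape[0] for m, ax in zip(moved, axes) if ax is not None)
        outs = [fun(*[m if ax is None else m[i] for m, ax in zip(moved, axes)])
                for i in range(n)]
        return onp.moveaxis(onp.stack(outs), 0, out_axes)
    return batched_fun

matvec = toy_vmap(onp.vdot, in_axes=(0, None))  # vmap(fun)(...) call shape
print(matvec(onp.ones((4, 3)), onp.ones(3)))    # [3. 3. 3. 3.]
```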
2018-12-11T20:54:36
google/jax
82
google__jax-82
[ "54" ]
3ac1001c4929d9fb69c05a34ce35b4f91d7778ea
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -64,7 +64,7 @@ def jacfwd(fun, x): fun = lu.wrap_init(fun) pushfwd = partial(jvp, fun, (x,)) std_basis = onp.eye(onp.size(x)).reshape((-1,) + onp.shape(x)), - y, jac_flat = vmap(pushfwd, std_basis, out_axes=(None, 0)) + y, jac_flat = vmap(pushfwd, out_axes=(None, -1))(std_basis) return jac_flat.reshape(onp.shape(y) + onp.shape(x)) @curry @@ -72,7 +72,7 @@ def jacrev(fun, x): fun = lu.wrap_init(fun) y, pullback = vjp(fun, x) std_basis = onp.eye(onp.size(y)).reshape((-1,) + onp.shape(y)) - jac_flat, = vmap(pullback, std_basis, out_axes=onp.ndim(y)) + jac_flat, = vmap(pullback, out_axes=0)(std_basis) return jac_flat.reshape(onp.shape(y) + onp.shape(x)) def hessian(fun): diff --git a/jax/interpreters/batching.py b/jax/interpreters/batching.py --- a/jax/interpreters/batching.py +++ b/jax/interpreters/batching.py @@ -281,6 +281,7 @@ def moveaxis(sz, dst, src, x): else: return pack(map(partial(moveaxis, sz, dst, src), x)) elif isinstance(aval, ShapedArray): + dst = (dst % aval.ndim) if dst is not None and aval.ndim else dst if src == dst: return x else: diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -1362,11 +1362,20 @@ def concatenate_transpose_rule(t, *operands, **kwargs): return [slice(t, start, limit) if o is None else None for o, start, limit in zip(operands, starts, limits)] +def concatenate_batch_rule(batched_args, batch_dims, dimension, operand_shapes): + size = next(op.shape[bdim] for op, bdim in zip(batched_args, batch_dims) + if bdim is not None) + operands = [batching.move_dim_to_front(op, bdim) if bdim is not None + else broadcast(op, (size,)) + for op, bdim in zip(batched_args, batch_dims)] + return concatenate(operands, dimension + 1), 0 + concatenate_p = standard_primitive( concatenate_shape_rule, concatenate_dtype_rule, 'concatenate', concatenate_translation_rule) ad.deflinear(concatenate_p, concatenate_transpose_rule) ad.primitive_transposes[concatenate_p] = concatenate_transpose_rule +batching.primitive_batchers[concatenate_p] = concatenate_batch_rule def pad_shape_rule(operand, padding_value, padding_config): @@ -1398,9 +1407,21 @@ def pad_transpose(t, operand, padding_value, padding_config): return [t_operand, t_padv] +def pad_batch_rule(batched_args, batch_dims, padding_config): + operand, padding_value = batched_args + operand_bdim, padding_value_bdim = batch_dims + if padding_value_bdim is None: + assert operand_bdim is not None + padding_config = list(padding_config) + padding_config.insert(operand_bdim, (0, 0, 0)) + return pad(operand, padding_value, padding_config), operand_bdim + else: + raise NotImplementedError + pad_p = standard_primitive(pad_shape_rule, _input_dtype, 'pad') ad.deflinear(pad_p, pad_transpose) ad.primitive_transposes[pad_p] = pad_transpose +batching.primitive_batchers[pad_p] = pad_batch_rule def reshape_shape_rule(operand, new_sizes, dimensions, **unused_kwargs):
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -24,7 +24,7 @@ import jax.numpy as np from jax.config import config -from jax import jit, grad, device_get, device_put +from jax import jit, grad, device_get, device_put, jacfwd, jacrev from jax.core import Primitive from jax.interpreters.partial_eval import def_abstract_eval from jax.interpreters.ad import defjvp @@ -235,6 +235,18 @@ def test_device_put_and_get(self): assert isinstance(y2[1][1], onp.ndarray) assert onp.all(y2[1][1] == 3 * x) + def test_jacobian(self): + R = onp.random.RandomState(0).randn + A = R(4, 3) + x = R(3) + + f = lambda x: np.dot(A, x) + assert onp.allclose(jacfwd(f)(x), A) + assert onp.allclose(jacrev(f)(x), A) + + f = lambda x: np.tanh(np.dot(A, x)) + assert onp.allclose(jacfwd(f)(x), jacrev(f)(x)) + if __name__ == '__main__': config.config_with_absl() diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -24,7 +24,7 @@ from jax import test_util as jtu from jax.abstract_arrays import ShapedArray from jax import lax -from jax.api import jit, grad, jvp, vjp, trace_to_jaxpr +from jax.api import jit, grad, jvp, vjp, trace_to_jaxpr, jacfwd, jacrev from jax.api import vmap from jax.config import config from jax.core import unit @@ -239,6 +239,49 @@ def vecvec(a, b): # TODO(mattjj): this fails due to an xla error in dot_general # assert vecvec(np.zeros((4, 2, 3)), np.zeros((3,))).shape == (4, 2) + def testPad(self): + R = onp.random.RandomState(0).randn + + fun = lambda x: lax.pad(x, onp.float32(0), [(1, 2, 1)]) + x = R(5, 10).astype(onp.float32) + ans = vmap(fun)(x) + expected_ans = np.stack(list(map(fun, x))) + self.assertAllClose(ans, expected_ans, check_dtypes=False) + + + fun = lambda x: lax.pad(x, onp.float32(0), [(1, 2, 1), (0, 1, 0)]) + x = R(5, 10, 3).astype(onp.float32) + ans = vmap(fun)(x) + expected_ans = np.stack(list(map(fun, x))) + self.assertAllClose(ans, expected_ans, check_dtypes=False) + + def testConcatenate(self): + R = lambda *shape: onp.random.RandomState(0).randn(*shape).astype(onp.float32) + + fun = lambda *args: lax.concatenate(args, dimension=0) + x, y, z = R(10, 2, 3), R(1, 10, 3), R(4, 3) + ans = vmap(fun, in_axes=(0, 1, None))(x, y, z) + expected_ans = onp.concatenate([x, onp.swapaxes(y, 0, 1), + onp.broadcast_to(z, (10, 4, 3))], 1) + self.assertAllClose(ans, expected_ans, check_dtypes=False) + + fun = lambda *args: lax.concatenate(args, dimension=1) + x, y, z = R(10, 2, 1), R(2, 3), R(2, 4, 10) + ans = vmap(fun, in_axes=(0, None, 2))(x, y, z) + expected_ans = onp.concatenate([x, onp.broadcast_to(y, (10, 2, 3)), + onp.moveaxis(z, 2, 0)], 2) + self.assertAllClose(ans, expected_ans, check_dtypes=False) + + def testJacobianIssue54(self): + # test modeling the code in https://github.com/google/jax/issues/54 + + def func(xs): + return np.array([x for x in xs]) + + xs = np.ones((5, 1)) + jacrev(func)(xs) # don't crash + jacfwd(func)(xs) # don't crash + if __name__ == '__main__': config.config_with_absl()
Batching rules for pad and concatenate primitives not implemented I'm interested in using JAX to compute Jacobians for functions which involve iteratively applying operations to generate a sequence. When using Autograd for this, in order to avoid indexed assignment I would create a list which is iteratively populated with the sequence values and then create an array from the list using `np.array` or `np.stack`. Attempting the same in JAX (built from source with fc4afb409b) raises a `NotImplementedError` when trying to compute the Jacobian of such a function with either `jacrev` or `jacfwd`, as batching rules are not implemented for the `pad` and `concatenate` primitives respectively. As a minimal example:
```Python
import jax.numpy as np
from jax import jacrev, jacfwd

def func(xs):
    return np.array([x for x in xs])

jacrev_func = jacrev(func)
jacfwd_func = jacfwd(func)

xs = np.ones((5, 1))
jacrev_func(xs)  # raises NotImplementedError: Batching rule for 'pad' not implemented
jacfwd_func(xs)  # raises NotImplementedError: Batching rule for 'concatenate' not implemented
```
The same errors are raised when replacing `np.array` in the definition of `func` with `np.stack`, `np.hstack`, `np.vstack` or `np.concatenate`.
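For context, the eventual fix gives `concatenate` a batching rule along the lines modeled below in plain numpy: move each batched operand's batch axis to the front, broadcast unbatched operands, and concatenate one dimension over. This is only a conceptual sketch of the rule in the patch above:

```python
import numpy as onp

def concat_batch_rule_sketch(operands, batch_dims, dimension):
    # Find the batch size from any operand that actually has a batch axis.
    size = next(op.shape[d] for op, d in zip(operands, batch_dims)
                if d is not None)
    moved = [onp.moveaxis(op, d, 0) if d is not None
             else onp.broadcast_to(op, (size,) + op.shape)
             for op, d in zip(operands, batch_dims)]
    return onp.concatenate(moved, axis=dimension + 1), 0

x, y = onp.ones((5, 2, 3)), onp.ones((4, 3))  # x batched on axis 0, y unbatched
out, out_bdim = concat_batch_rule_sketch([x, y], [0, None], dimension=0)
print(out.shape)  # (5, 6, 3)
```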
2018-12-12T00:26:01
google/jax
92
google__jax-92
[ "91" ]
cad36945a2040aaa842a62f57649ec6b12850196
diff --git a/examples/onnx2xla.py b/examples/onnx2xla.py new file mode 100644 --- /dev/null +++ b/examples/onnx2xla.py @@ -0,0 +1,139 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""An ONNX to XLA compiler by JAX-tracing a Numpy-backed ONNX interpreter.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from cStringIO import StringIO +from functools import partial +import hashlib +import sys + +import onnx +from onnx import numpy_helper +from onnx import onnx_pb2 +from six.moves.urllib.request import urlopen + +import jax.numpy as np +from jax import jit, grad +from jax import lax + + +def _asarray(proto): + return numpy_helper.to_array(proto).reshape(tuple(proto.dims)) + + +attr_types = dict(onnx_pb2.AttributeProto.AttributeType.items()) +attribute_handlers = { + attr_types['FLOAT']: lambda a: a.f, + attr_types['INT']: lambda a: a.i, + attr_types['STRING']: lambda a: a.s, + attr_types['TENSOR']: lambda a: _asarray(a.t), + attr_types['FLOATS']: lambda a: a.floats, + attr_types['INTS']: lambda a: a.ints, + attr_types['STRINGS']: lambda a: a.strings, + attr_types['TENSORS']: lambda a: [_asarray(x) for x in a.tensors], +} + + +def onnx_maxpool(x, kernel_shape, pads=None, strides=None): + """Numpy-backed implementation of ONNX MaxPool op.""" + prefix = (1,) * (x.ndim - len(kernel_shape)) + dims = prefix + tuple(kernel_shape) + pads = tuple(pads) if pads else [0] * len(kernel_shape) + strides = (prefix + tuple(strides)) if strides else [1] * len(kernel_shape) + return [lax.reduce_window(x, -np.inf, lax.max, dims, strides, 'VALID')] + + +def onnx_conv(x, w, b=0, group=1, kernel_shape=None, pads=None, strides=None, + dilations=None, auto_pad=None): + """Numpy-backed implementation of ONNX Conv op.""" + assert group == 1 + kernel_shape = kernel_shape or w.shape + strides = strides or [1] * (w.ndim - 2) + if auto_pad: + auto_pad = 'SAME' if auto_pad.startswith('SAME') else 'VALID' + pads = lax.padtype_to_pads(x.shape[2:], w.shape[2:], strides, auto_pad) + else: + pads = pads or [0] * (w.ndim - 2) + lhs_dilation = [1] * (w.ndim - 2) + rhs_dilation = dilations or [1] * (w.ndim - 2) + return [lax.conv_with_general_padding(x, w, strides, pads, + lhs_dilation, rhs_dilation) + b] + + +def onnx_add(a, b, axis=None, broadcast=True): + """Numpy-backed implementation of ONNX Add op.""" + if broadcast: + axis = (a.dim - b.ndim) if axis is None else axis % a.ndim + assert a.shape[axis:][:b.ndim] == b.shape + b_shape = np.ones(a.ndim, dtype='int64').copy() + b_shape[axis:axis + b.ndim] = b.shape + b = np.reshape(b, b_shape) + return [a + b] + + +onnx_ops = { + 'Add': onnx_add, + 'Constant': lambda value: [value], + 'Conv': onnx_conv, + 'MatMul': lambda x, y: [np.matmul(x, y)], + 'MaxPool': onnx_maxpool, + 'Relu': lambda x: [np.maximum(x, 0)], + 'Reshape': lambda x, shape: [np.reshape(x, shape)], +} + + +def interpret_onnx(graph, *args): + vals = dict({n.name: a for n, a in zip(graph.input, args)}, + **{n.name: 
_asarray(n) for n in graph.initializer}) + for node in graph.node: + args = (vals[name] for name in node.input) + attrs = {a.name: attribute_handlers[a.type](a) for a in node.attribute} + outputs = onnx_ops[node.op_type](*args, **attrs) + for name, output in zip(node.output, outputs): + vals[name] = output + return [vals[n.name] for n in graph.output] + + +if __name__ == "__main__": + # It seems that there are several ONNX proto versions (you had one job!) but + # this implementation works with at least this one mnist example file. + url = ('https://github.com/onnx/models/blob/' + '81c4779096d1205edd0b809e191a924c58c38fef/' + 'mnist/model.onnx?raw=true') + download = urlopen(url).read() + if hashlib.md5(download).hexdigest() != 'bc8ad9bd19c5a058055dc18d0f089dad': + print("onnx file checksum mismatch") + sys.exit(1) + model = onnx.load(StringIO(download)) + + predict = lambda inputs: interpret_onnx(model.graph, inputs)[0] + + # Run inference in Numpy-backed interpreter + print("interpreted:") + print(predict(np.ones((1, 1, 28, 28)))) + + # JIT compile to XLA device, run inference on device + compiled_predict = jit(predict) + print("compiled:") + print(compiled_predict(np.ones((1, 1, 28, 28)))) + + # The interpreter is differentiable too! Even the compiled one: + fun = lambda inputs: np.sum(compiled_predict(inputs)) + print("a derivative with respect to inputs:") + print(grad(fun)(np.ones((1, 1, 28, 28)))[..., :3, :3]) +
Resurrect ONNX -> jaxpr compatibility This worked in a demo at one point. It would be great to be able to load all models in the [ONNX model zoo](https://github.com/onnx/models)
2018-12-12T21:21:14
google/jax
102
google__jax-102
[ "29" ]
3fba60fcd58ec00537af9f5e4b09a6cc5997229b
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -93,6 +93,12 @@ class ndarray(six.with_metaclass(_ArrayMeta, onp.ndarray)): float64 = onp.float64 complex64 = onp.complex64 +integer = onp.integer + +iinfo = onp.iinfo +finfo = onp.finfo + +issubdtype = onp.issubdtype ### utility functions @@ -866,6 +872,30 @@ def triu(m, k=0): return where(mask, zeros_like(m), m) +@_wraps(onp.trace) +def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): + if out: + raise NotImplementedError("The 'out' argument to trace is not supported.") + + a_shape = shape(a) + if dtype is None: + dtype = _dtype(a) + if issubdtype(dtype, integer): + default_int = xla_bridge.canonicalize_dtype(onp.int_) + if iinfo(dtype).bits < iinfo(default_int).bits: + dtype = default_int + + # Move the axis? dimensions to the end. + perm = [i for i in range(len(a_shape)) if i != axis1 and i != axis2] + perm = perm + [axis1, axis2] + a = lax.transpose(a, perm) + + # Mask out the diagonal and reduce. + a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool), + a, zeros_like(a)) + return sum(a, axis=(-2, -1), dtype=dtype) + + @_wraps(onp.diagonal) def diagonal(a, offset=0, axis1=0, axis2=1): a_shape = shape(a)
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -466,6 +466,24 @@ def testDiagonal(self, shape, dtype, offset, axis1, axis2, rng): self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True) self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_shape={}_dtype_{}_offset={}_axis1={}_axis2={}".format( + jtu.format_shape_dtype_string(shape, dtype), + out_dtype, offset, axis1, axis2), + "dtype": dtype, "out_dtype": out_dtype, "shape": shape, "offset": offset, + "axis1": axis1, "axis2": axis2, "rng": jtu.rand_default()} + for dtype in default_dtypes + for out_dtype in [None] + default_dtypes + for shape in [shape for shape in all_shapes if len(shape) >= 2] + for (axis1, axis2) in itertools.combinations(range(len(shape)), 2) + for offset in list(range(-4, 4)))) + def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2, rng): + onp_fun = lambda arg: onp.trace(arg, offset, axis1, axis2, out_dtype) + lnp_fun = lambda arg: lnp.trace(arg, offset, axis1, axis2, out_dtype) + args_maker = lambda: [rng(shape, dtype)] + self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True) + self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_{}".format( jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes)),
Add support for `np.trace` The goal is to use `@jit` on a function containing a call to `np.trace(...)`. My rudimentary attempt to implement it via indexing also fails:
```python
from jax import numpy as np
from jax.api import jit
import numpy as onp

@jit
def trace(A):
    return np.trace(A)  # Exception: Numpy function <function trace at 0x7f89bee1eb90> not yet implemented

@jit
def trace(A):
    idx = onp.diag_indices(len(A))
    diag = A[idx]  # TypeError: No abstraction handler for type: <type 'tuple'>
    return np.sum(diag)
```
Muwahaha, a workaround! ```python def trace(A): idx = onp.diag_indices(len(A)) flat_idx = onp.ravel_multi_index(idx, A.shape) diag = A.ravel()[flat_idx] return np.sum(diag) ```
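The fix that eventually landed avoids indexing altogether; here is a sketch of that mask-and-sum idea in plain numpy (the diagonal mask has a static shape, so it is jit-friendly):

```python
import numpy as onp

def trace_via_mask(a, offset=0):
    # Zero out everything except the offset-th diagonal, then sum.
    n, m = a.shape
    mask = onp.eye(n, m, k=offset, dtype=bool)
    return onp.where(mask, a, onp.zeros_like(a)).sum()

a = onp.arange(9.0).reshape(3, 3)
assert trace_via_mask(a) == onp.trace(a)
```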
2018-12-13T13:46:04
google/jax
110
google__jax-110
[ "44" ]
32b339a582dc7a0e88725f8d3bcca9b78f806872
diff --git a/jax/numpy/linalg.py b/jax/numpy/linalg.py --- a/jax/numpy/linalg.py +++ b/jax/numpy/linalg.py @@ -30,11 +30,20 @@ matmul = np.matmul trace = np.trace +_T = lambda x: np.swapaxes(x, -1, -2) @_wraps(onp.linalg.cholesky) def cholesky(a): return lax_linalg.cholesky(a) +@_wraps(onp.linalg.inv) +def inv(a): + if np.ndim(a) < 2 or a.shape[-1] != a.shape[-2]: + raise ValueError("Argument to inv must have shape [..., n, n], got {}." + .format(np.shape(a))) + q, r = qr(a) + return lax_linalg.triangular_solve(r, _T(q), lower=False, left_side=True) + @_wraps(onp.linalg.qr) def qr(a, mode="reduced"): diff --git a/jax/scipy/linalg.py b/jax/scipy/linalg.py --- a/jax/scipy/linalg.py +++ b/jax/scipy/linalg.py @@ -21,6 +21,7 @@ from .. import lax_linalg from ..numpy.lax_numpy import _wraps from ..numpy import lax_numpy as np +from ..numpy import linalg as np_linalg @_wraps(scipy.linalg.cholesky) @@ -33,6 +34,12 @@ def cholesky(a, lower=False, overwrite_a=False, check_finite=True): return lax_linalg.cholesky(a) +@_wraps(scipy.linalg.inv) +def inv(a, overwrite_a=False, check_finite=True): + del overwrite_a, check_finite + return np_linalg.inv(a) + + @_wraps(scipy.linalg.qr) def qr(a, overwrite_a=False, lwork=None, mode="full", pivoting=False, check_finite=True):
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -109,6 +109,29 @@ def compare_orthogonal(q1, q2): # Check that q is close to unitary. self.assertTrue(onp.all(norm(onp.eye(k) - onp.matmul(T(lq), lq)) < 5)) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": + "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)), + "shape": shape, "dtype": dtype, "rng": rng} + for shape in [(1, 1), (4, 4), (2, 5, 5), (200, 200), (5, 5, 5)] + for dtype in float_types() + for rng in [jtu.rand_default()])) + def testInv(self, shape, dtype, rng): + def args_maker(): + invertible = False + while not invertible: + a = rng(shape, dtype) + try: + onp.linalg.inv(a) + invertible = True + except onp.linalg.LinAlgError: + pass + return [a] + + self._CheckAgainstNumpy(onp.linalg.inv, np.linalg.inv, args_maker, + check_dtypes=True, tol=1e-3) + self._CompileAndCheck(np.linalg.inv, args_maker, check_dtypes=True) + class ScipyLinalgTest(jtu.JaxTestCase):
np.linalg.inv support I've been trying to implement Gaussian Process Regression, which requires the calculation of a matrix inverse. With regular numpy I would use `np.linalg.inv`, but I can't find this function in jax. Everything else is working as expected, and I can use `np.linalg.inv` for basic calculations. Unfortunately, the use of `np.linalg.inv` keeps me from using `grad` to calculate gradients, which would be the most exciting part of the whole implementation! I would love to contribute a PR if someone can tell me where to start.
Thanks for bringing this up, and for the offer to help! [Good `np.linalg` linear algebra support](https://github.com/HIPS/autograd/blob/master/autograd/numpy/linalg.py) was one of the best parts of Autograd, with a lot of users choosing Autograd just for that. It's also near and dear to my heart because working on the linear algebra support is how I first got into developing Autograd, and it's critical to my kind of machine learning research. JAX needs to get there too! I believe the main challenge is getting XLA to call into backend-specific linear algebra routines (e.g. in LAPACK and MAGMA) using the [CustomCall HLO](https://www.tensorflow.org/xla/operation_semantics#customcall) on CPU and GPU (and, more generally, call into the XLA client library for TPU). @hawkinsp has already started to look into what we need. Once we can generate calls into these routines, we can use [similar rules as in Autograd](https://github.com/HIPS/autograd/blob/master/autograd/numpy/linalg.py) for differentiation. An alternative route is just to implement the algorithms we need in terms of `lax` primitives, since we can already compile and differentiate all of those (on any backend). That's the approach taken in [the `jax.experimental.lapax` module](https://github.com/google/jax/blob/master/jax/experimental/lapax.py), which just has `cholesky` and `solve_triangular`. As you can see from e.g. the [cholesky](https://github.com/google/jax/blob/master/jax/experimental/lapax.py#L42-L58) routine, these algorithms aren't so bad to implement ourselves, but they'll likely be slower on CPU and GPU than the extremely well optimized LAPACK and MAGMA kernels. (Even for TPU, we'd rather reuse [HLO implementations in the nascent XLA client library](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/tf2xla/lib/cholesky.cc) than duplicate that effort in JAX, as convenient as it is to write this code in Python.) We'd love to get contributions on this. I think the best course of action is to wait for us to sort out our CustomCall / client library story, which we might be able to do this week (fingers crossed), and to make it work with one linear algebra routine (e.g. cholesky). Once we get one example working, it would be really helpful for contributors to dive in and help us set up the rest, along with their derivatives. How does that sound to you? To unstick your work for now, given that you're working on GPs you might be able to work with the `cholesky` and `solve_triangular` routines in `jax.experimental.lapax`. You might also be able to implement something like a CG iteration, though that path has its own bumps that need ironing out (@jaspersnoek mentioned he'd had some success in this direction). One other thing: it's not documented yet, but JAX has a configuration option for enabling 64-bit dtypes, which are off by default (instead capping everything to 32 bits). You can switch it on like this: ```python import jax.numpy as np from jax.config import config config.update("jax_enable_x64", True) print np.dot(np.zeros(2), np.zeros(2)).dtype # should print 'float64' ``` Here's a quick example of sampling from a GP with a squared exponential covariance function using jax. ``` import numpy as onp import matplotlib.pyplot as plt import jax.numpy as np import jax.random as random from jax.experimental import lapax key = random.PRNGKey(0) numpts = 50 def dist(x1, x2): distance = -2. 
* np.dot(x1, x2.T) + np.sum(x2**2, axis=1) + np.sum(x**2, axis=1)[:, None] return distance def cov(x1, x2): return np.exp(-dist(x1, x2)) x = onp.linspace(0, 1., numpts)[:, None] K = cov(x, x) + onp.eye(x.shape[0]) * 1e-6 L = lapax.cholesky(K + onp.eye(x.shape[0]) * 1e-6) normal_samp = random.normal(key, shape=(x.shape[0], 10)) y_hat = np.dot(L, normal_samp) plt.plot(x, y_hat)``` Nice, thanks so much @JasperSnoek! ~~I think `eye` recently got into `jax.numpy`, I'll add `linspace` now too so that `onp` import won't be necessary.~~ EDIT: nevermind, looks like it's in there, but we should clean up how we handle those constant-creation functions. I must admit I did see the cholesky decomposition in lapax already, but did not make the connection to using it for this yet! I am going to dive into this right now. For completeness sake, here is what I have done so far: ``` import jax.numpy as np from jax import random, grad from numpy import log10, diag from numpy.linalg import inv, det import matplotlib.pyplot as plt LOG2PI = log10(2 * np.pi) def ard_kernel(x1, x2, length_scale): return np.exp(-length_scale * ((x1.T - x2) ** 2)) def gp_predict(x_train, y_train, x_test, variance, length_scale, kernel = ard_kernel): k_x_x = kernel(x_train, x_train, length_scale) k_x_xs = kernel(x_train, x_test, length_scale) k_xs_x = k_x_xs.T k_xs_xs = kernel(x_test, x_test, length_scale) v_inv = inv(k_x_x + variance * np.eye(x_train.size)) q = np.dot(k_x_xs, v_inv) mu = np.dot(q, y_train).T[0] cov = k_xs_xs - np.dot(q, k_xs_x) sigma = diag(cov) return mu,sigma def gp_log_marginal_likelihood(x_train, y_train, variance, length_scale, kernel = ard_kernel): k_x_x = kernel(x_train, x_train, length_scale) k_var = k_x_x + variance * np.eye(x_train.size) v_inv = inv(k_var) data_fit = np.dot(np.dot(-.5 * y_train.T, v_inv), y_train) complexity = .5 * np.log(det(k_var)) size_correction = .5 * x_train.size * LOG2PI return (-data_fit - complexity - size_correction)[0][0] length_scale = 100 variance = .1 size = 50 random_key = random.PRNGKey(0) x_train = 2 * random.uniform(random_key, shape=(size,)).reshape((-1,1)) y_train = np.sin(x_train * 10) + random.normal(random_key, shape=(size,)).reshape((-1,1)) x_test = np.arange(-.5,2.5,.05).reshape((-1,1)) mu, sigma = gp_predict(x_train, y_train, x_test, variance, length_scale) plt.plot(x_test, mu) plt.fill_between(x_test.T[0], mu + 2 * sigma, mu - 2 * sigma, alpha = .5) plt.scatter(x_train.T, y_train.T, c = 'black') ``` This is following Rasmussen & Williams quite literally, which is why I resorted to `np.linalg.inv` in the first place. In addition there are a few functions that I haven't found in JAX yet: `log10`,`diag` and `np.linalg.det`. From what I understand, and have seen from examples, getting the predictive mean using the cholesky decomposition uses `np.linalg.solve`. What would be the JAX approach for this? The other function implemented in `jax.experimental.lapax` is a triangular solve, which you can use in place of `np.linalg.solve` when the coefficient matrix is triangular (as a Cholesky factor is). You can also compute the log determinant easily given the Cholesky factor, using the facts that the determinant of a triangular matrix is the product of the diagonal entries, and the fact that the determinant of a product of two matrices is the product of their determinants. Those are the most efficient ways to implement these computations in NumPy too (because Cholesky requires fewer FLOPs than LU, and because reusing the factor saves a lot of work in the logdet calculation). 
We should add direct support for `np.log10` and `np.diag`, but for now you can get them by computing `np.log(x) / np.log(10)` and by using fancy indexing, respectively. Awesome! Almost there, I just need to figure out how to get the correct arguments for `lapax.solve_triangular`: ``` def solve_triangular(a, b, left_side, lower, trans_a, block_size=1): """An unrolled triangular solve.""" return _solve_triangular_right(LapaxMatrix(a, block_size), LapaxMatrix(b, block_size), left_side, lower, trans_a).ndarray ``` What are `left_side`, `lower` and `trans_a`? `left_side`, `lower`, and `trans_a` have roughly the same meaning they do in LAPACK or scipy: http://www.netlib.org/lapack/explore-html/de/da7/dtrsm_8f_source.html There's also some documentation on a similar C++ API here: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/client/lib/triangular_solve.h I'm also actively working on adding `cholesky` and `solve_triangular` implementations to JAX, with the standard numpy/scipy APIs.
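The patch above implements `inv` exactly this way: factor with QR, then reuse the triangular solve. A small plain-numpy/scipy model of that identity (illustrative, not the jax code):

```python
import numpy as onp
from scipy.linalg import solve_triangular

def inv_via_qr(a):
    # a = q r with r upper triangular, so inv(a) = inv(r) @ q.T,
    # computed here as a single triangular solve.
    q, r = onp.linalg.qr(a)
    return solve_triangular(r, q.T, lower=False)

a = onp.array([[2.0, 1.0], [1.0, 3.0]])
assert onp.allclose(inv_via_qr(a), onp.linalg.inv(a))
```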
2018-12-14T00:29:58
google/jax
113
google__jax-113
[ "108" ]
87ee4b7c5642ad31b90f3ef3c51b9aef83c80377
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -111,7 +111,12 @@ def convert_element_type(operand, new_dtype): return operand def bitcast_convert_type(operand, new_dtype): - return bitcast_convert_type_p.bind(operand, new_dtype=new_dtype) + new_dtype = xla_bridge.canonicalize_dtype(new_dtype) + old_dtype = _dtype(operand) + if old_dtype != new_dtype: + return bitcast_convert_type_p.bind(operand, new_dtype=new_dtype) + else: + return operand def clamp(min, operand, max): return clamp_p.bind(min, operand, max) @@ -256,8 +261,8 @@ def reduce(operand, init_value, computation, dimensions): return monoid_reducer(operand, dimensions) else: jaxpr, consts = _reduction_jaxpr(computation, init_value) - return reduce_p.bind(operand, init_value, jaxpr=jaxpr, consts=consts, - dimensions=tuple(dimensions)) + return reduce_p.bind(operand, init_value, computation=computation, + jaxpr=jaxpr, consts=consts, dimensions=tuple(dimensions)) def _reduction_jaxpr(computation, init_value): pval = _abstractify(init_value) @@ -273,18 +278,26 @@ def _get_monoid_reducer(monoid_op, x): return aval.val == _get_max_identity(aval.dtype) and _reduce_max elif monoid_op is min: return aval.val == _get_min_identity(aval.dtype) and _reduce_min + elif monoid_op is bitwise_or and aval.dtype == onp.bool_: + return aval.val == _get_max_identity(aval.dtype) and _reduce_or + elif monoid_op is bitwise_and and aval.dtype == onp.bool_: + return aval.val == _get_min_identity(aval.dtype) and _reduce_and def _get_max_identity(dtype): if onp.issubdtype(dtype, onp.floating): return onp.array(-onp.inf, dtype) elif onp.issubdtype(dtype, onp.integer): return onp.array(onp.iinfo(dtype).min, dtype) + elif onp.issubdtype(dtype, onp.bool_): + return onp.array(False, onp.bool_) def _get_min_identity(dtype): if onp.issubdtype(dtype, onp.floating): return onp.array(onp.inf, dtype) elif onp.issubdtype(dtype, onp.integer): return onp.array(onp.iinfo(dtype).max, dtype) + elif onp.issubdtype(dtype, onp.bool_): + return onp.array(True, onp.bool_) def _reduce_sum(operand, axes): return reduce_sum_p.bind(operand, axes=tuple(axes), input_shape=operand.shape) @@ -295,6 +308,12 @@ def _reduce_max(operand, axes): def _reduce_min(operand, axes): return reduce_min_p.bind(operand, axes=tuple(axes)) +def _reduce_or(operand, axes): + return reduce_or_p.bind(operand, axes=tuple(axes)) + +def _reduce_and(operand, axes): + return reduce_and_p.bind(operand, axes=tuple(axes)) + def reduce_window(operand, init_value, computation, window_dimensions, window_strides, padding): monoid_reducer = _get_monoid_window_reducer(computation, init_value) @@ -1435,7 +1454,7 @@ def pad_batch_rule(batched_args, batch_dims, padding_config): padding_config.insert(operand_bdim, (0, 0, 0)) return pad(operand, padding_value, padding_config), operand_bdim else: - raise NotImplementedError + raise NotImplementedError # loop and stack pad_p = standard_primitive(pad_shape_rule, _input_dtype, 'pad') ad.deflinear(pad_p, pad_transpose) @@ -1815,20 +1834,31 @@ def index_untake_transpose_rule(t, src, dst, *idxs, **kwargs): ad.primitive_transposes[index_untake_p] = index_untake_transpose_rule -def reduce_shape_rule(operand, init_value, jaxpr, consts, dimensions): +def reduce_shape_rule(operand, init_value, computation, jaxpr, consts, dimensions): return tuple(onp.delete(operand.shape, dimensions)) -def reduce_translation_rule(c, operand, init_value, jaxpr, consts, dimensions): +def reduce_translation_rule(c, operand, init_value, computation, jaxpr, consts, dimensions): 
xla_computation = _reduction_computation(c, jaxpr, consts, init_value) return c.Reduce(operand, init_value, xla_computation, dimensions) +def reduce_batch_rule(batched_args, batch_dims, computation, jaxpr, consts, dimensions): + operand, init_value = batched_args + operand_bdim, init_value_bdim = batch_dims + if init_value_bdim is None: + assert operand_bdim is not None + new_dimensions = [d + bool(d >= operand_bdim) for d in dimensions] + new_operand_bdim = operand_bdim - onp.sum(onp.less(dimensions, operand_bdim)) + return reduce(operand, init_value, computation, new_dimensions), new_operand_bdim + else: + raise NotImplementedError # loop and stack + def _reduction_computation(c, jaxpr, consts, init_value): shape = c.GetShape(init_value) return xla.jaxpr_computation(jaxpr, consts, (), shape, shape) reduce_p = standard_primitive(reduce_shape_rule, _input_dtype, 'reduce', reduce_translation_rule) -batching.defreducer(reduce_p) +# batching.primitive_batchers[reduce_p] = reduce_batch_rule # TODO(mattjj): test def reduce_sum_shape_rule(operand, axes, input_shape): @@ -1890,6 +1920,31 @@ def reduce_chooser_jvp_rule(g, ans, operand, axes): batching.defreducer(reduce_min_p) +def reduce_logical_shape_rule(operand, axes): + if operand.dtype != onp.bool_: + msg = "logical reduction requires operand dtype bool, got {}." + raise TypeError(msg.format(operand.dtype)) + return tuple(onp.delete(operand.shape, axes)) + +def reduce_logical_translation_rule(prim, identity, c, operand, axes): + scalar = xla_bridge.Shape.array_shape(onp.bool_, ()) + return c.Reduce(operand, c.Constant(identity(onp.bool_)), + xla.primitive_computation(prim, scalar, scalar), axes) + +reduce_or_translation_rule = partial(reduce_logical_translation_rule, + or_p, _get_max_identity) +reduce_or_p = standard_primitive(reduce_logical_shape_rule, _fixed_dtype(onp.bool_), + 'reduce_or', reduce_or_translation_rule) +batching.defreducer(reduce_or_p) + + +reduce_and_translation_rule = partial(reduce_logical_translation_rule, + and_p, _get_min_identity) +reduce_and_p = standard_primitive(reduce_logical_shape_rule, _fixed_dtype(onp.bool_), + 'reduce_and', reduce_and_translation_rule) +batching.defreducer(reduce_and_p) + + def reduce_window_shape_rule(operand, init_value, jaxpr, consts, window_dimensions, window_strides, padding): if operand.dtype != init_value.dtype: diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -25,8 +25,7 @@ from ..abstract_arrays import UnshapedArray, ShapedArray, ConcreteArray from ..interpreters.xla import DeviceArray from .. 
import lax -from ..util import memoize -from ..util import get_module_functions +from ..util import memoize, partial, get_module_functions from ..lib import xla_bridge # To provide the same module-level names as Numpy, we need to redefine builtins @@ -591,15 +590,16 @@ def round(a, decimals=0): ### Reducers -def _make_reduction(np_fun, op, init_val): +def _make_reduction(np_fun, op, init_val, preproc=None): """Creates reduction function given a binary operation and monoid identity.""" - @_wraps(op) + @_wraps(np_fun) def reduction(a, axis=None, dtype=None, out=None, keepdims=False): if out is not None: raise ValueError("reduction does not support `out` argument.") a = a if isinstance(a, ndarray) else asarray(a) + a = preproc(a) if preproc else a dims = _reduction_dims(a, axis) result_dtype = _dtype(np_fun(onp.ones((), dtype=dtype or _dtype(a)))) if _dtype(a) != result_dtype: @@ -614,7 +614,6 @@ def reduction(a, axis=None, dtype=None, out=None, keepdims=False): return reduction - def _reduction_dims(a, axis): if axis is None: return onp.arange(ndim(a)) @@ -625,7 +624,6 @@ def _reduction_dims(a, axis): else: raise TypeError("Unexpected type of axis argument: {}".format(type(axis))) - def _reduction_init_val(a, init_val): a_dtype = xla_bridge.canonicalize_dtype(_dtype(a)) try: @@ -635,13 +633,14 @@ def _reduction_init_val(a, init_val): sign, iinfo = onp.sign(init_val), onp.iinfo(a_dtype) return onp.array(iinfo.min if sign < 0 else iinfo.max, dtype=a_dtype) +_cast_to_bool = partial(lax.convert_element_type, new_dtype=onp.bool_) sum = _make_reduction(onp.sum, lax.add, 0) prod = _make_reduction(onp.prod, lax.mul, 1) max = _make_reduction(onp.max, lax.max, -onp.inf) min = _make_reduction(onp.min, lax.min, onp.inf) -all = alltrue = _make_reduction(onp.all, logical_and, True) -any = sometrue = _make_reduction(onp.any, logical_or, False) +all = alltrue = _make_reduction(onp.all, lax.bitwise_and, True, _cast_to_bool) +any = sometrue = _make_reduction(onp.any, lax.bitwise_or, False, _cast_to_bool) @_wraps(onp.mean)
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -284,6 +284,13 @@ def func(xs): jacrev(func)(xs) # don't crash jacfwd(func)(xs) # don't crash + def testAny(self): + # test modeling the code in https://github.com/google/jax/issues/108 + + ans = vmap(np.any)(np.array([[True, False], [False, False]])) + expected = np.array([True, False]) + self.assertAllClose(ans, expected, check_dtypes=True) + if __name__ == '__main__': absltest.main() diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -142,8 +142,8 @@ def op_record(name, nargs, dtypes, shapes, rng, diff_modes, test_name=None): ] JAX_REDUCER_NO_DTYPE_RECORDS = [ - op_record("all", 1, bool_dtypes, all_shapes, jtu.rand_default(), []), - op_record("any", 1, bool_dtypes, all_shapes, jtu.rand_default(), []), + op_record("all", 1, default_dtypes + bool_dtypes, all_shapes, jtu.rand_some_zero(), []), + op_record("any", 1, default_dtypes + bool_dtypes, all_shapes, jtu.rand_some_zero(), []), op_record("max", 1, default_dtypes, nonempty_shapes, jtu.rand_default(), []), op_record("min", 1, default_dtypes, nonempty_shapes, jtu.rand_default(), []), ]
Batching broken for non-monoidal reducers repro: >>> import jax.numpy as np >>> from jax import vmap >>> vmap(np.any)(np.array([[True, False], [False, False]])) jax/lib/xla_bridge.py:138: UserWarning: No GPU found, falling back to CPU. warnings.warn('No GPU found, falling back to CPU.') Traceback (most recent call last): File "<stdin>", line 1, in <module> File "jax/api.py", line 149, in batched_fun out_flat = batching.batch(flat_fun, in_flat, in_axes_, out_axes) File "jax/interpreters/batching.py", line 43, in batch out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims) File "jax/linear_util.py", line 85, in call_wrapped ans = self.f(*args, **self.kwargs) File "jax/numpy/lax_numpy.py", line 607, in reduction result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims) File "jax/lax.py", line 260, in reduce dimensions=tuple(dimensions)) File "jax/core.py", line 74, in bind out_tracer = top_trace.process_primitive(self, tracers, kwargs) File "jax/interpreters/batching.py", line 119, in process_primitive val_out, dim_out = batched_primitive(vals_in, dims_in, **params) TypeError: reducer_batcher() takes exactly 4 arguments (3 given)
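The fix in the patch sidesteps the non-monoidal reducer: `any`/`all` first cast to bool and then reduce with `bitwise_or`/`bitwise_and`, which are true monoids (identities `False`/`True`) with an existing batching rule. A plain-numpy sketch of that reformulation:

```python
import numpy as onp

def any_as_monoid(a, axis=None):
    # Cast to bool, then fold with the monoid bitwise_or (identity False).
    return onp.bitwise_or.reduce(a.astype(bool), axis=axis)

x = onp.array([[True, False], [False, False]])
print(any_as_monoid(x, axis=1))  # [ True False], matching the vmap repro
```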
2018-12-14T16:16:15
google/jax
117
google__jax-117
[ "67" ]
c268929f2d3452dc4c0f93ef57c44459ee477a82
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -1027,8 +1027,8 @@ def conv_general_dilated_transpose_rhs( dimension_numbers, lhs_shape, rhs_shape): assert type(dimension_numbers) is ConvDimensionNumbers lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers) - transposed = map(_conv_transpose, dimension_numbers) - trans_dimension_numbers = ConvDimensionNumbers(*transposed) + lhs_trans, rhs_trans, out_trans = map(_conv_transpose, dimension_numbers) + trans_dimension_numbers = ConvDimensionNumbers(lhs_trans, out_trans, rhs_trans) padding = _conv_general_vjp_rhs_padding( onp.take(lhs_shape, lhs_sdims), onp.take(rhs_shape, rhs_sdims), window_strides, onp.take(g.shape, out_sdims), padding, lhs_dilation,
diff --git a/tests/examples_test.py b/examples/examples_test.py similarity index 100% rename from tests/examples_test.py rename to examples/examples_test.py diff --git a/jax/test_util.py b/jax/test_util.py --- a/jax/test_util.py +++ b/jax/test_util.py @@ -46,7 +46,7 @@ flags.DEFINE_integer( 'num_generated_cases', - os.getenv('JAX_NUM_GENERATED_CASES', 100), + os.getenv('JAX_NUM_GENERATED_CASES', 10), help='Number of generated cases to test') EPS = 1e-4 @@ -71,6 +71,7 @@ def numpy_close(a, b, atol=ATOL, rtol=RTOL, equal_nan=False): if testing_tpu or testing_x32: atol = max(atol, 1e-1) rtol = max(rtol, 1e-1) + assert a.shape == b.shape return onp.allclose(a, b, atol=atol * a.size, rtol=rtol * b.size, equal_nan=equal_nan) diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -139,7 +139,7 @@ def op_record(name, nargs, dtypes, shapes, rng, diff_modes, test_name=None): op_record("prod", 1, default_dtypes, all_shapes, jtu.rand_small_positive(), []), op_record("sum", 1, default_dtypes, all_shapes, jtu.rand_default(), []), op_record("var", 1, default_dtypes, nonempty_shapes, jtu.rand_default(), []), - op_record("std", 1, default_dtypes, nonempty_shapes, jtu.rand_default(), []), + op_record("std", 1, float_dtypes, nonempty_shapes, jtu.rand_default(), []), ] JAX_REDUCER_NO_DTYPE_RECORDS = [ diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -1580,10 +1580,13 @@ def testConvWithGeneralPaddingGrad(self, lhs_shape, rhs_shape, dtype, strides, "rhs_dil": rhs_dil, "rng": rng, "dimension_numbers": dim_nums, "perms": perms} for lhs_shape, rhs_shape, all_strides, all_pads, lhs_dils, rhs_dils in [ - ((b, i, 5, 6), (j, i, 1, 2), [(1, 1), (1, 2), (2, 1)], - [((0, 0), (0, 0)), ((1, 0), (0, 1)), ((0, -1), (0, 0))], - [(1, 1), (2, 1)], [(1, 1)]) - for b, i, j in itertools.product([2, 3], repeat=3)] + ((b, i, 6, 7), # lhs_shape + (j, i, 1, 2), # rhs_shape + [(1, 1), (1, 2), (2, 1)], # strides + [((0, 0), (0, 0)), ((1, 0), (0, 1)), ((0, -1), (0, 0))], # pads + [(1, 1), (2, 1)], # lhs_dils + [(1, 1)]) # rhs_dils + for b, i, j in itertools.product([1, 2], repeat=3)] for strides in all_strides for rhs_dil in rhs_dils for lhs_dil in lhs_dils @@ -1592,7 +1595,7 @@ def testConvWithGeneralPaddingGrad(self, lhs_shape, rhs_shape, dtype, strides, for rng in [jtu.rand_default()] for dim_nums, perms in [ (("NCHW", "OIHW", "NCHW"), ([0, 1, 2, 3], [0, 1, 2, 3])), - # (("NHWC", "HWIO", "NHWC"), ([0, 2, 3, 1], [2, 3, 1, 0])) + (("NHWC", "HWIO", "NHWC"), ([0, 2, 3, 1], [2, 3, 1, 0])) ])) @jtu.skip_on_devices("tpu") def testConvGeneralDilatedGrad(self, lhs_shape, rhs_shape, dtype, strides,
Open source tests We'd like contributors to be able to make sure their code isn't breaking anything, and to add tests that cover their own contributions.
2018-12-15T02:43:47
google/jax
131
google__jax-131
[ "125" ]
5a4a066ae6415cde5b96f8cd98217a58e7635cd3
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -596,7 +596,7 @@ def _make_reduction(np_fun, op, init_val, preproc=None): @_wraps(np_fun) def reduction(a, axis=None, dtype=None, out=None, keepdims=False): if out is not None: - raise ValueError("reduction does not support `out` argument.") + raise ValueError("reduction does not support the `out` argument.") a = a if isinstance(a, ndarray) else asarray(a) a = preproc(a) if preproc else a @@ -644,7 +644,10 @@ def _reduction_init_val(a, init_val): @_wraps(onp.mean) -def mean(a, axis=None, dtype=None, keepdims=False): +def mean(a, axis=None, dtype=None, out=None, keepdims=False): + if out is not None: + raise ValueError("mean does not support the `out` argument.") + if axis is None: normalizer = size(a) else: @@ -663,7 +666,10 @@ def mean(a, axis=None, dtype=None, keepdims=False): @_wraps(onp.var) -def var(a, axis=None, dtype=None, keepdims=False, ddof=0): +def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): + if out is not None: + raise ValueError("var does not support the `out` argument.") + if ddof != 0: raise NotImplementedError("Only implemented for ddof=0.") if dtype is None: @@ -677,8 +683,10 @@ def var(a, axis=None, dtype=None, keepdims=False, ddof=0): @_wraps(onp.std) -def std(a, axis=None, dtype=None, keepdims=False, ddof=0): - return sqrt(var(a, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof)) +def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): + if out is not None: + raise ValueError("std does not support the `out` argument.") + return sqrt(var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims)) @_wraps(onp.allclose)
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -27,6 +27,7 @@ import numpy as onp from jax import api +from jax import lax from jax import numpy as lnp from jax import test_util as jtu @@ -810,6 +811,12 @@ def testRavel(self): # TODO(mattjj): test other ndarray-like method overrides + def testOnpMean(self): + # from https://github.com/google/jax/issues/125 + x = lax.add(lnp.eye(3), 0.) + ans = onp.mean(x) + self.assertAllClose(ans, onp.array([1./3, 1./3, 1./3]), check_dtypes=False) + if __name__ == "__main__": absltest.main()
Improper kwarg forwarding for np.mean Steps to reproduce ``` import jax.numpy as np import numpy as onp from jax import lax x = lax.add(np.eye(3), 0.) onp.mean(x) ``` Traceback: ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-91-45d207ceb919> in <module>() 1 x = lax.add(np.eye(3), 0.) ----> 2 onp.mean(x) ~/anaconda/lib/python3.5/site-packages/numpy/core/fromnumeric.py in mean(a, axis, dtype, out, keepdims) 2915 pass 2916 else: -> 2917 return mean(axis=axis, dtype=dtype, out=out, **kwargs) 2918 2919 return _methods._mean(a, axis=axis, dtype=dtype, TypeError: mean() got an unexpected keyword argument 'out' ```
Discussing with @dougalm, this might be intended behavior or a bug on NumPy's part? Calling `onp.mean(x.copy())` works, so it might be NumPy not properly using DeviceArray's `__array__` method.
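A toy reconstruction of the dispatch question raised here (the `Toy` class below is hypothetical, not jax's actual DeviceArray): `onp.mean` prefers an object's own `mean` method over `__array__`, and forwards `out` to it. ``` import numpy as onp class Toy(object): def __init__(self, x): self.x = onp.asarray(x) def __array__(self): return self.x def mean(self, axis=None, dtype=None): # no `out` parameter, like jax's mean here return self.x.mean(axis=axis, dtype=dtype) t = Toy(onp.eye(3)) print(onp.mean(onp.asarray(t))) # 0.333..., dispatches through __array__ try: onp.mean(t) # numpy calls t.mean(..., out=None), which raises except TypeError as e: print(e) ```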
2018-12-17T22:40:26
google/jax
141
google__jax-141
[ "37" ]
bd5c1fd130f59b307e486f0e2d19bfad028f8863
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -16,27 +16,29 @@ from __future__ import division from __future__ import print_function -from six.moves import builtins +import collections +import itertools +import string -import six import numpy as onp +import opt_einsum +import six +from six.moves import builtins +from jax import jit from .. import core from ..abstract_arrays import UnshapedArray, ShapedArray, ConcreteArray from ..interpreters.xla import DeviceArray from .. import lax -from ..util import memoize, partial, get_module_functions, prod as _prod +from ..util import memoize, partial, get_module_functions, unzip2, prod as _prod from ..lib import xla_bridge -# To provide the same module-level names as Numpy, we need to redefine builtins -# and also use some common names (like 'shape' and 'dtype') at the top-level. -# pylint: disable=redefined-builtin,redefined-outer-name - -# There might be a pylint bug with tuple unpacking. -# pylint: disable=unbalanced-tuple-unpacking - -# We get docstrings from the underlying numpy functions. -# pylint: disable=missing-docstring +if six.PY3: + def removechars(s, chars): + return s.translate(str.maketrans(dict.fromkeys(chars))) +else: + def removechars(s, chars): + return s.translate(None, ''.join(chars)) # We replace some builtin names to follow Numpy's API, so we capture here. @@ -1054,6 +1056,172 @@ def tensordot(a, b, axes=2): raise TypeError(msg) +def einsum(*operands): + # using einsum_call=True here is an internal api for opt_einsum + operands, contractions = opt_einsum.contract_path( + *operands, einsum_call=True, use_blas=True) + contractions = tuple(data[:3] for data in contractions) + return _einsum(operands, contractions) + + +@partial(jit, static_argnums=(1,)) +def _einsum(operands, contractions): + operands = list(_promote_dtypes(*operands)) + sum = lambda x, axes: lax.reduce(x, onp.array(0, x.dtype), lax.add, axes) + + def sum_uniques(operand, names, uniques): + if uniques: + axes = [names.index(name) for name in uniques] + operand = sum(operand, axes) + names = removechars(names, uniques) + return operand, names + + def sum_repeats(operand, names, counts, keep_names): + for name, count in counts.items(): + if count > 1: + axes = [i for i, n in enumerate(names) if n == name] + eye = lax.broadcasted_eye(operand.dtype, operand.shape, axes) + if name not in keep_names: + operand = sum(operand * eye, axes) + names = names.replace(name, '') + else: + operand = sum(operand * eye, axes[:-1]) + names = names.replace(name, '', count - 1) + return operand, names + + for operand_indices, contracted_names, einstr in contractions: + input_str, result_names = einstr.split('->') + input_names = input_str.split(',') + + # switch on the number of operands to be processed in this loop iteration. + # every case here sets 'result' and 'names'. + if len(operand_indices) == 1: + operand = operands.pop(operand_indices[0]) + names, = input_names + counts = collections.Counter(names) + + # sum out unique contracted indices with a single reduce-sum + uniques = [name for name in contracted_names if counts[name] == 1] + operand, names = sum_uniques(operand, names, uniques) + + # for every repeated index, do a contraction against an identity matrix + operand, names = sum_repeats(operand, names, counts, result_names) + + elif len(operand_indices) == 2: + lhs, rhs = map(operands.pop, operand_indices) + lhs_counts, rhs_counts = map(collections.Counter, input_names) + lhs_names, rhs_names = input_names + + # sum out unique contracted indices in lhs and rhs + lhs_uniques = [name for name in contracted_names + if lhs_counts[name] == 1 and rhs_counts[name] == 0] + lhs, lhs_names = sum_uniques(lhs, lhs_names, lhs_uniques) + + rhs_uniques = [name for name in contracted_names + if rhs_counts[name] == 1 and lhs_counts[name] == 0] + rhs, rhs_names = sum_uniques(rhs, rhs_names, rhs_uniques) + + # for every repeated index, contract against an identity matrix + lhs, lhs_names = sum_repeats(lhs, lhs_names, lhs_counts, + result_names + rhs_names) + rhs, rhs_names = sum_repeats(rhs, rhs_names, rhs_counts, + result_names + lhs_names) + + contracted_names = contracted_names & (set(lhs_names) | set(rhs_names)) + batch_names = set(lhs_names) & set(rhs_names) - contracted_names + lhs_batch, rhs_batch = unzip2((lhs_names.find(n), rhs_names.find(n)) + for n in batch_names) + if contracted_names: + # contract usint lax.dot_general + lhs_cont, rhs_cont = unzip2((lhs_names.index(n), rhs_names.index(n)) + for n in contracted_names) + + # lax.dot_general batch dims have to precede non-batch dims + batch_dims = tuple(range(len(batch_names))) + if lhs_batch != rhs_batch or set(lhs_batch) != set(batch_dims): + lhs = moveaxis(lhs, lhs_batch, batch_dims) + rhs = moveaxis(rhs, rhs_batch, batch_dims) + batch_names = ''.join(batch_names) + else: + batch_dims = tuple(lhs_batch) + batch_names = ''.join(lhs_names[i] for i in batch_dims) + + operand = _dot_general(lhs, rhs, lhs_cont, rhs_cont, len(batch_dims)) + deleted_names = batch_names + ''.join(contracted_names) + names = (batch_names + removechars(lhs_names, deleted_names) + + removechars(rhs_names, deleted_names)) + else: + # no contraction, just a tensor product + if lhs_batch != rhs_batch: + rhs = moveaxis(rhs, rhs_batch, lhs_batch) + batch_names = ''.join(lhs_names[i] for i in lhs_batch) + + names = batch_names + lhs_names + rhs_names + lhs_shape = iter(lhs.shape) + lhs_shape = [next(lhs_shape) if n in batch_names + lhs_names else 1 + for n in names] + rhs_shape = iter(rhs.shape) + rhs_shape = [next(rhs_shape) if n in batch_names + rhs_names else 1 + for n in names] + operand = lax.reshape(lhs, lhs_shape) * lax.reshape(rhs, rhs_shape) + + else: + raise NotImplementedError + + # the resulting 'operand' with axis labels 'names' should be a permutation + # of the desired result + assert len(names) == len(result_names) == len(set(names)) + assert set(names) == set(result_names) + if names != result_names: + perm = tuple([names.index(name) for name in result_names]) + operand = lax.transpose(operand, perm) + operands.append(operand) # used in next iteration + + return operands[0] + + +def _dot_general(lhs, rhs, lhs_cont, rhs_cont, nbatch): + """Helper for einsum contractions.""" + # lax.dot_general has some tight constraints on dimension_numbers that this + # wrapper loosens via transposes and reshapes + assert len(lhs_cont) == len(rhs_cont) > 0 + ncont = len(lhs_cont) + lhs_ntensor = lhs.ndim - nbatch - ncont + rhs_ntensor = rhs.ndim - nbatch - ncont + batch_dims = tuple(range(nbatch)) + + if ncont == 1 and 0 <= lhs_ntensor <= 1 and 0 <= rhs_ntensor <= 1: + dimension_numbers = [(lhs_cont, rhs_cont), (batch_dims, batch_dims)] + return lax.dot_general(lhs, rhs, dimension_numbers) + else: + # move contracting dimensions to the end. lax.dot_general only allows one + # contracting dimension, so if there's more than one we collapse them. + if ncont > 1: + lhs_cdims = tuple(range(lhs.ndim - ncont, lhs.ndim)) + lhs = moveaxis(lhs, lhs_cont, lhs_cdims).reshape(lhs.shape[:-ncont] + (-1,)) + + rhs_cdims = tuple(range(rhs.ndim - ncont, rhs.ndim)) + rhs = moveaxis(rhs, rhs_cont, rhs_cdims).reshape(rhs.shape[:-ncont] + (-1,)) + else: + lhs = moveaxis(lhs, lhs_cont[0], -1) + rhs = moveaxis(rhs, rhs_cont[0], -1) + + # lax.dot_general only allows zero or one tensor product dims per operand, + # so if there's more than one we collapse them. + result_shape = lhs.shape[:nbatch] + lhs.shape[nbatch:-1] + rhs.shape[nbatch:-1] + + if lhs_ntensor > 1: + lhs = lhs.reshape(lhs.shape[:nbatch] + (-1,) + lhs.shape[-1:]) + + if rhs_ntensor > 1: + rhs = rhs.reshape(rhs.shape[:nbatch] + (-1,) + rhs.shape[-1:]) + + lhs_cont, rhs_cont = [lhs.ndim - 1], [rhs.ndim - 1] + dimension_numbers = [(lhs_cont, rhs_cont), (batch_dims, batch_dims)] + result = lax.dot_general(lhs, rhs, dimension_numbers) + return lax.reshape(result, result_shape) + + ### Misc
diff --git a/tests/lax_numpy_einsum_test.py b/tests/lax_numpy_einsum_test.py new file mode 100644 --- /dev/null +++ b/tests/lax_numpy_einsum_test.py @@ -0,0 +1,166 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as onp +from absl.testing import absltest + +import jax.numpy as np +import jax.test_util as jtu + +from jax.config import config +config.parse_flags_with_absl() + + +def rng(): + return onp.random.RandomState(0) + +def check(s, *ops): + a = onp.einsum(s, *ops) + b = np.einsum(s, *ops) + assert onp.allclose(a, b, atol=1e-4, rtol=1e-4) + + +class EinsumTest(jtu.JaxTestCase): + + def test_three_operands_1(self): + x = rng().randn(3) + y = rng().randn(4) + z = rng().randn(5) + s = 'i,j,k->ijk' + check(s, x, y, z) + + def test_three_operands_2(self): + x = rng().randn(3) + y = rng().randn(4) + z = rng().randn(5) + s = 'i,j,k->ijk' + check(s, x, y, z) + + def test_two_operands_1(self): + x = rng().randn(3, 4) + y = rng().randn(4) + s = 'ij,j->i' + check(s, x, y) + + def test_two_operands_2(self): + x = rng().randn(3, 4, 5) + y = rng().randn(4) + s = 'ijk,j->i' + check(s, x, y) + + def test_two_operands_3(self): + x = rng().randn(3, 4, 3) + y = rng().randn(3) + s = 'iji,i->j' + check(s, x, y) + + def test_two_operands_4(self): + x = rng().randn(3, 4) + y = rng().randn(3, 4) + s = 'ij,ij->' + check(s, x, y) + + def test_two_operands_5(self): + x = rng().randn(10, 2, 3) + y = rng().randn(3, 4) + s = 'nij,jk->nik' + check(s, x, y) + + def test_one_operand_1(self): + x = rng().randn(3, 4, 5) + s = 'ijk->j' + check(s, x) + + def test_one_operand_2(self): + x = rng().randn(3, 4, 5) + s = 'ijk->kij' + check(s, x) + + def test_one_operand_3(self): + x = rng().randn(3, 4, 5) + s = 'ijk->ki' + check(s, x) + + def test_one_operand_4(self): + x = rng().randn(3, 4, 5) + s = 'ijk->ki' + check(s, x) + + def test_one_operand_5(self): + x = rng().randn(2, 3, 4, 5) + s = '...ijk->...ki' + check(s, x) + + def test_one_operand_6(self): + x = rng().randn(3, 4, 5) + s = '...ijk->ki' + check(s, x) + + def test_one_operand_7(self): + x = rng().randn(3, 3) + s = 'ii->' + check(s, x) + + def test_one_operand_8(self): + x = rng().randn(3, 3) + s = 'ij->' + check(s, x) + + def test_one_operand_9(self): + x = rng().randn(3, 3, 3) + s = 'iii->' + check(s, x) + + def test_one_operand_10(self): + x = rng().randn(3, 3) + s = 'ii->i' + check(s, x) + + def test_one_operand_11(self): + x = rng().randn(3, 3, 4) + s = 'iij->i' + check(s, x) + + def test_one_operand_12(self): + x = rng().randn(3, 3, 3) + s = 'iii->i' + check(s, x) + + def test_one_operand_13(self): + x = rng().randn(3, 3, 5, 4, 4) + s = 'iijkk->i' + check(s, x) + + def test_one_operand_14(self): + x = rng().randn(3, 3, 5, 4, 4) + s = 'iijkk->ik' + check(s, x) + + def test_one_operand_15(self): + x = rng().randn(3, 3, 5, 4, 4) + s = 'iijkl->il' + check(s, x) + + def test_one_operand_16(self): + x = rng().randn(3, 3) + s = 'ij->ij' + check(s, x) + + +if __name__ == '__main__': + absltest.main()
np.einsum support Support for generic tensor contractions would cover a large class of computations and also provide a foundation for higher-order operations. Perhaps jax could then also be added as an `opt_einsum` backend?
Yes, definitely! And you're right that opt_einsum should be very useful here. Broadly speaking I think we would want to use opt_einsum to translate an `einsum` call into a sequence of calls into `lax.dot_general`, which models the [XLA DotGeneral HLO](https://www.tensorflow.org/xla/operation_semantics#dotgeneral). But to support all of np.einsum's functionality we'd need to handle a few more cases that aren't exactly tensor contractions, like `np.einsum('ii->i', A)` for extracting a matrix diagonal, `np.einsum('ii->', A)` for computing a trace, and `np.einsum('ij->', A)` for a sum over axes (contracting against an implicit all-ones array, you could say). EDIT: There are also a few annoying constraints in the current DotGeneral HLO, like [the requirement that batch dimensions precede everything else](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/service/shape_inference.cc#L664), and [the requirement that there is only one contracting dimension per call](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/service/shape_inference.cc#L647), which we'd have to work around in our lax_numpy.py implementation. I believe the XLA folks are planning to make DotGeneral more convenient to use in the near future, but I'm not sure when. Let's leave this issue open to track this feature. We can probably also implement `np.tensordot` support at the same time.
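For concreteness, the special cases mentioned above, shown with plain numpy (each is an einsum that is not a pure contraction): ``` import numpy as onp A = onp.arange(9.0).reshape(3, 3) print(onp.einsum('ii->i', A)) # diagonal: [0. 4. 8.] print(onp.einsum('ii->', A)) # trace: 12.0 print(onp.einsum('ij->', A)) # sum over both axes: 36.0 ```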
2018-12-18T17:33:38
google/jax
144
google__jax-144
[ "37" ]
87922fdf13b76990b4929be1b6e50e7d74250884
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1155,14 +1155,16 @@ def sum_repeats(operand, names, counts, keep_names): if lhs_batch != rhs_batch: rhs = moveaxis(rhs, rhs_batch, lhs_batch) batch_names = ''.join(lhs_names[i] for i in lhs_batch) + nbatch = len(batch_names) - names = batch_names + lhs_names + rhs_names + assert len(lhs_names) == lhs.ndim and len(rhs_names) == rhs.ndim + assert lhs_names.startswith(batch_names) and rhs_names.startswith(batch_names) + + names = batch_names + lhs_names[nbatch:] + rhs_names[nbatch:] lhs_shape = iter(lhs.shape) - lhs_shape = [next(lhs_shape) if n in batch_names + lhs_names else 1 - for n in names] + lhs_shape = [next(lhs_shape) if n in lhs_names else 1 for n in names] rhs_shape = iter(rhs.shape) - rhs_shape = [next(rhs_shape) if n in batch_names + rhs_names else 1 - for n in names] + rhs_shape = [next(rhs_shape) if n in rhs_names else 1 for n in names] operand = lax.reshape(lhs, lhs_shape) * lax.reshape(rhs, rhs_shape) else:
diff --git a/tests/lax_numpy_einsum_test.py b/tests/lax_numpy_einsum_test.py --- a/tests/lax_numpy_einsum_test.py +++ b/tests/lax_numpy_einsum_test.py @@ -81,6 +81,13 @@ def test_two_operands_5(self): s = 'nij,jk->nik' check(s, x, y) + def test_two_operands_6(self): + # based on https://github.com/google/jax/issues/37#issuecomment-448572187 + x = rng().randn(2, 1) + y = rng().randn(2, 3, 4) + s = 'sa,shb->shab' + check(s, x, y) + def test_one_operand_1(self): x = rng().randn(3, 4, 5) s = 'ijk->j'
np.einsum support Support for generic tensor contractions would cover a large class of computations and also provide a foundation for higher-order operations. Perhaps jax could then also be added as an `opt_einsum` backend?
Yes, definitely! And you're right that opt_einsum should be very useful here. Broadly speaking I think we would want to use opt_einsum to translate an `einsum` call into a sequence of calls into `lax.dot_general`, which models the [XLA DotGeneral HLO](https://www.tensorflow.org/xla/operation_semantics#dotgeneral). But to support all of np.einsum's functionality we'd need to handle a few more cases that aren't exactly tensor contractions, like `np.einsum('ii->i', A)` for extracting a matrix diagonal, `np.einsum('ii->', A)` for computing a trace, and `np.einsum('ij->', A)` for a sum over axes (contracting against an implicit all-ones array, you could say). EDIT: There are also a few annoying constraints in the current DotGeneral HLO, like [the requirement that batch dimensions precede everything else](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/service/shape_inference.cc#L664), and [the requirement that there is only one contracting dimension per call](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/service/shape_inference.cc#L647), which we'd have to work around in our lax_numpy.py implementation. I believe the XLA folks are planning to make DotGeneral more convenient to use in the near future, but I'm not sure when. Let's leave this issue open to track this feature. We can probably also implement `np.tensordot` support at the same time. Please reopen if you notice any missing cases! thanks @mattjj I tested with some existing code for which I'd like to use jax ``` import jax.numpy as np import numpy as onp second = onp.random.uniform(size = (2,3,4)) first = onp.random.uniform(size = (2,1)) result = np.einsum('sa,shb->shab',first,second) result.shape #(expected shape: (2, 3, 1, 4)) ``` which throws a `StopIteration` error, while changing `import jax.numpy as np` to `import numpy as np ` gives the expected shape Sounds like a bug!
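A quick check of the expected semantics with plain numpy, matching the `test_two_operands_6` case added in the patch: ``` import numpy as onp rng = onp.random.RandomState(0) first = rng.uniform(size=(2, 1)) second = rng.uniform(size=(2, 3, 4)) print(onp.einsum('sa,shb->shab', first, second).shape) # (2, 3, 1, 4) ```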
2018-12-19T16:13:05
google/jax
146
google__jax-146
[ "145" ]
b781e4509ab4e617f5673ae45f47ff5697017f88
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -834,29 +834,17 @@ def eye(N, M=None, k=None, dtype=onp.dtype("float64")): @_wraps(onp.arange) def arange(*args, **kwargs): - nargs = len(args) - start, step = 0, 1 - dtype = kwargs.pop("dtype", None) - if kwargs: - raise TypeError("arange only accepts 'dtype' kwarg, got {}".format(kwargs)) - if nargs == 0: - raise TypeError("Required argument 'start' (pos 1) not found") # same as numpy error - elif nargs == 1: - stop, = args - dtype = dtype or _dtype(stop) - return lax.iota(dtype, stop) # avoids materializing - elif nargs == 2: - start, stop = args - dtype = dtype or onp.result_type(start, stop) - elif nargs == 3: - start, stop, step = args - dtype = dtype or onp.result_type(start, stop, step) - elif nargs == 4: - start, stop, step, dtype = args - dtype = dtype or onp.result_type(start, stop, step) - - size = (stop - start - 1) // step + 1 - return start + step * lax.iota(dtype, size) + # attempt to generate a lazy IotaConstant, otherwise fall back to raw numpy + # TODO(mattjj): add tests for this function, then re-enable + # dtype = kwargs.pop("dtype", None) + # if not args: + # raise TypeError("Required argument 'start' (pos 1) not found") # same as numpy error + # elif len(args) == 1 and not kwargs: + # stop, = args + # dtype = dtype or _dtype(stop) + # if onp.issubdtype(dtype, onp.integer): + # return lax.iota(dtype, stop) # avoids materializing + return onp.arange(*args, **kwargs) @_wraps(onp.repeat) diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ setup( name='jax', - version='0.1.12', + version='0.1.13', description='Differentiate, compile, and transform Numpy code.', author='JAX team', author_email='[email protected]',
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -845,6 +845,13 @@ def testOnpMean(self): ans = onp.mean(x) self.assertAllClose(ans, onp.array([1./3, 1./3, 1./3]), check_dtypes=False) + # TODO(mattjj): more exhaustive arange tests + def testArangeOnFloats(self): + # from https://github.com/google/jax/issues/145 + expected = onp.arange(0.0, 1.0, 0.1) + ans = lnp.arange(0.0, 1.0, 0.1) + self.assertAllClose(expected, ans, check_dtypes=True) + if __name__ == "__main__": absltest.main()
Inconsistent behaviour of arange I have installed JAX on Colab as in the QuickStart notebook. Then I run this instruction: ``` import jax.numpy as np print(np.arange(0.0, 1.0, 0.1)) ``` And I get an array with a single value [0], which is different from: ``` import numpy as np print(np.arange(0.0, 1.0, 0.1)) ``` where I (correctly) get an array with 10 values 0, 0.1, ..., 0.9. Is this intended?
Not intended, that's a bug! A temporary workaround is to `import numpy as onp` and just use `onp.arange` here. Ok, thanks a lot! Love the library!
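Why the old path produced a single element: with the size formula from the removed code, `(stop - start - 1) // step + 1` evaluates on these float arguments to `(1.0 - 0.0 - 1) // 0.1 + 1 == 1`, so only `start` was emitted. The workaround from the thread, as a sketch: ``` import numpy as onp import jax.numpy as np x = np.array(onp.arange(0.0, 1.0, 0.1)) # build the float range with numpy first print(x.shape) # (10,) ```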
2018-12-19T17:09:34
google/jax
151
google__jax-151
[ "149" ]
21e5eb13dd879f92b6ff94e18bf33a24ed8cc2a7
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -58,16 +58,17 @@ Args: fun: Function to be jitted. Should be a pure function, as side-effects may - only be executed once. Its positional arguments and return value should - be arrays, scalars, or standard Python containers (tuple/list/dict) - thereof. Keyword arguments and positional arguments specified by - `static_argnums` can be anything at all. These are treated as static - (see below). + only be executed once. Its positional arguments and return value should be + arrays, scalars, or standard Python containers (tuple/list/dict) thereof. + Keyword arguments and positional arguments specified by `static_argnums` + can be anything at all. These are treated as static (see below). static_argnums: A tuple of ints. Specifies which arguments to treat as - static (compile-time constant). Operations that only depend on static - arguments will be constant-folded. Calling the jitted function with - different values for these constants will trigger recompilation. - Returns: A wrapped version of `fun`, set up for just-in-time compilation. + static (compile-time constant). Operations that only depend on static + arguments will be constant-folded. Calling the jitted function with + different values for these constants will trigger recompilation. + + Returns: + A wrapped version of `fun`, set up for just-in-time compilation. """ @_wraps(fun) def f_jitted(*args, **kwargs): @@ -83,31 +84,62 @@ def f_jitted(*args, **kwargs): f_jitted.__name__ = "jit({})".format(f_jitted.__name__) return f_jitted + def grad(fun, argnums=0): """Creates a function which evaluates the gradient of `fun`. Args: fun: Function to be differentiated. Its arguments at positions specified by - `argnums` should be arrays, scalars, or standard Python containers. It - should return a scalar (which includes arrays with shape `()` but not - arrays with shape `(1,)` etc.) argnums: Integer or tuple of integers. Specifies which positional - argument(s) to differentiate with respect to. - Returns: A function with the same arguments as `fun`, that evaluates the - gradient of `fun`. If `argnums` is an integer then the gradient has the - same shape and type as the positional argument indicated by that integer. - If argnums is a tuple of integers, the gradient is a tuple of values with - the same shapes and types as the corresponding arguments. + argument(s) to differentiate with respect to. + + Returns: + A function with the same arguments as `fun`, that evaluates the gradient of + `fun`. If `argnums` is an integer then the gradient has the same shape and + type as the positional argument indicated by that integer. If argnums is a + tuple of integers, the gradient is a tuple of values with the same shapes + and types as the corresponding arguments. """ + value_and_grad_f = value_and_grad(fun, argnums) + def grad_f(*args, **kwargs): + ans, g = value_and_grad_f(*args, **kwargs) + return g + + return grad_f + +def value_and_grad(fun, argnums=0): + """Creates a function which evaluates both `fun` and the gradient of `fun`. + + Args: + fun: Function to be differentiated. Its arguments at positions specified by + `argnums` should be arrays, scalars, or standard Python containers. It + should return a scalar (which includes arrays with shape `()` but not + arrays with shape `(1,)` etc.) + argnums: Integer or tuple of integers. Specifies which positional + argument(s) to differentiate with respect to. + + Returns: + A function with the same arguments as `fun` that evaluates both `fun` and + the gradient of `fun` and returns them as a pair (a two-element tuple). If + `argnums` is an integer then the gradient has the same shape and type as the + positional argument indicated by that integer. If argnums is a tuple of + integers, the gradient is a tuple of values with the same shapes and types + as the corresponding arguments. + """ + def value_and_grad_f(*args, **kwargs): f = lu.wrap_init(fun, kwargs) f_partial, dyn_args = argnums_partial(f, argnums, args) ans, vjp_py = vjp(f_partial, *dyn_args) check_scalar(ans) g = vjp_py(onp.ones((), onp.result_type(ans))) - return g[0] if isinstance(argnums, int) else g + g = g[0] if isinstance(argnums, int) else g + return (ans, g) - return grad_f + return value_and_grad_f @curry def jacfwd(fun, x): @@ -136,11 +168,13 @@ def vmap(fun, in_axes=0, out_axes=0): Args: fun: Function to be mapped over additional axes. in_axes, out_axes: Specifies which axes to map over. These may be integers, - None, or (possibly nested) tuples of integers or None. - Returns: Batched/vectorized version of `fun` with arguments that correspond to - those of `fun`, but with extra array axes at positions indicated by - `in_axes`, and a return value that corresponds to that of `fun`, but with - extra array axes at positions indicated by `out_axes`. + None, or (possibly nested) tuples of integers or None. + + Returns: + Batched/vectorized version of `fun` with arguments that correspond to those + of `fun`, but with extra array axes at positions indicated by `in_axes`, and + a return value that corresponds to that of `fun`, but with extra array axes + at positions indicated by `out_axes`. For example, we can implement a matrix-matrix product using a vector dot product: @@ -150,7 +184,6 @@ def vmap(fun, in_axes=0, out_axes=0): mm = vmap(mv, (None, 1), 1) # ([a,b], [b,c]) -> [a,c] (`[a,b]` indicates an array with shape (a,b)) - """ def batched_fun(*args, **kwargs): if not isinstance(fun, lu.WrappedFun):
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -44,6 +44,15 @@ def f(x, y, z, flag=False): assert grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == 2.0 assert grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (3.0, 1.0) + def value_and_grad_argnums(self): + def f(x, y, z, flag=False): + assert flag + return 1.0 * x + 2.0 * y + 3.0 * z + + y = f(1.0, 1.0, 1.0, flag=True) + assert value_and_grad(f)(1.0, 1.0, 1.0, flag=True) == (y, 1.0) + assert value_and_grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == (y, 2.0) + assert value_and_grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (y, (3.0, 1.0)) def test_jit_static_args(self): side = []
Get value and gradient simultaneously In Autograd I was used to having `value_and_grad` return both the gradient and the value of a function simultaneously. Is there any similar functionality in JAX? That would be incredibly useful for, e.g., plotting the loss while training a neural network. This is an example in Autograd: ``` grad_and_value = value_and_grad(np.tanh) v, g = grad_and_value(0.0) # v=0.0, g=1.0 ```
Great idea. If you take a look at [the grad function's implementation](https://github.com/google/jax/blob/95135377d0ed3d3954a44a650ad2580ded61037f/jax/api.py#L105), you can see that `ans` is computed but not returned. We should factor the API so that this is easy to access. @dougalm do you think it's better to set things up so that there's a separate `value_and_grad` function (and maybe `grad` is a wrapped version that discards the `ans` part of its return value), or is it worth considering just having a `return_ans` kwarg for `grad`? My usual preference is for the former, since other APIs always seem to succumb to kwarg creep, but in this case I'm not sure. I would vote for a separate function. Having kwargs affect what is returned to me is a Matlab anti-pattern, but if folx disagree, let's discuss. @dougalm agreed, and had a punchy way of saying the same thing @alexbw did: an argument to a function shouldn't change its type! That sounds like a knock-down argument to me. I'll add a value_and_grad function now.
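A usage sketch of the API added here (assuming `value_and_grad` is re-exported from the top-level package the way `grad` is): ``` import jax.numpy as np from jax import value_and_grad v, g = value_and_grad(np.tanh)(0.0) print(v, g) # 0.0 1.0 ```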
2018-12-20T18:18:43
google/jax
168
google__jax-168
[ "165" ]
648fd43619db82132ca6e8c44e4c2c0a32ac3406
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -1752,11 +1752,37 @@ def dynamic_slice_transpose_rule(t, operand, start_indices, slice_sizes, zeros = broadcast(_const(t, 0), operand_shape) return [dynamic_update_slice(zeros, t, start_indices), ad_util.zero] +def dynamic_slice_batching_rule(batched_args, batch_dims, slice_sizes, + **unused_kwargs): + operand, start_indices = batched_args + op_bdim, idx_bdim = batch_dims + + if idx_bdim is None: + new_start_indices = concatenate( + [start_indices[:op_bdim], _zeros(start_indices, shape=(1,)), + start_indices[op_bdim:]], 0) + new_slice_sizes = list(slice_sizes) + new_slice_sizes.insert(op_bdim, operand.shape[op_bdim]) + out = dynamic_slice(operand, new_start_indices, new_slice_sizes) + return out, op_bdim + else: + # TODO(mattjj): add support for Gather HLO, use it here + start_indices = batching.bdim_at_front(start_indices, idx_bdim) + if op_bdim is None: + out = concatenate([dynamic_slice(operand, idx, slice_sizes) + for idx in start_indices], 0) + else: + operand = batching.bdim_at_front(operand, op_bdim) + out = concatenate([dynamic_slice(op, idx, slice_sizes) + for op, idx in zip(operand, start_indices)], 0) + return out, 0 + dynamic_slice_p = standard_primitive( dynamic_slice_shape_rule, _input_dtype, 'dynamic_slice', dynamic_slice_translation_rule) ad.defjvp(dynamic_slice_p, dynamic_slice_jvp_rule, None) ad.primitive_transposes[dynamic_slice_p] = dynamic_slice_transpose_rule +batching.primitive_batchers[dynamic_slice_p] = dynamic_slice_batching_rule def dynamic_update_slice_shape_rule(operand, update, start_indices,
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -306,6 +306,26 @@ def fun(x, t): [0., 0., 0., 0., 2.]]) self.assertAllClose(ans, expected, check_dtypes=False) + def testDynamicSlice(self): + # test dynamic_slice via numpy indexing syntax + x = onp.arange(30).reshape((10, 3)) + + ans = vmap(lambda x, i: x[i], in_axes=(0, None))(x, 1) + expected = x[:, 1] + self.assertAllClose(ans, expected, check_dtypes=False) + + + idx = onp.array([0, 1, 2, 1, 0] * 2) + ans = vmap(lambda x, i: x[i], in_axes=(0, 0))(x, idx) + expected = x[onp.arange(10), idx] + self.assertAllClose(ans, expected, check_dtypes=False) + + x = onp.arange(3) + idx = onp.array([0, 1, 2, 1, 0] * 2) + ans = vmap(lambda x, i: x[i], in_axes=(None, 0))(x, idx) + expected = x[idx] + self.assertAllClose(ans, expected, check_dtypes=False) + if __name__ == '__main__': absltest.main()
Batching rule for `dynamic_slice` not defined If you use indexing, e.g. `x[0]`, in a function you `vmap`, it fails.
Related: we are also missing the XLA Gather primitive, which is the right way to write many batch dynamic slices. +1 hawkinsp, our index_take predated the Gather HLO (and I find that HLO confusing) so it could be that index_take can be replaced entirely. I’m interested to know if you think that’s possible. (I would love to take this, but probably can’t for about 36 hours.)
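The intended batched-indexing behavior, per the tests added above (a sketch assuming a build with this patch): ``` import numpy as onp from jax import vmap x = onp.arange(30).reshape((10, 3)) idx = onp.array([0, 1, 2, 1, 0] * 2) # batched operand, shared index print(vmap(lambda x, i: x[i], in_axes=(0, None))(x, 1)) # == x[:, 1] # batched operand and batched index print(vmap(lambda x, i: x[i], in_axes=(0, 0))(x, idx)) # == x[onp.arange(10), idx] ```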
2018-12-23T17:30:01
google/jax
169
google__jax-169
[ "166" ]
fd3645ab89e0acc95d6df2908cedea6eac1de59e
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1438,7 +1438,7 @@ def _rewriting_take(arr, idx, axis=0): # Handle slice index (only static, otherwise an error is raised) elif isinstance(idx, slice): - if not _all(elt is None or isinstance(core.get_aval(elt), ConcreteArray) + if not _all(elt is None or type(core.get_aval(elt)) is ConcreteArray for elt in (idx.start, idx.stop, idx.step)): msg = ("Array slice indices must have static start/stop/step to be used " "with Numpy indexing syntax. Try lax.dynamic_slice instead.") @@ -1448,6 +1448,27 @@ def _rewriting_take(arr, idx, axis=0): result = lax.slice_in_dim(arr, start, limit, stride, axis=axis) return lax.rev(result, [axis]) if needs_rev else result + # Handle non-advanced bool index (only static, otherwise an error is raised) + elif (isinstance(abstract_idx, ShapedArray) and onp.issubdtype(abstract_idx.dtype, onp.bool_) + or isinstance(idx, list) and _all(not _shape(e) and onp.issubdtype(_dtype(e), onp.bool_) + for e in idx)): + if isinstance(idx, list): + idx = array(idx) + abstract_idx = core.get_aval(idx) + + if not type(abstract_idx) is ConcreteArray: + msg = ("Array boolean indices must be static (e.g. no dependence on an " + "argument to a jit or vmap function).") + raise IndexError(msg) + else: + if idx.ndim > arr.ndim or idx.shape != arr.shape[:idx.ndim]: + msg = "Boolean index shape did not match indexed array shape prefix." + raise IndexError(msg) + else: + reshaped_arr = arr.reshape((-1,) + arr.shape[idx.ndim:]) + int_idx, = onp.where(idx.ravel()) + return lax.index_take(reshaped_arr, (int_idx,), (0,)) + # Handle non-advanced tuple indices by recursing once elif isinstance(idx, tuple) and _all(onp.ndim(elt) == 0 for elt in idx): canonical_idx = _canonicalize_tuple_index(arr, idx) @@ -1487,10 +1508,11 @@ def _rewriting_take(arr, idx, axis=0): # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#combining-advanced-and-basic-indexing elif _is_advanced_int_indexer(idx): canonical_idx = _canonicalize_tuple_index(arr, tuple(idx)) - idx_noadvanced = [slice(None) if _is_int(e) else e for e in canonical_idx] + idx_noadvanced = [slice(None) if _is_int_arraylike(e) else e + for e in canonical_idx] arr_sliced = _rewriting_take(arr, tuple(idx_noadvanced)) - advanced_pairs = ((e, i) for i, e in enumerate(canonical_idx) if _is_int(e)) + advanced_pairs = ((e, i) for i, e in enumerate(canonical_idx) if _is_int_arraylike(e)) idx_advanced, axes = zip(*advanced_pairs) idx_advanced = broadcast_arrays(*idx_advanced) @@ -1522,11 +1544,11 @@ def _is_advanced_int_indexer(idx): # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing if isinstance(idx, (tuple, list)): # We assume this check comes *after* the check for non-advanced tuple index, - # and hence we already know at least one element is a sequence - return _all(e is None or e is Ellipsis or isinstance(e, slice) or _is_int(e) - for e in idx) + # and hence we already know at least one element is a sequence if it's a tuple + return _all(e is None or e is Ellipsis or isinstance(e, slice) + or _is_int_arraylike(e) for e in idx) else: - return _is_int(idx) + return _is_int_arraylike(idx) def _is_advanced_int_indexer_without_slices(idx): return True -def _is_int(x): +def _is_int_arraylike(x): """Returns True if x is array-like with integer dtype, False otherwise.""" return (isinstance(x, int) and not isinstance(x, bool) or onp.issubdtype(getattr(x, "dtype", None), onp.integer) - or isinstance(x, (list, tuple)) and _all(_is_int(e) for e in x)) + or isinstance(x, (list, tuple)) and _all(_is_int_arraylike(e) for e in x)) def _canonicalize_tuple_index(arr, idx):
diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py --- a/tests/lax_numpy_indexing_test.py +++ b/tests/lax_numpy_indexing_test.py @@ -62,11 +62,10 @@ def check_grads(f, args, order, atol=None, rtol=None, eps=None): class IndexingTest(jtu.JaxTestCase): """Tests for Numpy indexing translation rules.""" - @parameterized.named_parameters(jtu.cases_from_list({ - "testcase_name": - "{}_inshape={}_indexer={}".format( - name, jtu.format_shape_dtype_string( shape, dtype), indexer), - "shape": shape, "dtype": dtype, "rng": rng, "indexer": indexer + @parameterized.named_parameters({ + "testcase_name": "{}_inshape={}_indexer={}".format( + name, jtu.format_shape_dtype_string( shape, dtype), indexer), + "shape": shape, "dtype": dtype, "rng": rng, "indexer": indexer } for name, index_specs in [ ("OneIntIndex", [ IndexSpec(shape=(3,), indexer=1), @@ -154,14 +153,14 @@ class IndexingTest(jtu.JaxTestCase): IndexSpec(shape=(3, 4), indexer=()), ]), ] for shape, indexer in index_specs for dtype in all_dtypes - for rng in [jtu.rand_default()])) + for rng in [jtu.rand_default()]) @jtu.skip_on_devices("tpu") def testStaticIndexing(self, shape, dtype, rng, indexer): args_maker = lambda: [rng(shape, dtype)] fun = lambda x: x[indexer] self._CompileAndCheck(fun, args_maker, check_dtypes=True) - @parameterized.named_parameters(jtu.cases_from_list({ + @parameterized.named_parameters({ "testcase_name": "{}_inshape={}_indexer={}".format(name, jtu.format_shape_dtype_string( @@ -233,7 +232,7 @@ def testStaticIndexing(self, shape, dtype, rng, indexer): # IndexSpec(shape=(3, 4), indexer=()), # ]), ] for shape, indexer in index_specs for dtype in float_dtypes - for rng in [jtu.rand_default()])) + for rng in [jtu.rand_default()]) @jtu.skip_on_devices("tpu") def testStaticIndexingGrads(self, shape, dtype, rng, indexer): tol = 1e-2 if onp.finfo(dtype).bits == 32 else None @@ -257,7 +256,7 @@ def _ReplaceSlicesWithTuples(self, idx): else: return idx, lambda x: x - @parameterized.named_parameters(jtu.cases_from_list( + @parameterized.named_parameters( {"testcase_name": "{}_inshape={}_indexer={}" .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer), "shape": shape, "dtype": dtype, "rng": rng, "indexer": indexer} @@ -280,7 +279,7 @@ def _ReplaceSlicesWithTuples(self, idx): ] for shape, indexer in index_specs for dtype in all_dtypes - for rng in [jtu.rand_default()])) + for rng in [jtu.rand_default()]) def testDynamicIndexingWithSlicesErrors(self, shape, dtype, rng, indexer): unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer) @@ -292,7 +291,7 @@ def fun(x, unpacked_indexer): args_maker = lambda: [rng(shape, dtype), unpacked_indexer] self.assertRaises(IndexError, lambda: fun(*args_maker())) - @parameterized.named_parameters(jtu.cases_from_list( + @parameterized.named_parameters( {"testcase_name": "{}_inshape={}_indexer={}" .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer), "shape": shape, "dtype": dtype, "rng": rng, "indexer": indexer} @@ -312,7 +311,7 @@ def fun(x, unpacked_indexer): ] for shape, indexer in index_specs for dtype in all_dtypes - for rng in [jtu.rand_default()])) + for rng in [jtu.rand_default()]) def testDynamicIndexingWithIntegers(self, shape, dtype, rng, indexer): unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer) @@ -324,7 +323,7 @@ def fun(x, unpacked_indexer): self._CompileAndCheck(fun, args_maker, check_dtypes=True) @skip - @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "{}_inshape={}_indexer={}" .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer), "shape": shape, "dtype": dtype, "rng": rng, "indexer": indexer} @@ -346,7 +345,7 @@ def fun(x, unpacked_indexer): ] for shape, indexer in index_specs for dtype in float_dtypes - for rng in [jtu.rand_default()])) + for rng in [jtu.rand_default()]) def DISABLED_testDynamicIndexingWithIntegersGrads(self, shape, dtype, rng, indexer): # TODO(mattjj): re-enable (test works but for grad-of-compile, in flux) tol = 1e-2 if onp.finfo(dtype).bits == 32 else None @@ -360,7 +359,7 @@ def fun(unpacked_indexer, x): arr = rng(shape, dtype) check_grads(partial(fun, unpacked_indexer), (arr,), 2, tol, tol, tol) - @parameterized.named_parameters(jtu.cases_from_list( + @parameterized.named_parameters( {"testcase_name": "{}_inshape={}_indexer={}" .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer), "shape": shape, "dtype": dtype, "rng": rng, "indexer": indexer} @@ -412,13 +411,13 @@ def fun(unpacked_indexer, x): ] for shape, indexer in index_specs for dtype in all_dtypes - for rng in [jtu.rand_default()])) + for rng in [jtu.rand_default()]) def testAdvancedIntegerIndexing(self, shape, dtype, rng, indexer): args_maker = lambda: [rng(shape, dtype), indexer] fun = lambda x, idx: x[idx] self._CompileAndCheck(fun, args_maker, check_dtypes=True) - @parameterized.named_parameters(jtu.cases_from_list( + @parameterized.named_parameters( {"testcase_name": "{}_inshape={}_indexer={}" .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer), "shape": shape, "dtype": dtype, "rng": rng, "indexer": indexer} @@ -470,14 +469,14 @@ def testAdvancedIntegerIndexing(self, shape, dtype, rng, indexer): ] for shape, indexer in index_specs for dtype in float_dtypes - for rng in [jtu.rand_default()])) + for rng in [jtu.rand_default()]) def testAdvancedIntegerIndexingGrads(self, shape, dtype, rng, indexer): tol = 1e-2 if onp.finfo(dtype).bits == 32 else None arg = rng(shape, dtype) fun = lambda x: x[indexer]**2 check_grads(fun, (arg,), 2, tol, tol, tol) - @parameterized.named_parameters(jtu.cases_from_list( + @parameterized.named_parameters( {"testcase_name": "{}_inshape={}_indexer={}" .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer), "shape": shape, "dtype": dtype, "rng": rng, "indexer": indexer} @@ -533,7 +532,7 @@ def testAdvancedIntegerIndexingGrads(self, shape, dtype, rng, indexer): ] for shape, indexer in index_specs for dtype in all_dtypes - for rng in [jtu.rand_default()])) + for rng in [jtu.rand_default()]) def testMixedAdvancedIntegerIndexing(self, shape, dtype, rng, indexer): indexer_with_dummies = [e if isinstance(e, onp.ndarray) else () for e in indexer] @@ -588,6 +587,49 @@ def foo(x): self.assertAllClose(a1, a2, check_dtypes=True) + def testBooleanIndexingArray1D(self): + idx = onp.array([True, True, False]) + x = api.device_put(onp.arange(3)) + ans = x[idx] + expected = onp.arange(3)[idx] + self.assertAllClose(ans, expected, check_dtypes=False) + + def testBooleanIndexingList1D(self): + idx = [True, True, False] + x = api.device_put(onp.arange(3)) + ans = x[idx] + expected = onp.arange(3)[idx] + self.assertAllClose(ans, expected, check_dtypes=False) + + def testBooleanIndexingArray2DBroadcast(self): + idx = onp.array([True, True, False, True]) + x = onp.arange(8).reshape(4, 2) + ans = api.device_put(x)[idx] + expected = x[idx] + self.assertAllClose(ans, expected, check_dtypes=False) + + def testBooleanIndexingList2DBroadcast(self): + idx = [True, True, False, True] + x = onp.arange(8).reshape(4, 2) + ans = api.device_put(x)[idx] + expected = x[idx] + self.assertAllClose(ans, expected, check_dtypes=False) + + def testBooleanIndexingArray2D(self): + idx = onp.array([[True, False], + [False, True], + [False, False], + [True, True]]) + x = onp.arange(8).reshape(4, 2) + ans = api.device_put(x)[idx] + expected = x[idx] + self.assertAllClose(ans, expected, check_dtypes=False) + + def testBooleanIndexingDynamicShapeError(self): + x = onp.zeros(3) + i = onp.array([True, True, False]) + self.assertRaises(IndexError, lambda: api.jit(lambda x, i: x[i])(x, i)) + if __name__ == "__main__": absltest.main() diff --git a/tests/scipy_stats_test.py b/tests/scipy_stats_test.py --- a/tests/scipy_stats_test.py +++ b/tests/scipy_stats_test.py @@ -50,7 +50,8 @@ def args_maker(): x, a, b, loc, scale = map(rng, shapes, dtypes) return [x, onp.abs(a), onp.abs(b), loc, onp.abs(scale)] - self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True) + self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True, + tol=1e-4) self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True) @genNamedParametersNArgs(3, jtu.rand_default())
boolean indexing support The `README` mentions boolean index slices `a[idx]` in relation to jit-able functions as something to avoid, but currently it seems they do not work in jax at all ``` >>> import jax.numpy as np >>> a = np.array([1,2,3]) >>> a[[True,False,False]] ``` raises `IndexError: Indexing mode not yet supported. Open a feature request!` Is this expected?
Thanks for opening this! It’s something we could and should support without `jit`, but we haven’t implemented it yet. We should add it! (Sorry if the readme is unclear on the current level of support.)
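What the patch supports, per its tests: static boolean masks, including masks that broadcast against leading axes, with an `IndexError` if the mask is traced under `jit`: ``` import numpy as onp from jax import api # the tests of this era use api.device_put x = api.device_put(onp.arange(3)) print(x[onp.array([True, True, False])]) # [0 1] x2 = api.device_put(onp.arange(8).reshape(4, 2)) print(x2[onp.array([True, True, False, True])]) # rows 0, 1 and 3 ```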
2018-12-23T19:05:32
google/jax
175
google__jax-175
[ "174" ]
b2d6ce175a0193ba0a7cb6a851f4f9910a1e1dc0
diff --git a/jax/experimental/stax.py b/jax/experimental/stax.py --- a/jax/experimental/stax.py +++ b/jax/experimental/stax.py @@ -212,6 +212,18 @@ def FanInSum(): FanInSum = FanInSum() +def FanInConcat(axis=-1): + """Layer construction function for a fan-in concatenation layer.""" + def init_fun(input_shape): + ax = axis % len(input_shape[0]) + concat_size = sum(shape[ax] for shape in input_shape) + out_shape = input_shape[0][:ax] + (concat_size,) + input_shape[0][ax+1:] + return out_shape, () + def apply_fun(params, inputs, rng=None): + return np.concatenate(inputs, axis) + return init_fun, apply_fun + + def Dropout(rate, mode='train'): """Layer construction function for a dropout layer with given rate.""" def init_fun(input_shape): diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ setup( name='jax', - version='0.1.14', + version='0.1.15', description='Differentiate, compile, and transform Numpy code.', author='JAX team', author_email='[email protected]',
diff --git a/tests/stax_test.py b/tests/stax_test.py --- a/tests/stax_test.py +++ b/tests/stax_test.py @@ -30,9 +30,18 @@ config.parse_flags_with_absl() +def random_inputs(rng, input_shape): + if type(input_shape) is tuple: + return rng.randn(*input_shape).astype(onp.float32) + elif type(input_shape) is list: + return [random_inputs(rng, shape) for shape in input_shape] + else: + raise TypeError(type(input_shape)) + + def _CheckShapeAgreement(test_case, init_fun, apply_fun, input_shape): result_shape, params = init_fun(input_shape) - inputs = onp.random.RandomState(0).randn(*input_shape).astype("float32") + inputs = random_inputs(onp.random.RandomState(0), input_shape) rng_key = random.PRNGKey(0) result = apply_fun(params, inputs, rng_key) test_case.assertEqual(result.shape, result_shape) @@ -130,6 +139,26 @@ def testDropoutShape(self, input_shape): init_fun, apply_fun = stax.Dropout(0.9) _CheckShapeAgreement(self, init_fun, apply_fun, input_shape) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_input_shape={}".format(input_shape), + "input_shape": input_shape} + for input_shape in [(3, 4), (2, 5, 6, 1)])) + def testFanInSum(self, input_shape): + init_fun, apply_fun = stax.FanInSum + _CheckShapeAgreement(self, init_fun, apply_fun, [input_shape, input_shape]) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_inshapes={}_axis={}".format(input_shapes, axis), + "input_shapes": input_shapes, "axis": axis} + for input_shapes, axis in [ + ([(2, 3), (2, 1)], 1), + ([(2, 3), (2, 1)], -1), + ([(1, 2, 4), (1, 1, 4)], 1), + ])) + def testFanInConcat(self, input_shapes, axis): + init_fun, apply_fun = stax.FanInConcat(axis) + _CheckShapeAgreement(self, init_fun, apply_fun, input_shapes) + if __name__ == "__main__": absltest.main()
Need a function for concat layers in stax, maybe FanInCat Sometimes we need to concatenate many layers. So, like `parallel`, `FanOut`, and `FanInSum`, we need a `FanInCat`.
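A usage sketch of the `FanInConcat` this PR adds, combined with `FanOut` and `parallel` as the issue suggests (the surrounding combinators are assumed to follow this era's `init_fun(input_shape)` stax API): ``` from jax.experimental import stax init_fun, apply_fun = stax.serial( stax.FanOut(2), stax.parallel(stax.Dense(3), stax.Dense(5)), stax.FanInConcat(axis=-1), # feature sizes concatenate: 3 + 5 = 8 ) out_shape, params = init_fun((2, 4)) print(out_shape) # (2, 8) ```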
2018-12-31T00:54:19
google/jax
178
google__jax-178
[ "176" ]
61d5d79e393dd43f645f06ba06589ff396ccdb13
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1070,6 +1070,27 @@ def diag(v, k=0): raise ValueError("diag input must be 1d or 2d") +@_wraps(onp.polyval) +def polyval(p, x): + if isinstance(p, onp.poly1d): + p = onp.asarray(p) + if isinstance(x, onp.poly1d): + y = 0 + else: + y = zeros_like(x) + for i in range(len(p)): + y = y * x + p[i] + return y + + +@_wraps(onp.append) +def append(arr, values, axis=None): + if axis is None: + return concatenate([ravel(arr), ravel(values)], 0) + else: + return concatenate([arr, values], axis=axis) + + ### Tensor contraction operations
diff --git a/jax/test_util.py b/jax/test_util.py --- a/jax/test_util.py +++ b/jax/test_util.py @@ -458,7 +458,7 @@ def wrapped_fun(*args): def _CheckAgainstNumpy(self, lax_op, numpy_reference_op, args_maker, check_dtypes=False, tol=1e-5): args = args_maker() - lax_ans = lax_op(*args) numpy_ans = numpy_reference_op(*args) + lax_ans = lax_op(*args) self.assertAllClose(lax_ans, numpy_ans, check_dtypes=check_dtypes, atol=tol, rtol=tol) diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -35,7 +35,8 @@ config.parse_flags_with_absl() FLAGS = config.FLAGS -nonempty_array_shapes = [(), (4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)] +nonempty_nonscalar_array_shapes = [(4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)] +nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes empty_array_shapes = [(0,), (0, 4), (3, 0),] scalar_shapes = [jtu.NUMPY_SCALAR_SHAPE] @@ -119,6 +120,7 @@ def op_record(name, nargs, dtypes, shapes, rng, diff_modes, test_name=None): op_record("log1p", 1, numeric_dtypes, all_shapes, jtu.rand_small_positive(), []), op_record("logaddexp", 2, float_dtypes, all_shapes, jtu.rand_default(), ["rev"]), op_record("logaddexp2", 2, float_dtypes, all_shapes, jtu.rand_default(), ["rev"]), + op_record("polyval", 2, numeric_dtypes, nonempty_nonscalar_array_shapes, jtu.rand_default(), []), op_record("ravel", 1, default_dtypes, all_shapes, jtu.rand_default(), ["rev"]), op_record("remainder", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []), op_record("sqrt", 1, default_dtypes, all_shapes, jtu.rand_positive(), ["rev"]), @@ -451,6 +453,28 @@ def args_maker(): self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True) self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format( + axis, ",".join(str(d) for d in base_shape), + ",".join(onp.dtype(dtype).name for dtype in dtypes)), + "axis": axis, "base_shape": base_shape, "dtypes": dtypes, + "rng": jtu.rand_default()} + for dtypes in CombosWithReplacement(default_dtypes, 2) + for base_shape in [(4,), (3, 4), (2, 3, 4)] + for axis in range(-len(base_shape)+1, len(base_shape)))) + def testAppend(self, axis, base_shape, dtypes, rng): + wrapped_axis = axis % len(base_shape) + shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:] + for size, _ in zip(itertools.cycle([3, 1, 4]), dtypes)] + onp_fun = lambda arr, values: onp.append(arr, values, axis=axis) + lnp_fun = lambda arr, values: lnp.append(arr, values, axis=axis) + + def args_maker(): + return [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)] + + self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True) + self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape=[{}]_axis={}_repeats={}".format( jtu.format_shape_dtype_string(shape, dtype), axis, repeats),
add np.append and np.polyval
Requested by @duvenaud!
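A quick usage sketch of the two additions (not part of the PR; the printed values are what NumPy's own `polyval`/`append` produce, which the `_wraps` decorators mirror):
```python
import numpy as onp
import jax.numpy as np  # jax's numpy, following the import convention of this era

p = onp.array([2., 0., 1.])            # coefficients of 2*x**2 + 1, highest power first
x = onp.array([0., 1., 2.])
print(np.polyval(p, x))                # Horner's rule gives [1., 3., 9.]

a = onp.array([[1, 2], [3, 4]])
print(np.append(a, onp.array([5])))                 # axis=None ravels both: [1 2 3 4 5]
print(np.append(a, onp.array([[5, 6]]), axis=0))    # concatenates along rows: shape (3, 2)
```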
2018-12-31T01:49:59
google/jax
179
google__jax-179
[ "177" ]
c5c6e6c5c7f86dad5ac70e974af7d814e723fdb7
diff --git a/jax/random.py b/jax/random.py --- a/jax/random.py +++ b/jax/random.py @@ -29,41 +29,25 @@ from jax.lib import xla_bridge from jax import core -class PRNGKey(object): - """A pseudo-random number generator (PRNG) key for use with lax.random.""" - __slots__ = ["keypair"] - - def __init__(self, seed): - """Create a new PRNG key. - - Args: - seed: a scalar integer value used to initialize the PRNG key. - - Returns: - A new PRNGKey object. - """ - convert = lambda key: lax.convert_element_type(key, onp.uint32) - if onp.shape(seed): - raise TypeError("PRNGKey seed must be a scalar.") - if isinstance(seed, (int, onp.ndarray)): - # Special handling of raw integer values, which may have be 64bit even - # when jax_enable_x64=False and we don't want to drop the top 32 bits - k1 = convert(onp.bitwise_and(onp.right_shift(seed, 32), 0xFFFFFFFF)) - else: - k1 = convert(lax.shift_right_logical(seed, 32)) - k2 = convert(lax.bitwise_and(seed, 0xFFFFFFFF)) - self.keypair = core.pack((k1, k2)) - - @classmethod - def from_keypair(cls, keypair): - """Internal method to create a PRNGKey instance from a raw key pair.""" - new = cls.__new__(cls) - new.keypair = core.pack(keypair) - return new - - -tree_util.register_pytree_node(PRNGKey, lambda k: (k.keypair, None), - lambda _, xs: PRNGKey.from_keypair(xs)) + +def PRNGKey(seed): + if onp.shape(seed): + raise TypeError("PRNGKey seed must be a scalar.") + convert = lambda k: lax.reshape(lax.convert_element_type(k, onp.uint32), [1]) + if isinstance(seed, (int, onp.ndarray)): + # Special handling of raw integer values, which may have be 64bit even + # when jax_enable_x64=False and we don't want to drop the top 32 bits + k1 = convert(onp.bitwise_and(onp.right_shift(seed, 32), 0xFFFFFFFF)) + else: + k1 = convert(lax.shift_right_logical(seed, 32)) + k2 = convert(lax.bitwise_and(seed, 0xFFFFFFFF)) + return lax.concatenate([k1, k2], 0) + +def is_prng_key(key): + try: + return key.shape == (2,) and key.dtype == onp.uint32 + except AttributeError: + return False ### utilities @@ -169,14 +153,15 @@ def split(key, num=2): Returns: A tuple of length `num` of new PRNGKey instances. """ - counts = lax.tie_in(key.keypair, lax.iota(onp.uint32, num * 2)) - bits = lax.reshape(threefry_2x32(key.keypair, counts), (num, 2)) - keypairs = (lax.index_in_dim(bits, i, keepdims=False) for i in range(num)) - return tuple(PRNGKey.from_keypair((kp[0], kp[1])) for kp in keypairs) + counts = lax.tie_in(key, lax.iota(onp.uint32, num * 2)) + bits = lax.reshape(threefry_2x32(key, counts), (num, 2)) + return tuple(bits) def _random_bits(key, bit_width, shape): """Sample uniform random bits of given width and shape using PRNG key.""" + if not is_prng_key(key): + raise TypeError("_random_bits got invalid prng key.") if bit_width not in (32, 64): raise TypeError("requires 32- or 64-bit field width.") max_count = (bit_width // 32) * onp.prod(shape) @@ -184,8 +169,8 @@ def _random_bits(key, bit_width, shape): # TODO(mattjj): just split the key here raise TypeError("requesting more random bits than a single call provides.") - counts = lax.tie_in(key.keypair, lax.iota(onp.uint32, max_count)) - bits = threefry_2x32(key.keypair, counts) + counts = lax.tie_in(key, lax.iota(onp.uint32, max_count)) + bits = threefry_2x32(key, counts) if bit_width == 64: bits = [lax.convert_element_type(x, onp.uint64) for x in np.split(bits, 2)] bits = (bits[0] << onp.uint64(32)) | bits[1]
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -24,6 +24,7 @@ from jax import test_util as jtu from jax.abstract_arrays import ShapedArray from jax import lax +from jax import random from jax.api import jit, grad, jvp, vjp, trace_to_jaxpr, jacfwd, jacrev, hessian from jax.api import vmap from jax.core import unit @@ -326,6 +327,14 @@ def testDynamicSlice(self): expected = x[idx] self.assertAllClose(ans, expected, check_dtypes=False) + def testRandom(self): + seeds = vmap(random.PRNGKey)(onp.arange(10)) + ans = vmap(partial(random.normal, shape=(3, 2)))(seeds) + expected = onp.stack([random.normal(random.PRNGKey(seed), (3, 2)) + for seed in onp.arange(10)]) + self.assertAllClose(ans, expected, check_dtypes=False) + assert len(onp.unique(ans)) == 10 * 3 * 2 + if __name__ == '__main__': absltest.main()
vmap over PRNG keys
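A sketch of what this change enables, mirroring the added `testRandom`: because a key is now a plain `uint32` array of shape `(2,)` rather than a wrapper object, it batches like any other array.
```python
import numpy as onp
from functools import partial
from jax import vmap
from jax import random

seeds = vmap(random.PRNGKey)(onp.arange(10))                  # a (10, 2) batch of keys
samples = vmap(partial(random.normal, shape=(3, 2)))(seeds)   # shape (10, 3, 2)
```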
2018-12-31T06:27:22
google/jax
188
google__jax-188
[ "187" ]
82fb71413f43821fb4969252d869fb6a140b6131
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1510,7 +1510,7 @@ def _rewriting_take(arr, idx, axis=0): # Handle integer array indexing *without* ellipsis/slices/nones # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#integer-array-indexing if _is_advanced_int_indexer_without_slices(idx): - if isinstance(idx, list): + if isinstance(idx, (tuple, list)): if _any(_shape(e) for e in idx): # At least one sequence element in the index list means broadcasting. idx = broadcast_arrays(*idx) @@ -1521,7 +1521,7 @@ def _rewriting_take(arr, idx, axis=0): # The indexer is just a single integer array. idx = [idx] - flat_idx = tuple(mod(ravel(x), arr.shape[i]) for i, x in enumerate(idx)) + flat_idx = tuple([mod(ravel(x), arr.shape[i]) for i, x in enumerate(idx)]) out = lax.index_take(arr, flat_idx, tuple(range(len(idx)))) return lax.reshape(out, idx[0].shape + _shape(arr)[len(idx):])
diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py --- a/tests/lax_numpy_indexing_test.py +++ b/tests/lax_numpy_indexing_test.py @@ -398,6 +398,10 @@ def fun(unpacked_indexer, x): [IndexSpec(shape=(3, 4, 5), indexer=[[0, 1]]), IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]], [[2, 3, 0, 3]]]), ]), + ("TupleOfListsOfPythonInts", + [IndexSpec(shape=(3, 4, 5), indexer=([0, 1])), + IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0, 3]])), + ]), ("ListOfPythonIntsAndIntArrays", [IndexSpec(shape=(3, 4, 5), indexer=[0, onp.array([0, 1])]), IndexSpec(shape=(3, 4, 5), indexer=[0, 1, @@ -630,6 +634,15 @@ def testBooleanIndexingDynamicShapeError(self): i = onp.array([True, True, False]) self.assertRaises(IndexError, lambda: api.jit(lambda x, i: x[i])(x, i)) + def testIssue187(self): + x = lnp.ones((5, 5)) + x[[0, 2, 4], [0, 2, 4]] # doesn't crash + + x = onp.arange(25).reshape((5, 5)) + ans = api.jit(lambda x: x[[0, 2, 4], [0, 2, 4]])(x) + expected = x[[0, 2, 4], [0, 2, 4]] + self.assertAllClose(ans, expected, check_dtypes=False) + if __name__ == "__main__": absltest.main()
Failure while indexing across both dimensions of a 2D jax.lax.FilledConstant array
The same indexing works when applied to a numpy.ndarray. According to the README, all NumPy indexing (excepting indexed assignments) should work.
Screenshots:
Indexing works in original numpy but fails with jax's numpy: https://drive.google.com/file/d/1bpj4RWV9_oFajeEDAN43IdiN9VHeRHJE/view?usp=sharing
Indexing over one dimension works, but not over two: https://drive.google.com/file/d/10nCNyIIj2Gr9vOcSKV0p203hfKqEuQd_/view?usp=sharing
Any idea why this is happening?
Thanks for reporting this! This must be something I broke in #140. I'll take a look... Nevermind, it's not #140, and is actually just a bug in our indexing implementation. In particular, this fails the same way:
```python
import numpy as onp
from jax import jit
jit(lambda x: x[[0, 2, 4], [0, 2, 4]])(onp.ones((5, 5)))
```
This is just a case we never handled correctly, due to one incorrect `isinstance(idx, list)` that should have been an `isinstance(idx, (list, tuple))`.
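For reference, a minimal illustration of the fixed path (the expression `x[[0, 2, 4], [0, 2, 4]]` reaches `_rewriting_take` as a tuple of lists, the case the patch adds):
```python
import numpy as onp
import jax.numpy as lnp

x = lnp.array(onp.arange(25).reshape((5, 5)))
print(x[[0, 2, 4], [0, 2, 4]])                               # now works: [ 0 12 24]
print(onp.arange(25).reshape((5, 5))[[0, 2, 4], [0, 2, 4]])  # NumPy gives the same
```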
2019-01-03T01:48:19
google/jax
197
google__jax-197
[ "180" ]
6687734bb839392cc8778095285fb22fafc265a2
diff --git a/jax/lax_linalg.py b/jax/lax_linalg.py
--- a/jax/lax_linalg.py
+++ b/jax/lax_linalg.py
@@ -42,6 +42,13 @@ def qr(x, full_matrices=True):
   q, r = qr_p.bind(x, full_matrices=full_matrices)
   return q, r
 
+def svd(x, full_matrices=True, compute_uv=True):
+  s, u, v = svd_p.bind(x, full_matrices=full_matrices, compute_uv=compute_uv)
+  if compute_uv:
+    return u, s, v
+  else:
+    return s
+
 def triangular_solve(a, b, left_side=False, lower=False,
                      transpose_a=False, conjugate_a=False):
   return triangular_solve_p.bind(
@@ -316,3 +323,49 @@ def qr_jvp_rule(primals, tangents, full_matrices):
 qr_p.def_abstract_eval(qr_abstract_eval)
 xla.translations[qr_p] = qr_translation_rule
 ad.primitive_jvps[qr_p] = qr_jvp_rule
+
+
+# Singular value decomposition
+
+def svd_impl(operand, full_matrices, compute_uv):
+  s, u, vt = xla.apply_primitive(svd_p, operand, full_matrices=full_matrices, compute_uv=compute_uv)
+  return core.pack((s, u, vt))
+
+def svd_translation_rule(c, operand, full_matrices, compute_uv):
+  raise NotImplementedError(
+      "Singular value decomposition is only implemented on the CPU backend")
+
+def svd_abstract_eval(operand, full_matrices, compute_uv):
+  if isinstance(operand, ShapedArray):
+    if operand.ndim < 2:
+      raise ValueError("Argument to singular value decomposition must have ndims >= 2")
+
+    batch_dims = operand.shape[:-2]
+    m = operand.shape[-2]
+    n = operand.shape[-1]
+    s = ShapedArray(batch_dims + (min(m, n),), operand.dtype)
+    u = ShapedArray(batch_dims + (m, m if full_matrices else min(m, n)), operand.dtype)
+    vt = ShapedArray(batch_dims + (n if full_matrices else min(m, n), n), operand.dtype)
+  else:
+    s = operand
+    u = operand
+    vt = operand
+  return core.AbstractTuple((s, u, vt))
+
+def svd_cpu_translation_rule(c, operand, full_matrices, compute_uv):
+  shape = c.GetShape(operand)
+  dtype = shape.element_type().type
+  if len(shape.dimensions()) == 2 and dtype in {np.float32, np.float64}:
+    out = lapack.jax_gesdd(c, operand, full_matrices=full_matrices, compute_uv=compute_uv)
+    return c.Tuple(c.GetTupleElement(out, 0),
+                   c.GetTupleElement(out, 1),
+                   c.GetTupleElement(out, 2))
+  else:
+    raise NotImplementedError(
+        "Only unbatched singular value decomposition for real matrices is implemented on CPU")
+
+svd_p = Primitive('svd')
+svd_p.def_impl(svd_impl)
+svd_p.def_abstract_eval(svd_abstract_eval)
+xla.translations[svd_p] = svd_translation_rule
+xla.backend_specific_translations['Host'][svd_p] = svd_cpu_translation_rule
diff --git a/jax/numpy/linalg.py b/jax/numpy/linalg.py
--- a/jax/numpy/linalg.py
+++ b/jax/numpy/linalg.py
@@ -55,6 +55,13 @@ def cholesky(a):
   return lax_linalg.cholesky(a)
 
 
+@_wraps(onp.linalg.svd)
+def svd(a, full_matrices=True, compute_uv=True):
+  warnings.warn(_EXPERIMENTAL_WARNING)
+  a = _promote_arg_dtypes(np.asarray(a))
+  return lax_linalg.svd(a, full_matrices, compute_uv)
+
+
 @_wraps(onp.linalg.slogdet)
 def slogdet(a):
   a = _promote_arg_dtypes(np.asarray(a))
diff --git a/jax/scipy/linalg.py b/jax/scipy/linalg.py
--- a/jax/scipy/linalg.py
+++ b/jax/scipy/linalg.py
@@ -40,6 +40,14 @@ def cholesky(a, lower=False, overwrite_a=False, check_finite=True):
   return l if lower else np.conj(l.T)
 
 
+@_wraps(scipy.linalg.svd)
+def svd(a, full_matrices=True, compute_uv=True, overwrite_a=False, check_finite=True, lapack_driver='gesdd'):
+  warnings.warn(_EXPERIMENTAL_WARNING)
+  del overwrite_a, check_finite, lapack_driver
+  a = np_linalg._promote_arg_dtypes(np.asarray(a))
+  return lax_linalg.svd(a, full_matrices, compute_uv)
+
+
 @_wraps(scipy.linalg.det)
 def det(a, overwrite_a=False, check_finite=True):
   warnings.warn(_EXPERIMENTAL_WARNING)
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -134,6 +134,54 @@ def norm(x): self._CompileAndCheck(partial(np.linalg.eigh, UPLO=uplo), args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_n={}_full_matrices={}_compute_uv={}".format( + jtu.format_shape_dtype_string((m, n), dtype), full_matrices, compute_uv), + "m": m, "n": n, "dtype": dtype, "full_matrices": full_matrices, + "compute_uv": compute_uv, "rng": rng} + for m in [2, 7, 29, 53] + for n in [2, 7, 29, 53] + for dtype in float_types() + for full_matrices in [False, True] + for compute_uv in [False, True] + for rng in [jtu.rand_default()])) + @jtu.skip_on_devices("gpu", "tpu") + def testSVD(self, m, n, dtype, full_matrices, compute_uv, rng): + if not hasattr(lapack, "jax_gesdd"): + self.skipTest("No singular value decomposition implementation available") + args_maker = lambda: [rng((m, n), dtype)] + + # Norm, adjusted for dimension and type. + def norm(x): + n = onp.linalg.norm(x, axis=(-2, -1)) + return n / (max(m, n) * onp.finfo(dtype).eps) + + a, = args_maker() + out = np.linalg.svd(a, full_matrices=full_matrices, compute_uv=compute_uv) + + if compute_uv: + # Check the reconstructed matrices + if full_matrices: + k = min(m, n) + if m < n: + self.assertTrue(onp.all(norm(a - onp.matmul(out[1] * out[0], out[2][:k, :])) < 50)) + else: + self.assertTrue(onp.all(norm(a - onp.matmul(out[1] * out[0][:, :k], out[2])) < 50)) + else: + self.assertTrue(onp.all(norm(a - onp.matmul(out[1] * out[0], out[2])) < 50)) + + # Check the unitary properties of the singular vector matrices. + self.assertTrue(onp.all(norm(onp.eye(out[0].shape[1]) - onp.matmul(T(out[0]), out[0])) < 5)) + if m >= n: + self.assertTrue(onp.all(norm(onp.eye(out[2].shape[1]) - onp.matmul(T(out[2]), out[2])) < 5)) + else: + self.assertTrue(onp.all(norm(onp.eye(out[2].shape[0]) - onp.matmul(out[2], T(out[2]))) < 5)) + + else: + self.assertTrue(onp.allclose(onp.linalg.svd(a, compute_uv=False), onp.asarray(out))) + + self._CompileAndCheck(partial(np.linalg.svd, full_matrices=full_matrices, compute_uv=compute_uv), + args_maker, check_dtypes=True) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}_fullmatrices={}".format(
[feature request] add support for numpy.linalg.svd and scipy.linalg.svd
Hi,
Considering the recent work on improving the linalg modules with the addition of Cholesky and QR decompositions and solve methods, I would like to see SVD added to this list. I don't mind working on this with some guidance from the developers, if they think it is worth adding. Thank you.
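A hedged usage sketch of the new wrappers (per the translation rules above, only unbatched real matrices on the CPU backend are supported):
```python
import numpy as onp
import jax.numpy as np

a = onp.random.RandomState(0).randn(7, 4).astype(onp.float32)
u, s, vt = np.linalg.svd(a, full_matrices=False)
print(onp.allclose(onp.asarray(np.dot(u * s, vt)), a, atol=1e-4))  # reconstructs a
print(np.linalg.svd(a, compute_uv=False))                          # singular values only
```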
2019-01-05T05:46:30
google/jax
199
google__jax-199
[ "182" ]
8ada14e96b8c5ec7c0c60099460003be02aa2541
diff --git a/jax/experimental/stax.py b/jax/experimental/stax.py --- a/jax/experimental/stax.py +++ b/jax/experimental/stax.py @@ -47,6 +47,11 @@ def logsoftmax(x, axis=-1): """Apply log softmax to an array of logits, log-normalizing along an axis.""" return x - logsumexp(x, axis, keepdims=True) +def softmax(x, axis=-1): + """Apply softmax to an array of logits, exponentiating and normalizing along an axis.""" + unnormalized = np.exp(x - x.max(axis, keepdims=True)) + return unnormalized / unnormalized.sum(axis, keepdims=True) + def fastvar(x, axis, keepdims): """A fast but less numerically-stable variance calculation than np.var.""" return np.mean(x**2, axis, keepdims=keepdims) - np.mean(x, axis, keepdims=keepdims)**2 @@ -146,7 +151,9 @@ def _elemwise_no_params(fun, **kwargs): return init_fun, apply_fun Tanh = _elemwise_no_params(np.tanh) Relu = _elemwise_no_params(relu) +Exp = _elemwise_no_params(np.exp) LogSoftmax = _elemwise_no_params(logsoftmax, axis=-1) +Softmax = _elemwise_no_params(softmax, axis=-1) Softplus = _elemwise_no_params(softplus)
diff --git a/tests/stax_test.py b/tests/stax_test.py
--- a/tests/stax_test.py
+++ b/tests/stax_test.py
@@ -159,6 +159,17 @@ def testFanInConcat(self, input_shapes, axis):
     init_fun, apply_fun = stax.FanInConcat(axis)
     _CheckShapeAgreement(self, init_fun, apply_fun, input_shapes)
 
+  def testIssue182(self):
+    init_fun, apply_fun = stax.Softmax
+    input_shape = (10, 3)
+    inputs = onp.arange(30.).astype("float32").reshape(input_shape)
+
+    out_shape, params = init_fun(input_shape)
+    out = apply_fun(params, inputs)
+
+    assert out_shape == out.shape
+    assert onp.allclose(onp.sum(onp.asarray(out), -1), 1.)
+
 if __name__ == "__main__":
   absltest.main()
add Softmax layer to stax
Testing in examples/mnist_classifier.py,
```python
def accuracy(params, batch):
  inputs, targets = batch
  target_class = np.argmax(targets, axis=1)
  x = predict(params, inputs)
  print(x[0])
```
the final layer is LogSoftmax, but the output does not look correct:
```python
Starting training...
[-2.3113673 -2.6440005 -2.4797316 -1.79847   -1.6207608 -2.931935
 -3.303906  -2.7275395 -2.2099946 -2.1403143]
```
Can you say a bit more about what might be incorrect here? The sum of the elementwise-exp of those numbers is 1, which is intended.
I mean the sum of those numbers should be 1, not their elementwise-exp's.
That would be true of a softmax layer, but `LogSoftmax` is intended to be a log-softmax layer. Would it be useful to you for us to add a softmax layer (including the exp)?
Sorry, I misunderstood log-softmax. Maybe it would be more convenient to provide loss functions.
Got it! I'll add that to my todo list.
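As a quick check of the distinction discussed here, exponentiating the log-softmax output recovers the softmax probabilities, whose rows sum to 1 (module path as of this PR):
```python
import numpy as onp
import jax.numpy as np
from jax.experimental import stax

logits = np.array(onp.random.randn(10, 3))
log_probs = stax.logsoftmax(logits)   # rows log-normalize: their exp sums to 1
probs = stax.softmax(logits)          # rows sum to 1 directly
print(onp.allclose(onp.exp(onp.asarray(log_probs)), onp.asarray(probs)))  # True
```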
2019-01-05T18:05:01
google/jax
203
google__jax-203
[ "173" ]
5031016465d00ce45591f5957fd32d367baa299c
diff --git a/examples/advi.py b/examples/advi.py --- a/examples/advi.py +++ b/examples/advi.py @@ -1,3 +1,17 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Automatic differentiation variational inference in Numpy and JAX. This demo fits a Gaussian approximation to an intractable, unnormalized diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -180,6 +180,7 @@ def jacfun(*args, **kwargs): return tree_transpose(tree_structure(example_args), tree_structure(y), jac) return jacfun +jacobian = jacrev def hessian(fun): return jacfwd(jacrev(fun)) diff --git a/jax/interpreters/batching.py b/jax/interpreters/batching.py --- a/jax/interpreters/batching.py +++ b/jax/interpreters/batching.py @@ -27,7 +27,7 @@ from .. import core from ..core import Trace, Tracer, new_master, pack, AbstractTuple, JaxTuple from ..abstract_arrays import ShapedArray, make_shaped_array, array_types -from ..ad_util import add_jaxvals_p +from ..ad_util import add_jaxvals_p, zeros_like_p, zeros_like_jaxval from ..linear_util import transformation, transformation_with_aux, wrap_init from ..tree_util import register_pytree_node from ..util import unzip2, partial, safe_map @@ -228,6 +228,8 @@ def reducer_batcher(prim, batched_args, batch_dims, axes, **kwargs): kwargs['input_shape'] = operand.shape return prim.bind(operand, axes=axes, **kwargs), bdim_out +# set up primitive batches for ad_util primitives + def add_batched(batched_args, batch_dims): bdx, bdy = batch_dims if bdx == bdy: @@ -238,6 +240,12 @@ def add_batched(batched_args, batch_dims): return add_jaxvals_p.bind(xs, ys), 0 primitive_batchers[add_jaxvals_p] = add_batched +def zeros_like_batched(batched_args, batch_dims): + val, = batched_args + bdim, = batch_dims + return zeros_like_jaxval(val), bdim +primitive_batchers[zeros_like_p] = zeros_like_batched + ### util @@ -287,6 +295,9 @@ def moveaxis(sz, dst, src, x): return pack(map(partial(moveaxis, sz), dst, src, x)) elif type(src) is tuple: return pack(map(partial(moveaxis, sz, dst), src, x)) + elif type(dst) is tuple: + srcs = (src,) * len(dst) + return pack(map(partial(moveaxis, sz), dst, srcs, x)) else: return pack(map(partial(moveaxis, sz, dst, src), x)) elif isinstance(aval, ShapedArray):
diff --git a/jax/test_util.py b/jax/test_util.py --- a/jax/test_util.py +++ b/jax/test_util.py @@ -427,6 +427,11 @@ def assertAllClose(self, x, y, check_dtypes, atol=None, rtol=None): self.assertEqual(len(x), len(y)) for x_elt, y_elt in zip(x, y): self.assertAllClose(x_elt, y_elt, check_dtypes, atol=atol, rtol=rtol) + elif isinstance(x, dict): + self.assertIsInstance(y, dict) + self.assertEqual(set(x.keys()), set(y.keys())) + for k in x.keys(): + self.assertAllClose(x[k], y[k], check_dtypes, atol=atol, rtol=rtol) else: is_array = lambda x: hasattr(x, '__array__') or onp.isscalar(x) self.assertTrue(is_array(x)) diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -23,7 +23,7 @@ from jax import test_util as jtu import jax.numpy as np -from jax import jit, grad, device_get, device_put, jacfwd, jacrev +from jax import jit, grad, device_get, device_put, jacfwd, jacrev, hessian from jax import api from jax.core import Primitive from jax.interpreters.partial_eval import def_abstract_eval @@ -260,6 +260,15 @@ def test_jacobian(self): f = lambda x: np.tanh(np.dot(A, x)) assert onp.allclose(jacfwd(f)(x), jacrev(f)(x)) + @jtu.skip_on_devices("tpu") + def test_hessian(self): + R = onp.random.RandomState(0).randn + A = R(4, 4) + x = R(4) + + f = lambda x: np.dot(x, np.dot(A, x)) + assert onp.allclose(hessian(f)(x), A + A.T) + def test_std_basis(self): basis = api._std_basis(np.zeros(3)) assert getattr(basis, "shape", None) == (3, 3) @@ -276,6 +285,42 @@ def test_std_basis(self): assert getattr(basis[1][0], "shape", None) == (16, 3) assert getattr(basis[1][1], "shape", None) == (16, 3, 4) + @jtu.skip_on_devices("tpu") + def test_jacobian_on_pytrees(self): + for jacfun in [jacfwd, jacrev]: + ans = jacfun(lambda x, y: (x, y))(0., 1.) + expected = (1., 0.) + self.assertAllClose(ans, expected, check_dtypes=False) + + ans = jacfun(lambda x, y: (x, y), 1)(0., 1.) + expected = (0., 1.) + self.assertAllClose(ans, expected, check_dtypes=False) + + ans = jacfun(lambda x, y: (x, y), (0, 1))(0., 1.) + expected = ((1., 0.), + (0., 1.),) + self.assertAllClose(ans, expected, check_dtypes=False) + + ans = jacfun(lambda x: x[:2])((1., 2., 3.)) + expected = ((1., 0., 0.), + (0., 1., 0.)) + self.assertAllClose(ans, expected, check_dtypes=False) + + R = onp.random.RandomState(0).randn + x = R(2) + y = R(3) + ans = jacfun(lambda x, y: {'x': x, 'xy': np.outer(x, y)})(x, y) + expected = {'x': onp.eye(2), + 'xy': onp.kron(onp.eye(2), y[:, None]).reshape(2, 3, 2)} + self.assertAllClose(ans, expected, check_dtypes=False) + + @jtu.skip_on_devices("tpu") + def test_hessian_on_pytrees(self): + ans = hessian(lambda x: np.array(x)**2)((1., 2.)) + expected = ((onp.array([2., 0.]), onp.array([0., 0.])), + (onp.array([0., 0.]), onp.array([0., 2.]))) + self.assertAllClose(ans, expected, check_dtypes=False) + if __name__ == '__main__': absltest.main()
How to use jax.hessian?
I'm using this snippet from the README
```python
from jax import jit, jacfwd, jacrev

def hessian(fun):
  return jit(jacfwd(jacrev(fun)))
```
combined with the "getting started with pytorch data loaders" colab. How do I compute and use the hessian of this neural network? So far I have tried:
1. Naive: `hessian_loss = hessian(loss)(params, x, y)` (`TypeError: jacfwd() takes 2 positional arguments but 4 were given`)
2. Naive2: `hessian_loss = hessian(loss)((params, x, y))` (takes a long time with a small network and then returns `AttributeError: 'PyLeaf' object has no attribute 'node_type'`)
3. FromTheTests: `hessian_loss = hessian(lambda params_: loss(params_, x, y))(params)` (`AttributeError: 'PyLeaf' object has no attribute 'node_type'`)

All I really need is to compute the eigenvalues and eigenvectors of this Hessian.
Note: `repr(params) = [(DeviceArray{float32[10,784]}, DeviceArray{float32[10]})]`
Some ideas that I have:
1. Should I re-write the code to use only flat arrays?
This is a great question, and one that we've been discussing! One problem is that JAX gave you very unhelpful error messages for this use case. We need to improve those. Ultimately, we need the error messages to communicate that the `hessian` function, as with `jacfwd` and `jacrev`, only apply to array-input array-output functions (of one argument). In particular, they don't work for tuple/list/dict inputs, or with respect to multiple arguments, for the same reason in both cases. Here's the heart of the issue: given the example `params` here (a one-element list of a pair of arrays), how would you want the value of `hessian(lambda params: loss(params, x, y))(params)` to be stored? More generally, given a tuple/list/dict argument, how should we represent the Hessian? For arrays, there's a clear answer because it's easy to reason about adding axes. If a function `fun` takes arrays of shape `(in_1, in_2, ..., in_n)` to arrays of shape `(out_1, out_2, ..., out_m)`, then it's reasonable for `hessian(fun)` to be a function that takes an array of shape `(in_1, in_2, ..., in_n)` to an array of shape `(out_1, out_2, ..., out_m, in_1, in_2, ..., in_n, in_1, in_2, ..., in_n)`, though other conventions could be reasonable too. (As I wrote this, I got a sense of [deja vu](https://github.com/HIPS/autograd/issues/363#issuecomment-366559091)...) But if `fun` is, say, a function that takes a tuple of scalars to a scalar, then what should `hessian(fun)` return? Some kind of nested tuple structure? How do we organize the nesting? I really mean that as a question! Do you have a clear sense of what would make sense from your perspective? If there's a clear way to handle cases like these, we'll implement it! The answer may ultimately be that we can only represent Hessians for array-input array-output functions, in which case flattening a container-input function into an array-input one, then working with the Hessian of the flattened function, may be the right answer. JAX could provide utilities for doing that (we had [nice ones in Autograd, with a slick implementation](https://github.com/HIPS/autograd/blob/master/autograd/misc/flatten.py)). In the meantime, flattening things yourself probably makes the most sense, unless you want to wait a few days for JAX to gain some flattening utilities. An alternative might be to use a [Lanczos iteration](https://en.wikipedia.org/wiki/Lanczos_algorithm) together with a Hessian-vector product, which you can express easily in JAX. Then you'd only have to deal with vectors, rather than having to worry about how to represent matrices, and we know how to handle tuples/lists/dicts there. (But Lanczos would only be accurate for extremal eigenvalues, and its numerical effectiveness would depend on the conditioning of the Hessian, whereas direct eigenvalue algorithms would be independent of the conditioning.) > I really mean that as a question! Do you have a clear sense of what would make sense from your perspective? If there's a clear way to handle cases like these, we'll implement it! I'm exploring JAX as a more straightforward way to do higher order derivatives. Right now I am using tensorflow (`tf.hessians`) but it quickly becomes clunky and it doesn't work in eager mode. My real use case is to do some analysis on the eigenvalues and eigenvectors of a neural network with a few thousand parameters. 
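For the Hessian-vector product route mentioned above, the standard forward-over-reverse construction is a one-liner; this sketch (not from the thread) works without ever materializing a Hessian, which is what makes it compatible with pytree inputs:
```python
import jax.numpy as np
from jax.api import grad, jvp

def hvp(f, x, v):
  # differentiate grad(f) at x along direction v, giving H(x) v
  return jvp(grad(f), (x,), (v,))[1]

f = lambda x: np.sum(x ** 3)   # for this f, hvp(f, x, v) equals 6 * x * v elementwise
```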
> The answer may ultimately be that we can only represent Hessians for array-input array-output functions, in which case flattening a container-input function into an array-input one, then working with the Hessian of the flattened function, may be the right answer. JAX could provide utilities for doing that (we had nice ones in Autograd, with a slick implementation).

Flattening is currently the most straightforward way to do this in pytorch (which also has nice flattening utilities), say when constructing Fisher information matrices in RL. Unfortunately, flattening in tensorflow graph mode cannot be used with `tf.hessians` unless the flattened version was used to predict the output of the network. I think the most useful way to get Hessians of arbitrary matrices or compositions of matrices would be (a sketch of this recipe follows below):
1. Have weight matrices represented as `[Input_i x Output_i]` for `i` the index of the layer.
2. Get the output of the loss function by composing the weight matrices and required non-linearities.
3. Flatten the matrices into one long array and (somehow) use `jax.hessians` with respect to the computed loss to get a matrix of size `[sum(prod(Input_i, Output_i)) x sum(prod(Input_i, Output_i))]`.

> An alternative might be to use a Lanczos iteration together with a Hessian-vector product, which you can express easily in JAX. Then you'd only have to deal with vectors, rather than having to worry about how to represent matrices, and we know how to handle tuples/lists/dicts there. (But Lanczos would only be accurate for extremal eigenvalues, and its numerical effectiveness would depend on the conditioning of the Hessian, whereas direct eigenvalue algorithms would be independent of the conditioning.)

I really want to compute the eigenspectrum (density of eigenvalues) and associated eigendirections, so accuracy is important.
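A minimal sketch of that flattening recipe. The `flatten`/`unflatten` helpers here are hypothetical stand-ins written for the `[(W, b)]` parameter layout from the question, not an existing JAX utility (such utilities are tracked in #190):
```python
import numpy as onp
import jax.numpy as np
from jax import jacfwd, jacrev

def flatten(params):
  # [(W, b), ...] -> one 1-D array plus the shapes needed to invert the mapping
  leaves = [leaf for pair in params for leaf in pair]
  shapes = [leaf.shape for leaf in leaves]
  flat = np.concatenate([np.ravel(leaf) for leaf in leaves])
  return flat, shapes

def unflatten(flat, shapes):
  leaves, i = [], 0
  for s in shapes:
    n = int(onp.prod(s))
    leaves.append(np.reshape(flat[i:i + n], s))
    i += n
  return [tuple(leaves[j:j + 2]) for j in range(0, len(leaves), 2)]

def flat_hessian(loss, params, x, y):
  flat, shapes = flatten(params)
  f = lambda flat_params: loss(unflatten(flat_params, shapes), x, y)
  return jacfwd(jacrev(f))(flat)   # shape (P, P), with P the total parameter count
```
The eigendecomposition can then be done with ordinary `numpy.linalg.eigh` on the resulting `(P, P)` array.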
2019-01-07T16:55:41
google/jax
232
google__jax-232
[ "227" ]
2166d3f2692a96178a7392c0f2c39b0361514edf
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1592,8 +1592,8 @@ def _rewriting_take(arr, idx, axis=0): axis += isinstance(elt, slice) # advance axis index if not eliminated unexpanded_shape_itr = iter(result.shape) result_shape = tuple(1 if elt is None else next(unexpanded_shape_itr) - for elt in canonical_idx if not isinstance(elt, int)) - return lax.reshape(result, result_shape) + for elt in canonical_idx if isinstance(elt, (type(None), slice))) + return lax.reshape(result, result_shape) if result_shape else result # Handle advanced indexing (non-tuple sequence, ndarray of dtype int or bool, # or a tuple with at least one sequence object).
Failing lax_numpy_indexing_test.py tests on Python 3.7
Running `python tests/lax_numpy_indexing_test.py --num_generated_cases=1` on Python 3.7 (fresh Anaconda install on macOS) fails. NumPy version 1.15.4.
For the record, I got this error in python3.7 using the jaxlib wheel we have on pypi: ``` (myenv) mattjj@mattjj:~/packages/jax$ python tests/lax_numpy_indexing_test.py --num_generated_cases=1 /usr/local/google/home/mattjj/miniconda2/envs/myenv/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'lapack' does not match runtime version 3.7 return f(*args, **kwds) Traceback (most recent call last): File "tests/lax_numpy_indexing_test.py", line 29, in <module> from jax import api File "/usr/local/google/home/mattjj/packages/jax/jax/__init__.py", line 18, in <module> import jax.numpy as np # side-effecting import sets up operator overloads File "/usr/local/google/home/mattjj/packages/jax/jax/numpy/__init__.py", line 18, in <module> from . import linalg File "/usr/local/google/home/mattjj/packages/jax/jax/numpy/linalg.py", line 24, in <module> from .. import lax_linalg File "/usr/local/google/home/mattjj/packages/jax/jax/lax_linalg.py", line 32, in <module> from jaxlib import lapack File "jaxlib/lapack.pyx", line 72, in init lapack cdef char cdiag = 'U' if diag else 'N' File "jaxlib/lapack.pyx", line 37, in lapack.register_cpu_custom_call_target Shape = xla_client.Shape File "/usr/local/google/home/mattjj/miniconda2/envs/myenv/lib/python3.7/site-packages/jaxlib/xla_client.py", line 1561, in register_cpu_custom_call_target c_api.RegisterCpuCustomCallTarget(name, fn) UnboundLocalError: local variable 'name' referenced before assignment ``` I just commented out all the code in lax_linalg.py to get this test to run.
2019-01-12T19:10:13