Python
def import_classifications(f_h5, test=False):
    """Imports Radio Galaxy Zoo classifications into crowdastro.

    f_h5: An HDF5 file.
    test: Flag to run on only 10 subjects. Default False.
    """
    # TODO(MatthewJA): This only works for ATLAS/CDFS. Generalise.
    from . import rgz_data as data
    atlas_positions = f_h5['/atlas/cdfs/numeric'][:, :2]
    atlas_ids = f_h5['/atlas/cdfs/string']['zooniverse_id']
    classification_positions = []
    classification_combinations = []
    classification_usernames = []

    with astropy.io.fits.open(  # RGZ only has CDFS classifications.
            config['data_sources']['atlas_cdfs_image'],
            ignore_blank=True) as atlas_image:
        wcs = astropy.wcs.WCS(atlas_image[0].header).dropaxis(3).dropaxis(2)

    for obj_index, atlas_id in enumerate(atlas_ids):
        subject = data.get_subject(atlas_id.decode('ascii'))
        assert subject['zooniverse_id'] == atlas_ids[obj_index].decode('ascii')
        classifications = data.get_subject_classifications(subject)
        offset, = wcs.all_world2pix([subject['coords']], FITS_CONVENTION)
        # The coords are of the middle of the subject.
        offset[0] -= (config['surveys']['atlas']['fits_width'] *
                      config['surveys']['atlas']['mosaic_scale_x'] // 2)
        offset[1] -= (config['surveys']['atlas']['fits_height'] *
                      config['surveys']['atlas']['mosaic_scale_y'] // 2)

        for c_index, classification in enumerate(classifications):
            user_name = classification.get('user_name', '').encode(
                'ascii', errors='ignore')
            # Usernames actually don't have an upper length limit on RGZ(?!)
            # so I'll cap everything at 50 characters for my own sanity.
            if len(user_name) > 50:
                user_name = user_name[:50]

            classification = parse_classification(classification, subject,
                                                  atlas_positions, wcs,
                                                  offset)
            full_radio = '|'.join(classification.keys())
            for radio, locations in classification.items():
                if not locations:
                    locations = [(None, None)]

                for click_index, location in enumerate(locations):
                    # Check whether the click index is 0 to maintain the
                    # assumption that we only need the first click.
                    pos_row = (obj_index, location[0], location[1],
                               click_index == 0)
                    com_row = (obj_index, full_radio, radio)
                    # A little redundancy here with the index, but we can
                    # assert that they are the same later to check integrity.
                    classification_positions.append(pos_row)
                    classification_combinations.append(com_row)
                    classification_usernames.append(user_name)

    combinations_dtype = [('index', 'int'),
                          ('full_signature',
                           '<S{}'.format(MAX_RADIO_SIGNATURE_LENGTH)),
                          ('signature',
                           '<S{}'.format(MAX_RADIO_SIGNATURE_LENGTH))]
    classification_positions = numpy.array(classification_positions,
                                           dtype=float)
    classification_combinations = numpy.array(classification_combinations,
                                              dtype=combinations_dtype)

    f_h5['/atlas/cdfs/'].create_dataset('classification_positions',
                                        data=classification_positions,
                                        dtype=float)
    f_h5['/atlas/cdfs/'].create_dataset('classification_usernames',
                                        data=classification_usernames,
                                        dtype='<S50')
    f_h5['/atlas/cdfs/'].create_dataset('classification_combinations',
                                        data=classification_combinations,
                                        dtype=combinations_dtype)
Python
def check_raw_data():
    """Validates existence and correctness of raw data files."""
    for source, filename in config['data_sources'].items():
        if source == 'radio_galaxy_zoo_db':
            # Skip the MongoDB name.
            continue

        if not os.path.exists(filename):
            logging.error(
                '{} expected at {} but not found'.format(source, filename))

        if source in config['data_checksums']:
            valid = checksum_file(filename, config['data_checksums'][source])
            if not valid:
                logging.error('{} has incorrect hash'.format(filename))
            else:
                logging.debug('{} has correct hash'.format(filename))
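For orientation, this is a minimal sketch of the config layout check_raw_data assumes; the keys come from the calls above, but every value below is a hypothetical placeholder, not taken from the project's real configuration.

# Hypothetical config layout; real paths and checksums will differ.
config = {
    'data_sources': {
        'radio_galaxy_zoo_db': 'radio',              # MongoDB name, skipped.
        'atlas_cdfs_image': 'data/atlas_cdfs.fits',  # placeholder path
    },
    'data_checksums': {
        'atlas_cdfs_image': '0123456789abcdef0123456789abcdef',  # placeholder hash
    },
}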
Python
def score(self, X, Y):
    """Computes the likelihood of labels and data under the model.

    X: (n_samples, n_features) NumPy array of data.
    Y: (n_labellers, n_samples) NumPy masked array of crowd labels.
    """
    X = numpy.hstack([X, numpy.ones((X.shape[0], 1))])
    return self._likelihood(self.w_, self.a_.reshape((-1, 1)),
                            self.b_.reshape((-1, 1)), X,
                            Y.filled(0), Y.filled(1))
Python
def _likelihood(self, w, a, b, X, Y_0, Y_1):
    """Computes the likelihood of labels and data under a model.

    X: (n_samples, n_features) NumPy array of data.
    Y: (n_labellers, n_samples) NumPy masked array of crowd labels.
    """
    n_examples = X.shape[0]
    exp_p = logistic_regression(w, X)
    exp_a = numpy.ones((n_examples,))
    exp_b = numpy.ones((n_examples,))
    exp_a = numpy.power(a, Y_0).prod(axis=0)
    exp_a *= numpy.power(1 - a, 1 - Y_1).prod(axis=0)
    exp_b *= numpy.power(b, 1 - Y_1).prod(axis=0)
    exp_b *= numpy.power(1 - b, Y_0).prod(axis=0)
    return (exp_a * exp_p.T + exp_b * (1 - exp_p).T).prod()
Python
def serialise(self):
    """Returns a NumPy array representing the optimised parameters."""
    return numpy.concatenate([
        [self.n_labellers_],
        self.a_.ravel(),
        self.b_.ravel(),
        self.w_.ravel(),
    ])
Python
def unserialise(cls, array):
    """Converts a NumPy array into a RaykarClassifier."""
    rc = cls()
    n_annotators = int(array[0])
    array = array[1:]
    rc.a_ = array[:n_annotators]
    rc.b_ = array[n_annotators:n_annotators * 2]
    rc.w_ = array[n_annotators * 2:]
    return rc
Python
def from_path(cls, path):
    """Loads a Results object from a path."""
    if not path.endswith('.h5'):
        path += '.h5'

    with h5py.File(path, 'r') as f:
        methods = json.loads(f.attrs['methods'])
        n_splits = f['results'].shape[1]
        n_examples = f['results'].shape[2]
        assert len(methods) == f['results'].shape[0]
        n_params = f['models'].shape[2]
        model = f.attrs['model']

    return cls(path, methods, n_splits, n_examples, n_params, model)
Python
def pg_means(points, significance=0.01, projections=24):
    """Find a consensus location with the PG-means algorithm.

    points: Array of points with dimension (N, 2).
    significance: Optional. Significance level for increasing the Gaussian
        count.
    projections: Optional. How many projections to try before accepting.
    -> (x, y), boolean of whether PG-means succeeded.
    """
    k = 1
    last_gmm = None
    while True:
        # Fit a Gaussian mixture model with k components.
        gmm = sklearn.mixture.GMM(n_components=k, covariance_type='full')
        try:
            gmm.fit(points)
        except ValueError:
            if last_gmm is None:
                return maybe_mean(points), False
            return last_gmm.means_[last_gmm.weights_.argmax()], False
        last_gmm = gmm

        for _ in range(projections):
            # Project the data to one dimension.
            projection_vector = numpy.random.random(size=(2,))
            projected_points = numpy.dot(points, projection_vector)
            # Project the model to one dimension. We need the CDF in one
            # dimension, so we'll sample some data points and project them.
            n_samples = 1000
            samples = numpy.dot(gmm.sample(n_samples), projection_vector)
            samples.sort()

            def cdf(x):
                for sample, y in zip(samples,
                                     numpy.arange(n_samples) / n_samples):
                    if sample >= x:
                        break
                return y

            _, p_value = scipy.stats.kstest(projected_points,
                                            numpy.vectorize(cdf))
            if p_value < significance:
                # Reject the null hypothesis.
                break
        else:
            # Null hypothesis was not broken.
            return gmm.means_[gmm.weights_.argmax()], True

        k += 1
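A minimal usage sketch, assuming pg_means and its dependencies (numpy, scipy, the maybe_mean helper and the legacy sklearn.mixture.GMM API used above) are importable; the data is synthetic.

import numpy

# Two well-separated 2-D clusters; the consensus should land near the larger one.
points = numpy.vstack([numpy.random.normal((0, 0), 0.1, size=(60, 2)),
                       numpy.random.normal((5, 5), 0.1, size=(40, 2))])
(x, y), success = pg_means(points)
print(x, y, success)  # roughly (0, 0) when the projected KS tests pass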
Python
def _init_wcs():
    """Initialise the ATLAS image WCS. Sets global variable wcs."""
    with astropy.io.fits.open(config['data_sources']['atlas_image'],
                              ignore_blank=True) as atlas_image:
        global wcs
        wcs = astropy.wcs.WCS(atlas_image[0].header).dropaxis(3).dropaxis(2)
Python
def vertical_scatter(xs, ys, style='bx', rotation='horizontal', line=False,
                     x_tick_offset=0, numeric_x=False):
    """Plots a vertical scatter plot.

    xs: List of x labels.
    ys: List of lists of points to scatter vertically.
    style: Plot point style. Default 'bx'.
    rotation: x label rotation. Default 'horizontal'.
    line: Draw lines between corresponding points. Default False.
    x_tick_offset: How far to offset the x tick labels. Default 0.
    numeric_x: Whether the x labels should be treated as numeric.
        Default False.
    """
    if not numeric_x:
        for x in range(len(xs)):
            plt.plot([x] * len(ys[x]), ys[x], style)
        if line:
            assert all(len(y) == len(ys[0]) for y in ys)
            ys_t = list(zip(*ys))
            for y in range(len(ys[0])):
                plt.plot(range(len(xs)), ys_t[y])
        plt.xticks([i + x_tick_offset for i in range(len(xs))], xs,
                   rotation=rotation)
        plt.xlim((-0.5, len(xs) - 0.5))  # Adds a little buffer.
    else:
        for xi, x in enumerate(xs):
            plt.plot([float(x)] * len(ys[xi]), ys[xi], style)
        if line:
            assert all(len(y) == len(ys[0]) for y in ys)
            ys_t = list(zip(*ys))
            for y in range(len(ys[0])):
                plt.plot(xs, ys_t[y])
Python
def violinplot(xs, ys, rotation='horizontal', points=100, x_tick_offset=0,
               facecolour='lightgreen', edgecolour='green'):
    """Plots a violin plot.

    xs: List of x labels.
    ys: List of lists of points to plot vertically.
    rotation: x label rotation. Default 'horizontal'.
    points: Number of points to use in the density estimate.
    x_tick_offset: How far to offset the x tick labels. Default 0.
    facecolour: Colour of the violin plots. Default light green.
    edgecolour: Colour of the violin lines. Default green.
    """
    vp = plt.violinplot(ys, showmeans=True, showextrema=False, points=points)
    # plt.violinplot has no arguments that let us set colours, so we have to
    # do it ourselves. http://stackoverflow.com/a/26291582/1105803
    for pc in vp['bodies']:
        pc.set_facecolor(facecolour)
        pc.set_edgecolor(edgecolour)
    vp['cmeans'].set_color(edgecolour)
    plt.xticks([1 + i + x_tick_offset for i in range(len(xs))], xs,
               rotation=rotation)
    plt.xlim((0.5, len(xs) + 0.5))
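A short usage sketch, assuming violinplot and matplotlib.pyplot as plt are in scope; the method labels and numbers are illustrative only.

import numpy
import matplotlib.pyplot as plt

xs = ['LR', 'RF', 'CNN']  # hypothetical method labels
ys = [numpy.random.normal(85, 2, size=50),
      numpy.random.normal(88, 1, size=50),
      numpy.random.normal(90, 3, size=50)]
violinplot(xs, ys)
plt.ylabel('Balanced accuracy (%)')
plt.show()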
Python
def vertical_scatter_ba(results, targets, ylim=(70, 100), violin=False,
                        minorticks=False, percentage=True, **kwargs):
    """Plot a vertical scatter plot of balanced accuracies.

    results: Results object.
    targets: Target labels.
    ylim: (lower, upper) y axis.
    violin: Plot a violin plot instead. Default False.
    minorticks: Use minor ticks. Default False.
    percentage: Plot percentage rather than raw balanced accuracy.
        Default True.
    kwargs: Keyword arguments passed to vertical_scatter.
    """
    xs = sorted(results.method_idx, key=results.method_idx.get)
    ys = []
    for method in xs:
        y = []
        for split in range(results.n_splits):
            mask = results.get_mask(method, split)
            split_results = results[method, split][mask].round()
            split_targets = targets[mask]
            if len(split_results) == 0:
                continue

            # Calculate balanced accuracy.
            cm = sklearn.metrics.confusion_matrix(split_targets,
                                                  split_results)
            tp = cm[1, 1]
            n, p = cm.sum(axis=1)
            tn = cm[0, 0]
            ba = (tp / p + tn / n) / 2
            if percentage:
                ba *= 100
            y.append(ba)
        logging.info('Average balanced accuracy ({}): {:.02%}'.format(
            method, numpy.mean(y)))
        logging.info('Standard deviation ({}): {:.02%}'.format(
            method, numpy.std(y)))
        ys.append(y)

    if violin:
        violinplot(xs, ys, **kwargs)
    else:
        vertical_scatter(xs, ys, **kwargs)
    plt.ylim(ylim)
    plt.grid(b=True, which='both', axis='y', color='grey', linestyle='-',
             alpha=0.5)
    if minorticks:
        plt.minorticks_on()
        plt.tick_params(axis='x', which='minor', length=0)
    plt.ylabel('Balanced accuracy' + (' (%)' if percentage else ''))
Python
def fillbetween(xs, ys, facecolour='lightgreen', edgecolour='green',
                marker='x', facealpha=1.0, facekwargs=None, **kwargs):
    """Plots a line plot with error represented by filled-between lines.

    xs: List of x values.
    ys: List of lists of y values.
    facecolour: Colour of the filled-between lines. Default light green.
    edgecolour: Colour of the central line. Default green.
    marker: Point marker. Default 'x'.
    facealpha: Alpha value of filled section. Default 1.0.
    facekwargs: Keyword arguments to pass to fill_between. Default {}.
    """
    facekwargs = facekwargs or {}
    means = numpy.mean(ys, axis=1)
    stds = numpy.std(ys, axis=1)
    plt.plot(xs, means, color=edgecolour, marker=marker, **kwargs)
    plt.fill_between(xs, means - stds, means + stds, color=facecolour,
                     alpha=facealpha, linewidth=0, **facekwargs)
    plt.fill_between(xs, means - stds, means + stds, facecolor='None',
                     edgecolor=edgecolour, alpha=0.5, **facekwargs)
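A quick sketch of how fillbetween might be called, assuming it and matplotlib.pyplot as plt are importable; the data here is synthetic.

import numpy
import matplotlib.pyplot as plt

xs = numpy.arange(5)
# 20 noisy repeats per x value; the band shows +/- one standard deviation.
ys = numpy.random.normal(loc=xs[:, None], scale=0.3, size=(5, 20))
fillbetween(xs, ys)
plt.show()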
Python
def create_generator(X, Y):
    """Yields generated images and auxiliary inputs.

    https://github.com/fchollet/keras/issues/3386
    """
    X_im = X[:, n_nonimage_features:].reshape(
        (-1, 1, PATCH_DIAMETER, PATCH_DIAMETER))
    X_au = X[:, :n_nonimage_features]
    while True:
        # Shuffle indices.
        idx = numpy.random.permutation(X.shape[0])
        # Standard image generator.
        datagen = ImageDataGenerator(
            data_format='channels_first',
            horizontal_flip=True,
            vertical_flip=True)
        datagen.fit(X_im)
        # Shuffle the data before batching using known indices.
        batches = datagen.flow(X_im[idx], Y[idx], batch_size=batch_size,
                               shuffle=False)
        idx0 = 0
        for batch in batches:
            idx1 = idx0 + batch[0].shape[0]
            # Yield ([aux, image], label) batches.
            to_yield = ([X_au[idx[idx0:idx1]], batch[0]], batch[1])
            yield to_yield
            idx0 = idx1
            if idx1 >= X.shape[0]:
                break
Python
def check_raw_data(training_h5):
    """Sanity check the input data.

    training_h5: Training HDF5 file.
    """
    def HDF5_type(name, node):
        if isinstance(node, h5py.Dataset):
            logging.info('Dataset: {}'.format(node.name))
            logging.info('\thas shape {}'.format(node.shape))
        else:
            logging.info('\t{} of type {}'.format(node.name, type(node)))

    logging.info('Peeking into HDF5 file')
    training_h5.visititems(HDF5_type)
    logging.info('End file peeking')
Python
def add_label(self, index, label, retrain=True):
    """Adds a label from an oracle.

    index: Index of data point to label.
    label: Label from the oracle.
    """
    self.labels[index] = label
    if retrain:
        self.retrain()
Python
def add_labels(self, indices, labels, retrain=True):
    """Adds labels from an oracle.

    indices: Indices of data points to label.
    labels: Labels from the oracle.
    """
    for index, label in zip(indices, labels):
        self.add_label(index, label, retrain=False)
    if retrain:
        self.retrain()
Python
def score(self, test_xs, test_ts):
    """Finds cross-entropy error on test data."""
    return sklearn.metrics.log_loss(
        test_ts, self.classifier.predict(test_xs))
Python
def distance_from(point):
    """Returns a function that returns the distance from a point.

    Used for keys.
    """
    def _dist(other):
        return numpy.hypot(point[0] - other[0], point[1] - other[1])

    return _dist
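As the docstring suggests, the returned closure is handy as a sort key; a small illustration, assuming distance_from and its numpy import are in scope.

points = [(3, 4), (0, 0), (1, 1)]
# Sort candidate points by their distance from the origin.
print(sorted(points, key=distance_from((0, 0))))  # [(0, 0), (1, 1), (3, 4)]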
Python
def generate(db_path, cache_name, consensus_table, host_table, radio_table,
             atlas=False):
    """Generates the Radio Galaxy Zoo catalogue.

    Warning: table arguments are not validated! This could be dangerous.

    db_path: Path to consensus database.
    cache_name: Name of Gator cache.
    consensus_table: Database table of consensuses.
    host_table: Output database table of RGZ hosts. Will be overwritten!
    radio_table: Output database table of RGZ radio sources. Will be
        overwritten!
    atlas: Whether to only freeze ATLAS subjects. Default False (though this
        function currently only works for True).
    """
    with contextlib.closing(sqlite3.connect(db_path)) as conn:
        conn.row_factory = sqlite3.Row
        cur = conn.cursor()

        cur.execute('DROP TABLE IF EXISTS {}'.format(host_table))
        cur.execute('DROP TABLE IF EXISTS {}'.format(radio_table))
        conn.commit()
        cur.execute('CREATE TABLE {} '
                    '(zooniverse_id TEXT, source TEXT, rgz_name TEXT, '
                    'swire_name TEXT, ra TEXT, dec TEXT, agreement REAL)'
                    ''.format(host_table))
        cur.execute('CREATE TABLE {} '
                    '(rgz_name TEXT, radio_component TEXT, agreement REAL)'
                    ''.format(radio_table))
        conn.commit()

        host_sql = ('INSERT INTO {} (zooniverse_id, source, rgz_name, '
                    'swire_name, ra, dec, agreement) VALUES '
                    '(?, ?, ?, ?, ?, ?, ?)'.format(host_table))
        radio_sql = ('INSERT INTO {} (rgz_name, radio_component, agreement) '
                     'VALUES (?, ?, ?)'.format(radio_table))
        host_params = []
        radio_params = []

        # Store how many hosts have no associated SWIRE object (for
        # debugging).
        n_no_matches = 0
        # Store how many hosts have null consensuses (for debugging).
        n_null = 0

        n_subjects = data.get_all_subjects(atlas=atlas).count()
        for index, subject in enumerate(data.get_all_subjects(atlas=atlas)):
            print('Generating catalogue: {}/{} ({:.02%})'.format(
                index + 1, n_subjects, (index + 1) / n_subjects), end='\r')
            consensuses = cur.execute(
                'SELECT * FROM {} WHERE '
                'zooniverse_id = ?'.format(consensus_table),
                [subject['zooniverse_id']])
            fits = data.get_ir_fits(subject)
            wcs = astropy.wcs.WCS(fits.header)

            for consensus in consensuses:
                # Each consensus represents one AGN.
                if consensus['source_x'] and consensus['source_y']:
                    # Not null.
                    try:
                        rgz_name, swire_name, ra, dec, agreement = make_host(
                            subject, wcs, cache_name, consensus)
                    except CatalogueError:
                        logging.debug('No SWIRE object for %s (%.2f, %.2f).',
                                      subject['zooniverse_id'],
                                      consensus['source_x'],
                                      consensus['source_y'])
                        n_no_matches += 1
                        continue

                    host_params.append(
                        (subject['zooniverse_id'],
                         subject.get('metadata', {}).get('source'),
                         rgz_name, swire_name, ra, dec, agreement))

                    # Get radio components.
                    radio_components = set(  # Set to nix duplicates.
                        consensus['radio_signature'].split(';'))
                    for radio_component in radio_components:
                        radio_params.append((rgz_name, radio_component,
                                             consensus['radio_agreement']))
                else:
                    n_null += 1
                    logging.debug('Skipping null consensus for subject %s.',
                                  subject['zooniverse_id'])

        logging.debug('%d hosts with no associated SWIRE object.',
                      n_no_matches)
        logging.debug('%d hosts with null consensuses.', n_null)
        logging.debug('Writing to database.')

        cur.executemany(host_sql, host_params)
        cur.executemany(radio_sql, radio_params)
        conn.commit()

        # Go back and clear up duplicates. The process is as follows:
        # 1. Check the components table for duplicates.
        # 2. For each duplicate, we want to choose one "true" host. This is
        #    because each component can only belong to one host. We will pick
        #    the host with the highest percentage agreement (though there are
        #    likely better ways to do this).
        # 3. Delete the duplicates and replace them with a new component with
        #    the "true" host and the highest agreement.
        # 4. For each host that no longer has a component, delete it from the
        #    hosts table.
        all_duplicates = cur.execute("""select radio_component from {}
                                        group by radio_component
                                        having count(*) > 1""".format(
                                            radio_table))
        mur = conn.cursor()
        # Number of deleted radio components.
        n_deleted = 0
        for radio_component in all_duplicates:
            name = radio_component['radio_component']
            logging.debug('Removing duplicates for %s.', name)
            best = next(mur.execute("""select rgz_name, agreement from {}
                                       where radio_component = ?
                                       order by agreement desc
                                       limit 1""".format(radio_table),
                                    [name]))
            mur.execute("""delete from {}
                           where radio_component = ?""".format(radio_table),
                        [name])
            n_deleted += mur.rowcount
            mur.execute("""insert into {}
                           (rgz_name, radio_component, agreement)
                           values (?, ?, ?)""".format(radio_table),
                        [best['rgz_name'], name, best['agreement']])
        conn.commit()
        logging.debug('Deleted %d duplicate radio components.', n_deleted)

        logging.debug('Removing hosts with no components.')
        cur.execute("""delete from {0}
                       where rgz_name in (
                           select {0}.rgz_name from {0}
                           left join {1}
                           on {0}.rgz_name = {1}.rgz_name
                           where {1}.rgz_name is null
                       )""".format(host_table, radio_table))
        conn.commit()
        logging.debug('Deleted %d duplicate hosts.', cur.rowcount)

        logging.debug('Removing duplicate hosts.')
        cur.execute("""delete from {0}
                       where rowid not in (select min(rowid)
                                           from {0}
                                           group by rgz_name)""".format(
                                               host_table))
        conn.commit()
Python
def clicks(cs, colour='gray'):
    """Plots a list of RGZ clicks.

    Clicks will be flipped and scaled to match the FITS images.

    cs: List of (x, y) click tuples.
    -> MatPlotLib scatter plot.
    """
    cs = (config['surveys']['atlas']['fits_height'] -
          numpy.array(cs) * config['surveys']['atlas']['click_to_fits'])
    return matplotlib.pyplot.scatter(cs[:, 0], cs[:, 1], color=colour)
Python
def radio(subject):
    """Plots the radio image of a subject.

    subject: RGZ subject.
    -> MatPlotLib image plot.
    """
    return image(data.get_radio(subject))
Python
def subject(s):
    """Shows the IR and contours of a subject.

    s: RGZ subject.
    """
    ir(s)
    contours(s, colour='green')
    matplotlib.pyplot.xlim(0, config['surveys']['atlas']['fits_width'])
    matplotlib.pyplot.ylim(0, config['surveys']['atlas']['fits_height'])
Python
def balanced_accuracy(y_true, y_pred):
    """Computes the balanced accuracy of a predictor.

    y_true: (n_examples,) array of true labels.
    y_pred: (n_examples,) (masked) array of predicted labels.
    -> float or None (if the balanced accuracy isn't defined).
    """
    if hasattr(y_pred, 'mask') and not isinstance(y_pred.mask, bool):
        cm = sklearn.metrics.confusion_matrix(
            y_true[~y_pred.mask], y_pred[~y_pred.mask]).astype(float)
    else:
        cm = sklearn.metrics.confusion_matrix(y_true, y_pred).astype(float)

    tp = cm[1, 1]
    n, p = cm.sum(axis=1)
    tn = cm[0, 0]
    if not n or not p:
        return None

    ba = (tp / p + tn / n) / 2
    return ba
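A small worked example, assuming balanced_accuracy and its numpy/sklearn imports are available; masked predictions are simply left out of the confusion matrix.

import numpy

y_true = numpy.array([0, 0, 1, 1])
y_pred = numpy.ma.masked_array([0, 1, 1, 1],
                               mask=[False, False, False, True])
# Only the three unmasked predictions count: specificity 1/2, recall 1/1,
# so the balanced accuracy is (0.5 + 1.0) / 2.
print(balanced_accuracy(y_true, y_pred))  # 0.75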
Python
def majority_vote(y):
    """Computes the majority vote of a set of crowd labels.

    y: (n_annotators, n_examples) NumPy masked array of labels.
    -> (n_examples,) NumPy array of labels.
    """
    _, n_samples = y.shape
    mv = numpy.zeros((n_samples,))
    for i in range(n_samples):
        labels = y[:, i]

        if labels.mask is False:
            counter = collections.Counter(labels)
        else:
            counter = collections.Counter(labels[~labels.mask])

        if counter:
            mv[i] = max(counter, key=counter.get)
        else:
            # No labels for this data point.
            mv[i] = numpy.random.randint(2)  # ¯\_(ツ)_/¯
    return mv
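A usage sketch, assuming majority_vote and its numpy/collections imports are in scope; a fully masked column gets a random 0/1 label, as the comment above notes.

import numpy

# Three annotators label four examples; NaN marks a missing label.
y = numpy.ma.masked_invalid([[1, 0, 1, numpy.nan],
                             [1, 1, 0, numpy.nan],
                             [0, 1, 1, numpy.nan]])
print(majority_vote(y))  # [1. 1. 1. ?] -- the last entry is random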
Python
def logistic_regression(w, x):
    """Logistic regression classifier model.

    w: Weights w. (n_features,) NumPy array
    x: Data point x_i. (n_features,) NumPy array
    -> float in [0, 1]
    """
    return scipy.special.expit(numpy.dot(x, w.T))
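A one-line check of the model, assuming logistic_regression and its scipy/numpy imports are available.

import numpy

w = numpy.array([1.0, -2.0, 0.5])
x = numpy.array([0.2, 0.1, 1.0])
print(logistic_regression(w, x))  # expit(0.5) ~= 0.622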
Python
def fd(n: int) -> tuple:
    """Calculate nontrivial factors of n using Fermat's difference of
    squares method.

    This method works best when the factors p, q are near sqrt(n). There are
    more optimizations to be made, namely the sieve method described in the
    Wikipedia page for this method.

    Arguments:
        n {int} -- number to factor

    Returns:
        tuple -- (a, b, a + b, a - b), where a + b and a - b are the factors
    """
    # lower bound for a
    a = math.ceil(math.sqrt(n))

    # a must be odd if n cong 1 (mod 4)
    if n % 4 == 1:
        if a % 2 == 0:
            a += 1
    else:
        # a must be even if n cong -1 (mod 4)
        if a % 2 == 1:
            a += 1

    # recall that n = a^2 - b^2
    b = math.sqrt(a**2 - n)
    while not b.is_integer():
        print(a, b)
        a += 2
        b = math.sqrt(a**2 - n)
        if b > n:
            return None

    print("a,b found!")
    print(f"a = {a}")
    print(f"b = {int(b)}")
    return (a, int(b), int(a + b), int(a - b))
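A quick sanity check on a small semiprime whose factors sit close to sqrt(n), which is where Fermat's method shines (assuming fd and its math import are in scope).

# 5959 = 59 * 101 and sqrt(5959) ~= 77.2, so only a couple of iterations are
# needed: a = 80, b = 21, giving the factors a + b = 101 and a - b = 59.
a, b, p, q = fd(5959)
assert p * q == 5959 and (p, q) == (101, 59)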
Python
def populate_courses_in_categories(course_categories, courses):
    """Scrapes a course category webpage and adds all courses found.

    Keyword arguments:
    course_categories -- the relative urls of all course categories
    courses -- a dict the course maps are added to, keyed by course code
    """
    for category in course_categories:
        url = f'https://catalog.upenn.edu{category}index.html'
        html = requests.get(url)
        soup = BeautifulSoup(html.text, "html.parser")

        category_title = soup.find(
            "h1", {"class": "page-title"}).text.split('(')[0]
        category_title = category_title.replace(u'\xa0', u' ')
        print(category_title)

        course_list = soup.find("div", {"class": "sc_sccoursedescs"})
        if course_list is None:
            print(f'No courses for {category}')
            continue

        for course_block in course_list.find_all("div",
                                                 {"class": "courseblock"}):
            title = course_block.find("p", {"class": "courseblocktitle"}).text
            title = title.replace(u'\xa0', u' ')

            description_items = []
            for course_description_extra in course_block.find_all(
                    "p", {"class": "courseblockextra"}):
                description_item = course_description_extra.text.replace(
                    u'\xa0', u' ')
                description_items.append(description_item)

            code, name = title.split(" ", 1)
            code = ''.join(code.split())
            code_info = re.findall(r'(\w+?)(\d+)', code)[0]

            course = {}
            course["title"] = name
            course["prefix"] = code_info[0]
            course["number"] = code_info[1]
            course["description"] = json.dumps(description_items)
            course["prefixTitle"] = category_title
            courses[code] = course
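The prefix/number split relies on a lazy regex; a tiny standalone illustration (the course code here is made up).

import re

code_info = re.findall(r'(\w+?)(\d+)', 'CIS1200')[0]
print(code_info)  # ('CIS', '1200') -- letters become 'prefix', digits 'number'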
Python
def create_course_csv(courses):
    """Creates a csv file from a list of courses

    Keyword arguments:
    courses -- a list of course mappings
    """
    csv_columns = ['Prefix', 'Number', 'Title', 'Category', 'Description',
                   'Comments']
    csv_file = "courses.csv"
    try:
        with open(csv_file, 'w') as csvfile:
            writer = DictWriter(csvfile, fieldnames=csv_columns)
            writer.writeheader()
            for course_code, course_info in courses.items():
                data = {}
                data['Prefix'] = course_info["prefix"]
                data['Number'] = course_info["number"]
                data['Title'] = course_info["title"]
                data['Category'] = course_info["prefixTitle"]
                data['Description'] = course_info["description"]
                data['Comments'] = ""
                writer.writerow(data)
    except IOError:
        print("I/O error")
Python
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a NumPy array
    '''
    img = Image.open(image)
    # Resize so the shorter side is 256 pixels, preserving aspect ratio.
    if img.size[0] > img.size[1]:
        img.thumbnail((30000, 256))
    else:
        img.thumbnail((256, 30000))

    # Centre-crop to 224x224.
    img = img.crop(((img.width - 224) / 2,
                    (img.height - 224) / 2,
                    (img.width - 224) / 2 + 224,
                    (img.height - 224) / 2 + 224))

    # Normalise with the ImageNet mean and standard deviation, then move the
    # channel axis to the front (C, H, W) as PyTorch expects.
    img = np.array(img) / 255
    img = (img - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224,
                                                              0.225])
    img = img.transpose((2, 0, 1))
    return img
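A self-contained smoke test, assuming process_image and its PIL/NumPy imports are in scope; the in-memory image is just a throwaway placeholder.

import io
from PIL import Image

buffer = io.BytesIO()
Image.new('RGB', (400, 300), color=(128, 128, 128)).save(buffer, format='PNG')
buffer.seek(0)
print(process_image(buffer).shape)  # (3, 224, 224): channels-first for PyTorch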
Python
def predict(image_path, model, topk=in_arg.top_k):
    ''' Predict the class (or classes) of an image using a trained deep
        learning model.
    '''
    if torch.cuda.is_available():
        model.cuda()
    else:
        model.to(device)

    img = process_image(image_path)
    image_tensor = torch.from_numpy(img).type(torch.FloatTensor).to("cpu")
    inputs = image_tensor.unsqueeze(0)
    output = model.forward(inputs)

    pb = torch.exp(output)
    top_pb, top_class = pb.topk(topk)
    top_pb = top_pb.tolist()[0]
    top_class = top_class.tolist()[0]

    data = {val: key for key, val in model.class_to_idx.items()}
    top_flow = []
    for i in top_class:
        i_ = "{}".format(data.get(i))
        top_flow.append(cat_to_name.get(i_))
    return top_pb, top_flow
Python
def _keck_one_alt_az_axis(axis: plt.Axes) -> plt.Axes:
    """
    Modify a default polar axis to be set up for altitude-azimuth plotting.
    Be careful! The input axis must be a polar projection already!
    """
    axis.set_theta_zero_location('N')
    axis.set_theta_direction(-1)  # set angle direction to clockwise
    lower_limit_az = np.arange(np.radians(5.3), np.radians(146.3),
                               np.radians(0.1))
    upper_limit_az = np.concatenate(
        (np.arange(np.radians(146.3), np.radians(360.0), np.radians(0.1)),
         np.arange(np.radians(0.0), np.radians(5.4), np.radians(0.1))))
    lower_limit_alt = np.ones_like(lower_limit_az) * 33.3
    upper_limit_alt = np.ones_like(upper_limit_az) * 18
    azimuth_limit = np.concatenate((lower_limit_az, upper_limit_az,
                                    [lower_limit_az[0]]))
    altitude_limit = np.concatenate((lower_limit_alt, upper_limit_alt,
                                     [lower_limit_alt[0]]))
    axis.fill_between(azimuth_limit, altitude_limit, 0, color='k', alpha=0.5,
                      linewidth=0, zorder=2)
    axis.set_rmin(0)
    axis.set_rmax(90)
    axis.set_yticklabels([])
    axis.set_xticks(np.arange(0, 2 * np.pi, np.pi / 6))
    axis.xaxis.set_tick_params(pad=-3)
    axis.yaxis.set_major_locator(ticker.MultipleLocator(15))
    axis.yaxis.set_minor_locator(ticker.NullLocator())
    axis.set_xticklabels(
        ['N', '', '', 'E', '', '', 'S', '', '', 'W', '', ''])
    axis.grid(linewidth=0.5, zorder=1)
    axis.set_xlabel('Keck I Pointing Limits', fontweight='bold')
    return axis
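Usage sketch: the helper expects an axis that is already a polar projection, for example (assuming matplotlib is available alongside the module's np/ticker imports):

import matplotlib.pyplot as plt

fig, axis = plt.subplots(subplot_kw={'projection': 'polar'})
axis = _keck_one_alt_az_axis(axis)  # shades the region outside the pointing limits
plt.show()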
Python
def _format_axis_date_labels(utc_axis: plt.Axes) -> plt.Axes:
    """
    Format axis date labels so that major ticks occur every hour and minor
    ticks occur every 15 minutes. Also creates a new axis with local
    California time as the upper horizontal axis.
    """
    utc_axis.xaxis.set_major_formatter(dates.DateFormatter('%H:%M'))
    utc_axis.xaxis.set_major_locator(dates.HourLocator(interval=1))
    utc_axis.xaxis.set_minor_locator(
        dates.MinuteLocator(byminute=np.arange(0, 60, 15), interval=1))
    pacific_axis = utc_axis.twiny()
    pacific_axis.set_xlim(utc_axis.get_xlim())
    pacific_axis.xaxis.set_major_formatter(
        dates.DateFormatter('%H:%M', tz=pytz.timezone('US/Pacific')))
    pacific_axis.xaxis.set_major_locator(dates.HourLocator(interval=1))
    pacific_axis.xaxis.set_minor_locator(
        dates.MinuteLocator(byminute=np.arange(0, 60, 15), interval=1))
    return pacific_axis
Python
def _get_ephemeris(starting_datetime: str, ending_datetime: str, target: str,
                   step: str = '1m', airmass_lessthan: int | float = 2,
                   skip_daylight: bool = False) -> dict:
    """
    Query the JPL Horizons System and get a dictionary of ephemeris
    information.
    """
    epochs = {'start': starting_datetime, 'stop': ending_datetime,
              'step': step}
    obj = Horizons(id=target, location='568', epochs=epochs)
    return obj.ephemerides(airmass_lessthan=airmass_lessthan,
                           skip_daylight=skip_daylight)
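An illustrative call; it needs network access to JPL Horizons via astroquery, and the target ID and dates below are arbitrary examples.

# Io is Horizons ID 501; location '568' above is Mauna Kea.
eph = _get_ephemeris('2021-01-01 00:00', '2021-01-02 00:00',
                     target='501', step='1h')
print(eph['datetime_str'][0], eph['airmass'][0])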
Python
def _get_eclipse_indices(ephemeris: dict) -> np.ndarray:
    """
    Search through an ephemeris table and find when a satellite is eclipsed
    by Jupiter and it's either night, astronomical or nautical twilight on
    Mauna Kea.
    """
    return np.where((ephemeris['sat_vis'] == 'u') &
                    (ephemeris['solar_presence'] != 'C') &
                    (ephemeris['solar_presence'] != '*'))[0]
Python
def _calculate_angular_separation(self) -> u.Quantity:
    """
    Calculate the angular separation between targets.
    """
    target2_ephemeris = _get_ephemeris(
        self._target1_ephemeris['datetime_str'][0],
        self._target1_ephemeris['datetime_str'][-1],
        target=self._target2_name, airmass_lessthan=3)
    self._target2_ephemeris = target2_ephemeris
    target_1_positions = SkyCoord(ra=self._target1_ephemeris['RA'],
                                  dec=self._target1_ephemeris['DEC'])
    target_2_positions = SkyCoord(ra=target2_ephemeris['RA'],
                                  dec=target2_ephemeris['DEC'])
    return target_1_positions.separation(target_2_positions).to(u.arcsec)
Python
def _consecutive_integers( data: np.ndarray, stepsize: int = 1) -> np.ndarray: """ Find sets of consecutive integers (find independent events in an ephemeris table). """ return np.split(data, np.where(np.diff(data) != stepsize)[0] + 1)
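A worked example of the splitting rule above: runs of consecutive integers become separate arrays.

import numpy as np

print(_consecutive_integers(np.array([1, 2, 3, 7, 8, 12])))
# -> [array([1, 2, 3]), array([7, 8]), array([12])]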
Python
def _find_eclipses(self) -> list[dict]: """ Find the eclipses by first querying the JPL Horizons System in 1-hour intervals, finding the eclipse events, then performs a more refined search around those events in 1-minute intervals. """ data = [] initial_ephemeris = _get_ephemeris(self._starting_datetime, self._ending_datetime, target=self._target, step='1h') eclipses = self._consecutive_integers( _get_eclipse_indices(initial_ephemeris)) if len(eclipses[-1]) == 0: raise Exception('Sorry, no eclipses found!') for eclipse in eclipses: self._target_name = \ initial_ephemeris['targetname'][0].split(' ')[0] starting_time = _convert_string_to_datetime( initial_ephemeris[eclipse[0]]['datetime_str']) starting_time -= datetime.timedelta(days=1) starting_time = _convert_datetime_to_string(starting_time) ending_time = _convert_string_to_datetime( initial_ephemeris[eclipse[-1]]['datetime_str']) ending_time += datetime.timedelta(days=1) ending_time = _convert_datetime_to_string(ending_time) ephemeris = _get_ephemeris(starting_time, ending_time, target=self._target, step='1m') indices = _get_eclipse_indices(ephemeris) refined_ephemeris = _get_ephemeris( ephemeris['datetime_str'][indices[0]], ephemeris['datetime_str'][indices[-1]], target=self._target) data.append(refined_ephemeris) return data
Python
def _print_string(self) -> str: """ Format a terminal-printable summary table of the identified eclipses along with starting/ending times in both UTC and local California time, the duration of the eclipse, the range in airmass and the satellite's relative velocity. """ print(f'\n{len(self._eclipses)} {self._target_name} eclipse(s) ' f'identified between {self._starting_datetime} and ' f'{self._ending_datetime}.\n') df = pd.DataFrame( columns=['Starting Time (Keck/UTC)', 'Ending Time (Keck/UTC)', 'Starting Time (California)', 'Ending Time (California)', 'Duration', 'Airmass Range', 'Relative Velocity']) for eclipse in range(len(self._eclipses)): times = self._eclipses[eclipse]['datetime_str'] airmass = self._eclipses[eclipse]['airmass'] relative_velocity = np.mean(self._eclipses[eclipse]['delta_rate']) starting_time_utc = times[0] ending_time_utc = times[-1] data = { 'Starting Time (Keck/UTC)': _convert_ephemeris_date_to_string(starting_time_utc), 'Ending Time (Keck/UTC)': _convert_ephemeris_date_to_string(ending_time_utc), 'Starting Time (California)': _convert_datetime_to_string( _convert_to_california_time(starting_time_utc)), 'Ending Time (California)': _convert_datetime_to_string( _convert_to_california_time(ending_time_utc)), 'Duration': _calculate_duration(starting_time_utc, ending_time_utc), 'Airmass Range': f"{np.min(airmass):.3f} to {np.max(airmass):.3f}", 'Relative Velocity': f"{relative_velocity:.3f} km/s" } df = pd.concat([df, pd.DataFrame(data, index=[0])]) return pd.DataFrame(df).to_string(index=False, justify='left')
Python
def _plot_line_with_initial_position( axis: plt.Axes, x: ArrayLike, y: u.Quantity, color: str, label: str = None, radius: u.Quantity = None) -> None: """ Plot a line with a scatterplot point at the starting position. Useful so I know on different plots which point corresponds to the beginning of the eclipse. Update 2022-05-11: now includes Jupiter's angular diameter. """ axis.plot(x, y, color=color, linewidth=1) if radius is not None: axis.fill_between(x, y.value+radius.value, y.value-radius.value, color=color, linewidth=0, alpha=0.25) axis.scatter(x[0], y[0], color=color, edgecolors='none', s=9) if label is not None: axis.annotate(label, xy=(x[0], y[0].value), va='center', ha='right', xytext=(-3, 0), fontsize=6, textcoords='offset pixels', color=color)
Python
def save_summary_graphics(self, save_directory: str = Path.cwd()) -> None: """ Save a summary graphic of each identified eclipse to a specified directory. """ for eclipse in range(len(self._eclipses)): # get relevant quantities times = self._eclipses[eclipse]['datetime_str'] starting_time = times[0] ending_time = times[-1] duration = _calculate_duration(starting_time, ending_time) times = dates.datestr2num(times) polar_angle = 'unknown' observer = Observer.at_site('Keck') sunset = observer.sun_set_time( Time(_convert_string_to_datetime(starting_time)), which='nearest') sunset = _convert_datetime_to_string(sunset.datetime) sunrise = observer.sun_rise_time( Time(_convert_string_to_datetime(ending_time)), which='nearest') sunrise = _convert_datetime_to_string(sunrise.datetime) # make figure and place axes fig = plt.figure(figsize=(5, 4), constrained_layout=True) gs = gridspec.GridSpec(nrows=2, ncols=2, width_ratios=[1, 1.5], figure=fig) info_axis = fig.add_subplot(gs[0, 0]) info_axis.set_frame_on(False) info_axis.set_xticks([]) info_axis.set_yticks([]) alt_az_polar_axis = _keck_one_alt_az_axis( fig.add_subplot(gs[1, 0], projection='polar')) airmass_axis_utc = fig.add_subplot(gs[0, 1]) airmass_axis_utc.set_ylabel('Airmass', fontweight='bold') primary_sep_axis_utc = fig.add_subplot(gs[1, 1]) primary_sep_axis_utc.set_ylabel('Angular Separation [arcsec]', fontweight='bold') # plot data self._plot_line_with_initial_position( alt_az_polar_axis, np.radians(self._eclipses[eclipse]['AZ']), self._eclipses[eclipse]['EL'], color='k') self._plot_line_with_initial_position( airmass_axis_utc, times, self._eclipses[eclipse]['airmass'], color='k') airmass_axis_california = _format_axis_date_labels( airmass_axis_utc) for ind, target in enumerate( [target_info[key]['ID'] for key in target_info.keys()]): angular_separation = _AngularSeparation( self._eclipses[eclipse], target) # get Jupiter's average polar angle rotation when calculating # it's ephemerides radius = 0 * u.arcsec if target == '599': polar_angle = np.mean( angular_separation.target_2_ephemeris['NPole_ang']) radius = angular_separation.angular_radius if np.sum(angular_separation.values != 0): self._plot_line_with_initial_position( primary_sep_axis_utc, times, angular_separation.values, color=target_info[ list(target_info.keys())[ind]]['color'], label=angular_separation.target_2_ephemeris[ 'targetname'][0][0], radius=radius) primary_sep_axis_california = _format_axis_date_labels( primary_sep_axis_utc) # information string, it's beastly but I don't know a better way of # doing it... 
        info_string = 'California Start:' + '\n'
        info_string += 'California End:' + '\n' * 2
        info_string += 'Keck Start:' + '\n'
        info_string += 'Keck End:' + '\n' * 2
        info_string += 'Keck Sunset:' + '\n'
        info_string += 'Keck Sunrise:' + '\n' * 2
        info_string += f'Duration: {duration}' + '\n'
        info_string += 'Jupiter North Pole Angle: '
        info_string += fr"{polar_angle:.1f}$\degree$" + '\n'
        info_string += f'{self._target_name} Relative Velocity: '
        info_string += \
            fr"${np.mean(self._eclipses[eclipse]['delta_rate']):.3f}$ km/s"
        times_string = _convert_datetime_to_string(
            _convert_to_california_time(starting_time))
        times_string += '\n'
        times_string += _convert_datetime_to_string(
            _convert_to_california_time(ending_time))
        times_string += '\n' * 2
        times_string += f'{starting_time} UTC' + '\n'
        times_string += f'{ending_time} UTC' + '\n'
        times_string += '\n'
        times_string += f'{sunset} UTC' + '\n'
        times_string += f'{sunrise} UTC'
        info_axis.text(0.05, 0.95, info_string, linespacing=1.67,
                       ha='left', va='top', fontsize=6)
        info_axis.text(0.4, 0.95, times_string, linespacing=1.67,
                       ha='left', va='top', transform=info_axis.transAxes,
                       fontsize=6)
        info_axis.set_title('Eclipse Information', fontweight='bold')

        # set axis labels, limits and other parameters
        airmass_axis_california.set_xlabel('Time (California)',
                                           fontweight='bold')
        airmass_axis_utc.set_xticklabels([])
        primary_sep_axis_utc.set_xlabel('Time (UTC)', fontweight='bold')
        primary_sep_axis_california.set_xticklabels([])
        alt_az_polar_axis.set_rmin(90)
        alt_az_polar_axis.set_rmax(0)
        airmass_axis_utc.set_ylim(1, 2)
        primary_sep_axis_utc.set_ylim(bottom=0)

        # save the figure
        filename_date_str = datetime.datetime.strftime(
            _convert_string_to_datetime(starting_time), '%Y-%m-%d')
        filepath = Path(save_directory,
                        f'{self._target_name.lower()}_'
                        f'{filename_date_str.lower()}.pdf')
        if not filepath.parent.exists():
            # create the missing parent directory, not a directory named after the PDF itself
            filepath.parent.mkdir(parents=True)
        plt.savefig(filepath)
        plt.close(fig)
Python
def _convert_string_to_datetime(time_string: str) -> datetime.datetime: """ Convert an ephemeris table datetime string to a Python datetime object. """ return datetime.datetime.strptime(time_string, '%Y-%b-%d %H:%M')
Python
def _convert_datetime_to_string(datetime_object: datetime.datetime) -> str: """ Convert a Python datetime object to a string with the format YYYY-Mon-DD HH:MM, with the timezone abbreviation appended when the datetime is timezone-aware. """ return datetime.datetime.strftime(datetime_object, '%Y-%b-%d %H:%M %Z').strip()
Python
def _convert_ephemeris_date_to_string(ephemeris_datetime: str) -> str: """ Ensures an ephemeris datetime is in the proper format. """ return _convert_datetime_to_string( _convert_string_to_datetime(ephemeris_datetime))
Python
def _convert_to_california_time(utc_time_string: str) -> datetime.datetime: """ Convert a UTC datetime string to local time at Caltech. """ datetime_object = _convert_string_to_datetime(utc_time_string) timezone = pytz.timezone('America/Los_Angeles') datetime_object = pytz.utc.localize(datetime_object) return datetime_object.astimezone(timezone)
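A quick round-trip example of the conversion above (values chosen for illustration): 10:00 UTC on 2021 June 8 falls in Pacific Daylight Time, UTC-7.

california = _convert_to_california_time('2021-Jun-08 10:00')
print(_convert_datetime_to_string(california))  # -> 2021-Jun-08 03:00 PDT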
Python
def _calculate_duration(starting_time: str, ending_time: str) -> str: """ Determine duration between two datetime strings to minute precision. """ duration = _convert_string_to_datetime( ending_time) - _convert_string_to_datetime(starting_time) minutes, seconds = divmod(duration.seconds, 60) hours, minutes = divmod(minutes, 60) return f'{hours}:{minutes:0>2}'
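An illustrative call to the duration helper above; the output is hours and zero-padded minutes.

print(_calculate_duration('2021-Jun-08 10:00', '2021-Jun-08 12:34'))  # -> 2:34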
Python
def data_setup(): """Sets up logging, random seeds and corpus""" # global variables # Set the random seed manually for reproducibility. random.seed(g.args.seed) np.random.seed(g.args.seed) torch.manual_seed(g.args.seed) if torch.cuda.is_available(): torch.cuda.manual_seed_all(g.args.seed) torch.cuda.set_device(g.args.local_rank) g.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') ############################################################################### # Load data ############################################################################### g.corpus = get_lm_corpus(g.args.data, g.args.dataset, use_bpe=g.args.bpe, valid_custom=g.args.valid_custom) g.ntokens = len(g.corpus.vocab) g.va_iter, g.te_iter = [ g.corpus.get_dist_iterator(split, bsz=g.args.batch_size * 2, bptt=g.args.tgt_len, rank=util.get_global_rank(), max_rank=util.get_world_size(), device=g.device, ext_len=g.args.ext_len) for split in ('valid', 'test') ] if g.args.valid_custom: g.va_custom_iter = g.corpus.get_dist_iterator('valid_custom', bsz=g.args.batch_size * 2, bptt=g.args.tgt_len, rank=util.get_global_rank(), max_rank=util.get_world_size(), device=g.device, ext_len=g.args.ext_len)
Python
def predict(self, test_data, fcn_obs, par=None): """ Model predictions based on test points and the kernel parameters. Parameters ---------- test_data : ndarray Test points where to generate data. fcn_obs : ndarray Observed function values at the point-set locations. par : ndarray Kernel parameters, default `par=None`. Returns ------- mean : ndarray Model predictive mean at the test point locations. var : ndarray Model predictive variance at the test point locations. Notes ----- This is an abstract method. Implementation needs to be provided by the subclass. """ pass
Python
def neg_log_marginal_likelihood(self, log_par, fcn_obs, x_obs, jitter): """ Negative logarithm of marginal likelihood of the model given the kernel parameters and the function observations. Parameters ---------- log_par : ndarray Logarithm of the kernel parameters. fcn_obs : ndarray Observed function values at the inputs supplied in `x_obs`. x_obs : ndarray Function inputs. jitter : ndarray Regularization term for kernel matrix inversion. Returns ------- float Negative log marginal likelihood. Notes ----- Intends to be used as an objective function passed into the optimizer, thus it needs to subscribe to certain implementation conventions. """ pass
Python
def exp_model_variance(self, fcn_obs): """ Expected model variance given the function observations and the kernel parameters. Notes ----- This is an abstract method. Implementation needs to be provided by the subclass and should be easily accomplished using the kernel expectation method from the `Kernel` class. Parameters ---------- fcn_obs : numpy.ndarray Observed function values at the point-set locations. Returns ------- float Expected model variance. """ pass
Python
def integral_variance(self, fcn_obs, par=None): """ Integral variance given the function value observations and the kernel parameters. Notes ----- This is an abstract method. Implementation needs to be provided by the subclass and should be easily accomplished using the kernel expectation method from the `Kernel` class. Parameters ---------- fcn_obs : numpy.ndarray Observed function values at the point-set locations. par : numpy.ndarray Kernel parameters, default `par=None`. Returns ------- float Variance of the integral. """ pass
Python
def synthetic_demo(steps=250, mc_sims=5000): """ An experiment replicating the conditions of the synthetic example in _[1] used for testing non-additive student's t sigma-point filters. Parameters ---------- steps mc_sims Returns ------- """ # generate data sys = SyntheticSys() x, z = sys.simulate(steps, mc_sims) # load data from mat-file # from scipy.io import loadmat # datadict = loadmat('synth_data', variable_names=('x', 'y')) # x, z = datadict['x'][:, 1:, :], datadict['y'][:, 1:, :] # init SSM for the filter ssm = SyntheticSSM() # kernel parameters for TPQ and GPQ filters # TPQ Student a, b = 10, 30 par_dyn_tp = np.array([[0.4, a, a]]) par_obs_tp = np.array([[0.4, b, b, b, b]]) # par_dyn_tp = np.array([[1.0, 1.7, 1.7]]) # par_obs_tp = np.array([[1.1, 3.0, 3.0, 3.0, 3.0]]) # GPQ Student par_dyn_gpqs = np.array([[1.0, 5, 5]]) par_obs_gpqs = np.array([[0.9, 4, 4, 4, 4]]) # GPQ Kalman par_dyn_gpqk = np.array([[1.0, 2.0, 2.0]]) par_obs_gpqk = np.array([[1.0, 2.0, 2.0, 2.0, 2.0]]) # parameters of the point-set par_pt = {'kappa': None} # init filters filters = ( # ExtendedStudent(ssm), FSQStudent(ssm, kappa=None), # UnscentedKalman(ssm, kappa=-1), TPQStudent(ssm, par_dyn_tp, par_obs_tp, dof=4.0, dof_tp=4.0, point_par=par_pt), # GPQStudent(ssm, par_dyn_gpqs, par_obs_gpqs), # TPQKalman(ssm, par_dyn_gpqk, par_obs_gpqk, points='fs', point_hyp=par_pt), # GPQKalman(ssm, par_dyn_gpqk, par_obs_gpqk, points='fs', point_hyp=par_pt), ) # assign weights approximated by MC with lots of samples # very dirty code pts = filters[1].tf_dyn.model.points kern = filters[1].tf_dyn.model.kernel wm, wc, wcc, Q = rbf_student_mc_weights(pts, kern, int(1e6), 1000) for f in filters: if isinstance(f.tf_dyn, BQTransform): f.tf_dyn.wm, f.tf_dyn.Wc, f.tf_dyn.Wcc = wm, wc, wcc f.tf_dyn.Q = Q pts = filters[1].tf_meas.model.points kern = filters[1].tf_meas.model.kernel wm, wc, wcc, Q = rbf_student_mc_weights(pts, kern, int(1e6), 1000) for f in filters: if isinstance(f.tf_meas, BQTransform): f.tf_meas.wm, f.tf_meas.Wc, f.tf_meas.Wcc = wm, wc, wcc f.tf_meas.Q = Q mf, Pf = run_filters(filters, z) rmse_avg, lcr_avg = eval_perf_scores(x, mf, Pf) # print out table import pandas as pd f_label = [f.__class__.__name__ for f in filters] m_label = ['MEAN_RMSE', 'MAX_RMSE', 'MEAN_INC', 'MAX_INC'] data = np.array([rmse_avg.mean(axis=0), rmse_avg.max(axis=0), lcr_avg.mean(axis=0), lcr_avg.max(axis=0)]).T table = pd.DataFrame(data, f_label, m_label) print(table) # print kernel parameters parlab = ['alpha'] + ['ell_{}'.format(d+1) for d in range(4)] partable = pd.DataFrame(np.vstack((np.hstack((par_dyn_tp.squeeze(), np.zeros((2,)))), par_obs_tp)), columns=parlab, index=['dyn', 'obs']) print() print(partable)
Python
def dyn_fcn(self, x, q, *args): """ Model describing an object in 2D plane moving with constant speed (magnitude of the velocity vector) and turning with a constant angular rate (executing a coordinated turn). Parameters ---------- x q args Returns ------- """ om = x[4] a = np.sin(om * self.dt) b = np.cos(om * self.dt) c = np.sin(om * self.dt) / om if om != 0 else self.dt d = (1 - np.cos(om * self.dt)) / om if om != 0 else 0 mdyn = np.array([[1, c, 0, -d, 0], [0, b, 0, -a, 0], [0, d, 1, c, 0], [0, a, 0, b, 0], [0, 0, 0, 0, 1]]) return mdyn.dot(x) + q
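For reference, the matrix assembled above is the standard constant-turn-rate transition model; writing it out (with sampling period \Delta t, turn rate \omega = x_5, and the state ordering [x, \dot{x}, y, \dot{y}, \omega] inferred from the code) makes the structure easier to check:

x_{k+1} =
\begin{bmatrix}
1 & \frac{\sin\omega\Delta t}{\omega} & 0 & -\frac{1-\cos\omega\Delta t}{\omega} & 0 \\
0 & \cos\omega\Delta t & 0 & -\sin\omega\Delta t & 0 \\
0 & \frac{1-\cos\omega\Delta t}{\omega} & 1 & \frac{\sin\omega\Delta t}{\omega} & 0 \\
0 & \sin\omega\Delta t & 0 & \cos\omega\Delta t & 0 \\
0 & 0 & 0 & 0 & 1
\end{bmatrix} x_k + q_k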
Python
def meas_fcn(self, x, r, *args): """ Range and bearing measurement from the sensor to the moving object. Parameters ---------- x r args Returns ------- """ rang = np.sqrt(x[0] ** 2 + x[2] ** 2) theta = np.arctan2(x[2], x[0]) return np.asarray([rang, theta]) + r
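In equation form, the measurement model above is the usual range-and-bearing observation (with the position components x_1, x_3 as indexed in the code and r the measurement noise):

h(x) = \begin{bmatrix} \sqrt{x_1^2 + x_3^2} \\ \operatorname{atan2}(x_3, x_1) \end{bmatrix} + r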
Python
def sum_of_squares(x, pars, dx=False):
    """Sum of squares test function.

    If x is a Gaussian random variable, then x.T.dot(x) is chi-squared distributed with mean d and variance 2d,
    where d is the dimension of x.
    """
    if not dx:
        return np.atleast_1d(x.T.dot(x))
    else:
        return np.atleast_1d(2 * x)
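A quick Monte Carlo sanity check of the chi-squared moments quoted in the docstring (mean d, variance 2d); the dimension and sample size are arbitrary.

import numpy as np

d, n = 5, 100_000
samples = np.random.randn(d, n)
sos_values = np.sum(samples ** 2, axis=0)
print(sos_values.mean(), sos_values.var())  # both should be close to d = 5 and 2d = 10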
Python
def kl_div(mu0, sig0, mu1, sig1): """KL divergence between two Gaussians. """ k = 1 if np.isscalar(mu0) else mu0.shape[0] sig0, sig1 = np.atleast_2d(sig0, sig1) dmu = mu1 - mu0 dmu = np.asarray(dmu) det_sig0 = np.linalg.det(sig0) det_sig1 = np.linalg.det(sig1) inv_sig1 = np.linalg.inv(sig1) kl = 0.5 * (np.trace(np.dot(inv_sig1, sig0)) + np.dot(dmu.T, inv_sig1).dot(dmu) + np.log(det_sig1 / det_sig0) - k) return np.asscalar(kl)
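The closed-form expression implemented above, for Gaussians \mathcal{N}_0(\mu_0, \Sigma_0) and \mathcal{N}_1(\mu_1, \Sigma_1) in k dimensions:

\mathrm{KL}(\mathcal{N}_0 \,\|\, \mathcal{N}_1) = \frac{1}{2}\left[\operatorname{tr}\!\left(\Sigma_1^{-1}\Sigma_0\right) + (\mu_1-\mu_0)^{\top}\Sigma_1^{-1}(\mu_1-\mu_0) + \ln\frac{\det\Sigma_1}{\det\Sigma_0} - k\right]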
Python
def taylor_gpqd_demo(f):
    """Compares performance of GPQ+D-RBF transform w/ finite lengthscale and Linear transform."""
    d = 2  # dimension
    ker_par_gpqd_taylor = np.array([[1.0, 1.0]])  # alpha = 1.0, ell_1 = 1.0
    ker_par_gpq = np.array([[1.0] + d*[1.0]])
    # function to test on
    f = toa  # sum_of_squares
    transforms = (
        LinearizationTransform(d),
        TaylorGPQDTransform(d, ker_par_gpqd_taylor),
        GaussianProcessTransform(d, 1, point_str='ut', kern_par=ker_par_gpq),
        GaussianProcessDerTransform(d, point_str='ut', kern_par=ker_par_gpq),
        UnscentedTransform(d, kappa=0.0),
        # MonteCarlo(d, n=int(1e4)),
    )
    mean = np.array([3, 0])
    cov = np.array([[1, 0],
                    [0, 10]])
    for ti, t in enumerate(transforms):
        mean_f, cov_f, cc = t.apply(f, mean, cov, None)
        print("{}: mean: {}, cov: {}".format(t.__class__.__name__, mean_f, cov_f))
Python
def gpq_int_var_demo(): """Compares integral variances of GPQ and GPQ+D by plotting.""" d = 1 f = UNGMTransition(GaussRV(d), GaussRV(d)).dyn_eval mean = np.zeros(d) cov = np.eye(d) kpar = np.array([[10.0] + d * [0.7]]) gpq = GaussianProcessTransform(d, 1, kern_par=kpar, kern_str='rbf', point_str='ut', point_par={'kappa': 0.0}) gpqd = GaussianProcessDerTransform(d, 1, kern_par=kpar, point_str='ut', point_par={'kappa': 0.0}) mct = MonteCarloTransform(d, n=1e4) mean_gpq, cov_gpq, cc_gpq = gpq.apply(f, mean, cov, np.atleast_1d(1.0)) mean_gpqd, cov_gpqd, cc_gpqd = gpqd.apply(f, mean, cov, np.atleast_1d(1.0)) mean_mc, cov_mc, cc_mc = mct.apply(f, mean, cov, np.atleast_1d(1.0)) xmin_gpq = norm.ppf(0.0001, loc=mean_gpq, scale=gpq.model.integral_var) xmax_gpq = norm.ppf(0.9999, loc=mean_gpq, scale=gpq.model.integral_var) xmin_gpqd = norm.ppf(0.0001, loc=mean_gpqd, scale=gpqd.model.integral_var) xmax_gpqd = norm.ppf(0.9999, loc=mean_gpqd, scale=gpqd.model.integral_var) xgpq = np.linspace(xmin_gpq, xmax_gpq, 500) ygpq = norm.pdf(xgpq, loc=mean_gpq, scale=gpq.model.integral_var) xgpqd = np.linspace(xmin_gpqd, xmax_gpqd, 500) ygpqd = norm.pdf(xgpqd, loc=mean_gpqd, scale=gpqd.model.integral_var) plt.figure() plt.plot(xgpq, ygpq, lw=2, label='gpq') plt.plot(xgpqd, ygpqd, lw=2, label='gpq+d') plt.gca().add_line(Line2D([mean_mc, mean_mc], [0, 150], linewidth=2, color='k')) plt.legend() plt.show()
Python
def gpq_kl_demo(): """Compares moment transforms in terms of symmetrized KL divergence.""" # input dimension d = 2 # unit sigma-points pts = SphericalRadialTransform.unit_sigma_points(d) # derivative mask, which derivatives to use dmask = np.arange(pts.shape[1]) # RBF kernel hyper-parameters hyp = { 'sos': np.array([[10.0] + d*[6.0]]), 'rss': np.array([[10.0] + d*[0.2]]), 'toa': np.array([[10.0] + d*[3.0]]), 'doa': np.array([[1.0] + d*[2.0]]), 'rdr': np.array([[10.0] + d*[5.0]]), } # baseline: Monte Carlo transform w/ 20,000 samples mc_baseline = MonteCarloTransform(d, n=2e4) # tested functions # rss has singularity at 0, therefore no derivative at 0 # toa does not have derivative at 0, for d = 1 # rss, toa and sos can be tested for all d > 0; physically d=2,3 make sense # radar and doa only for d = 2 test_functions = ( # sos, toa, rss, doa, rdr, ) # fix seed np.random.seed(3) # moments of the input Gaussian density mean = np.zeros(d) cov_samples = 100 # space allocation for KL divergence kl_data = np.zeros((3, len(test_functions), cov_samples)) re_data_mean = np.zeros((3, len(test_functions), cov_samples)) re_data_cov = np.zeros((3, len(test_functions), cov_samples)) print('Calculating symmetrized KL-divergences using {:d} covariance samples...'.format(cov_samples)) for i in trange(cov_samples): # random PD matrix a = np.random.randn(d, d) cov = a.dot(a.T) a = np.diag(1.0 / np.sqrt(np.diag(cov))) # 1 on diagonal cov = a.dot(cov).dot(a.T) for idf, f in enumerate(test_functions): # print "Testing {}".format(f.__name__) mean[:d - 1] = 0.2 if f.__name__ in 'rss' else mean[:d - 1] mean[:d - 1] = 3.0 if f.__name__ in 'doa rdr' else mean[:d - 1] jitter = 1e-8 * np.eye(2) if f.__name__ == 'rdr' else 1e-8 * np.eye(1) # baseline moments using Monte Carlo mean_mc, cov_mc, cc = mc_baseline.apply(f, mean, cov, None) # tested moment transforms transforms = ( SphericalRadialTransform(d), GaussianProcessTransform(d, 1, kern_par=hyp[f.__name__], point_str='sr'), GaussianProcessDerTransform(d, 1, kern_par=hyp[f.__name__], point_str='sr', which_der=dmask), ) for idt, t in enumerate(transforms): # apply transform mean_t, cov_t, cc = t.apply(f, mean, cov, None) # calculate KL distance to the baseline moments kl_data[idt, idf, i] = kl_div_sym(mean_mc, cov_mc + jitter, mean_t, cov_t + jitter) re_data_mean[idt, idf, i] = rel_error(mean_mc, mean_t) re_data_cov[idt, idf, i] = rel_error(cov_mc, cov_t) # average over MC samples kl_data = kl_data.mean(axis=2) re_data_mean = re_data_mean.mean(axis=2) re_data_cov = re_data_cov.mean(axis=2) # put into pandas dataframe for nice printing and latex output row_labels = [t.__class__.__name__ for t in transforms] col_labels = [f.__name__ for f in test_functions] kl_df = pd.DataFrame(kl_data, index=row_labels, columns=col_labels) re_mean_df = pd.DataFrame(re_data_mean, index=row_labels, columns=col_labels) re_cov_df = pd.DataFrame(re_data_cov, index=row_labels, columns=col_labels) return kl_df, re_mean_df, re_cov_df
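The score reported above relies on kl_div_sym, which is not shown in this excerpt; presumably it is the usual symmetrization of the directed divergence,

\mathrm{SKL}(p, q) = \frac{1}{2}\left[\mathrm{KL}(p\,\|\,q) + \mathrm{KL}(q\,\|\,p)\right]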
Python
def gpq_sos_demo(): """Sum of squares analytical moments compared with GPQ, GPQ+D and Spherical Radial transforms.""" # input dimensions dims = [1, 5, 10, 25] sos_data = np.zeros((6, len(dims))) ivar_data = np.zeros((3, len(dims))) ivar_data[0, :] = dims for di, d in enumerate(dims): # input mean and covariance mean_in, cov_in = np.zeros(d), np.eye(d) # unit sigma-points pts = SphericalRadialTransform.unit_sigma_points(d) # derivative mask, which derivatives to use dmask = np.arange(pts.shape[1]) # RBF kernel hyper-parameters hyp = { 'gpq': np.array([[1.0] + d*[10.0]]), 'gpqd': np.array([[1.0] + d*[10.0]]), } transforms = ( SphericalRadialTransform(d), GaussianProcessTransform(d, 1, kern_par=hyp['gpq'], point_str='sr'), GaussianProcessDerTransform(d, 1, kern_par=hyp['gpqd'], point_str='sr', which_der=dmask), ) ivar_data[1, di] = transforms[1].model.integral_var ivar_data[2, di] = transforms[2].model.integral_var mean_true, cov_true = d, 2 * d # print "{:<15}:\t {:.4f} \t{:.4f}".format("True moments", mean_true, cov_true) for ti, t in enumerate(transforms): m, c, cc = t.apply(sos, mean_in, cov_in, None) sos_data[ti, di] = np.asscalar(m) sos_data[ti + len(transforms), di] = np.asscalar(c) # print "{:<15}:\t {:.4f} \t{:.4f}".format(t.__class__.__name__, np.asscalar(m), np.asscalar(c)) row_labels = [t.__class__.__name__ for t in transforms] col_labels = [str(d) for d in dims] sos_table = pd.DataFrame(sos_data, index=row_labels * 2, columns=col_labels) ivar_table = pd.DataFrame(ivar_data[1:, :], index=['GPQ', 'GPQ+D'], columns=col_labels) return sos_table, ivar_table, ivar_data
Python
def kern_rbf_der(xs, x, alpha=1.0, el=1.0, which_der=None): """RBF kernel w/ derivatives.""" x, xs = np.atleast_2d(x), np.atleast_2d(xs) D, N = x.shape Ds, Ns = xs.shape assert Ds == D which_der = np.arange(N) if which_der is None else which_der Nd = len(which_der) # points w/ derivative observations # extract hypers # alpha, el, jitter = hypers['sig_var'], hypers['lengthscale'], hypers['noise_var'] iLam = np.diag(el ** -1 * np.ones(D)) iiLam = np.diag(el ** -2 * np.ones(D)) x = iLam.dot(x) # sqrt(Lambda^-1) * X xs = iLam.dot(xs) Kff = np.exp(2 * np.log(alpha) - 0.5 * maha(xs.T, x.T)) # cov(f(xi), f(xj)) x = iLam.dot(x) # Lambda^-1 * X xs = iLam.dot(xs) XmX = xs[..., na] - x[:, na, :] # pair-wise differences Kfd = np.zeros((Ns, D * Nd)) # cov(f(xi), df(xj)) Kdd = np.zeros((D * Nd, D * Nd)) # cov(df(xi), df(xj)) for i in range(Ns): for j in range(Nd): jstart, jend = j * D, j * D + D j_d = which_der[j] Kfd[i, jstart:jend] = Kff[i, j_d] * XmX[:, i, j_d] for i in range(Nd): for j in range(Nd): istart, iend = i * D, i * D + D jstart, jend = j * D, j * D + D i_d, j_d = which_der[i], which_der[j] # indices of points with derivatives Kdd[istart:iend, jstart:jend] = Kff[i_d, j_d] * (iiLam - np.outer(XmX[:, i_d, j_d], XmX[:, i_d, j_d])) return Kff, Kfd, Kdd
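Under the usual RBF conventions, the three blocks returned above correspond to the kernel and its cross-covariances with function derivatives; with \Delta = x - x' and \Lambda = \mathrm{diag}(\ell^2):

k(x, x') = \alpha^2 \exp\!\left(-\tfrac{1}{2}\,\Delta^{\top}\Lambda^{-1}\Delta\right), \qquad
\operatorname{cov}\!\left[f(x), \nabla f(x')\right] = k(x, x')\,\Delta^{\top}\Lambda^{-1}, \qquad
\operatorname{cov}\!\left[\nabla f(x), \nabla f(x')\right] = k(x, x')\left(\Lambda^{-1} - \Lambda^{-1}\Delta\Delta^{\top}\Lambda^{-1}\right)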
Python
def figsize(self, w_scale=1.0, h_scale=1.0): """ Calculates figure width and height given the width and height scale. Parameters ---------- w_scale: float Figure width scale. h_scale: float Figure height scale. Returns ------- list Figure width and height in inches. """ fig_width = self.fig_width_pt * self.INCH_PER_PT * w_scale # width in inches fig_height = fig_width * self.PHI * h_scale # height in inches return [fig_width, fig_height]
Python
def update_default_figsize(self, fig_width_pt):
    r"""
    Updates the default figure size used for saving.

    Parameters
    ----------
    fig_width_pt : float
        Width of the figure in points, usually obtained from the journal specs or using the LaTeX command
        ``\the\columnwidth``.

    Returns
    -------

    """
    self.fig_width_pt = fig_width_pt
    mpl.rcParams.update({"figure.figsize": self.figsize()})
Python
def savefig(filename): """ Save figure to PGF. PDF copy created for viewing convenience. Parameters ---------- filename Returns ------- """ plt.savefig('{}.pgf'.format(filename)) plt.savefig('{}.pdf'.format(filename))
Python
def apply(self, f, mean, cov, fcn_pars, tf_pars=None): """ Transform random variable with given mean and covariance. Parameters ---------- f : function Handle of the nonlinear transforming function acting on the input random variable. mean : (dim, ) ndarray Mean of the input random variable. cov : (dim, dim) ndarray Covariance of the input random variable. fcn_pars : ndarray Parameters of the nonlinear transforming function. tf_pars : ndarray, optional Parameters of the moment transform. Returns ------- mean_f : ndarray Mean of the transformed random variable. cov_f : ndarray Covariance of the transformed random variable. cov_fx : ndarray Covariance between the transformed random variable and the input random variable. """ pass
Python
def apply(self, f, mean, cov, fcn_pars, tf_pars=None): """ Transform random variable with given mean and covariance. Parameters ---------- f : function Handle of the nonlinear transforming function acting on the input random variable. mean : (dim, ) ndarray Mean of the input random variable. cov : (dim, dim) ndarray Covariance of the input random variable. fcn_pars : ndarray Parameters of the nonlinear transforming function. tf_pars : ndarray, optional Parameters of the moment transform. Returns ------- mean_f : ndarray Mean of the transformed random variable. cov_f : ndarray Covariance of the transformed random variable. cov_fx : ndarray Covariance between the transformed random variable and the input random variable. """ mean = mean[:, na] # form sigma-points from unit sigma-points x = mean + cholesky(cov).dot(self.unit_sp) # push sigma-points through non-linearity fx = np.apply_along_axis(f, 0, x, fcn_pars) # output mean mean_f = fx.dot(self.wm) # output covariance dfx = fx - mean_f[:, na] cov_f = dfx.dot(self.Wc).dot(dfx.T) # input-output covariance cov_fx = dfx.dot(self.Wc).dot((x - mean).T) return mean_f, cov_f, cov_fx
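The estimates computed above are the generic weighted sigma-point formulas; with sigma points \chi_i = \mu + \sqrt{\Sigma}\,\xi_i, mean weights w_m, covariance weight matrix W_c, and \tilde{F} the matrix of centred outputs f(\chi_i) - \mu_f:

\mu_f = \sum_i w_{m,i}\, f(\chi_i), \qquad
\Sigma_f = \tilde{F}\, W_c\, \tilde{F}^{\top}, \qquad
\Sigma_{fx} = \tilde{F}\, W_c\, (\mathcal{X} - \mu)^{\top}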
Python
def dyn_fcn(self, x, q, *args): """ Model describing an object in 2D plane moving with constant speed (magnitude of the velocity vector) and turning with a constant angular rate (executing a coordinated turn). """ om = x[4] a = np.sin(om * self.dt) b = np.cos(om * self.dt) c = np.sin(om * self.dt) / om d = (1 - np.cos(om * self.dt)) / om mdyn = np.array([[1, c, 0, -d, 0], [0, b, 0, -a, 0], [0, d, 1, c, 0], [0, a, 0, b, 0], [0, 0, 0, 0, 1]]) return mdyn.dot(x) + q
def dyn_fcn(self, x, q, *args): """ Model describing an object in 2D plane moving with constant speed (magnitude of the velocity vector) and turning with a constant angular rate (executing a coordinated turn). """ om = x[4] a = np.sin(om * self.dt) b = np.cos(om * self.dt) c = np.sin(om * self.dt) / om d = (1 - np.cos(om * self.dt)) / om mdyn = np.array([[1, c, 0, -d, 0], [0, b, 0, -a, 0], [0, d, 1, c, 0], [0, a, 0, b, 0], [0, 0, 0, 0, 1]]) return mdyn.dot(x) + q
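Note that the transition above divides by the turn rate om, so it is undefined in the zero-turn-rate limit. Below is a hedged, standalone sketch of the same coordinated-turn transition with that limit guarded; the function name and state ordering are assumptions for illustration, not taken from the project.

import numpy as np

def ct_transition_matrix(x, dt):
    # coordinated-turn transition; as om -> 0 the model degenerates to constant velocity
    om = x[4]
    a, b = np.sin(om * dt), np.cos(om * dt)
    if abs(om) < 1e-12:
        c, d = dt, 0.0
    else:
        c, d = a / om, (1 - b) / om
    return np.array([[1, c, 0, -d, 0],
                     [0, b, 0, -a, 0],
                     [0, d, 1,  c, 0],
                     [0, a, 0,  b, 0],
                     [0, 0, 0,  0, 1]])

# assumed state ordering: [pos_x, vel_x, pos_y, vel_y, turn_rate]
x = np.array([1000.0, 30.0, 1000.0, 0.0, np.deg2rad(-3.0)])
x_next = ct_transition_matrix(x, dt=0.1) @ x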
Python
def meas_fcn(self, x, r, time): """ Bearing measurement from the sensor to the moving object. """ dx = x[0] - self.sensor_pos[:, 0] dy = x[1] - self.sensor_pos[:, 1] return np.arctan2(dy, dx) + r
def meas_fcn(self, x, r, time): """ Bearing measurement from the sensor to the moving object. """ dx = x[0] - self.sensor_pos[:, 0] dy = x[1] - self.sensor_pos[:, 1] return np.arctan2(dy, dx) + r
Python
def mt_trunc_demo(mt_trunc, mt, dim=None, full_input_cov=True, **kwargs): """ Comparison of truncated MT and vanilla MT on polar2cartesian transform for increasing state dimensions. The truncated MT is aware of the effective dimension, so we expect it to be closer to the true covariance Observation: Output covariance of the Truncated UT stays closer to the MC baseline than the covariance produced by the vanilla UT. There's some merit to the idea, but the problem is with computing the input-output cross-covariance. Parameters ---------- mt_trunc mt dim full_input_cov : boolean If `False`, a diagonal input covariance is used, otherwise a full covariance is used. kwargs Returns ------- """ assert issubclass(mt_trunc, MomentTransform) and issubclass(mt, MomentTransform) # state dimensions and effective dimension dim = [2, 3, 4, 5] if dim is None else dim d_eff = 2 # nonlinear integrand f = polar2cartesian # input mean and covariance mean_eff, cov_eff = np.array([1, np.pi / 2]), np.diag([0.05 ** 2, (np.pi / 10) ** 2]) if full_input_cov: A = np.random.rand(d_eff, d_eff) cov_eff = A.dot(cov_eff).dot(A.T) # use MC transform with lots of samples to compute the true transformed moments tmc = MonteCarloTransform(d_eff, n=1e4) M_mc, C_mc, cc_mc = tmc.apply(f, mean_eff, cov_eff, None) # transformed samples x = np.random.multivariate_normal(mean_eff, cov_eff, size=int(1e3)).T fx = np.apply_along_axis(f, 0, x, None) X_mc = ellipse_points(M_mc, C_mc) M = np.zeros((2, len(dim), 2)) C = np.zeros((2, 2, len(dim), 2)) X = np.zeros((2, 50, len(dim), 2)) for i, d in enumerate(dim): t = mt_trunc(d, d_eff, **kwargs) s = mt(d, **kwargs) # input mean and covariance mean, cov = np.zeros(d), np.eye(d) mean[:d_eff], cov[:d_eff, :d_eff] = mean_eff, cov_eff # transformed moments (w/o cross-covariance) M[:, i, 0], C[..., i, 0], cc = t.apply(f, mean, cov, None) M[:, i, 1], C[..., i, 1], cc = s.apply(f, mean, cov, None) # points on the ellipse defined by the transformed mean and covariance for plotting X[..., i, 0] = ellipse_points(M[:, i, 0], C[..., i, 0]) X[..., i, 1] = ellipse_points(M[:, i, 1], C[..., i, 1]) # PLOTS: transformed samples, MC mean and covariance ground truth fig, ax = plt.subplots(1, 2) ax[0].plot(fx[0, :], fx[1, :], 'k.', alpha=0.15) ax[0].plot(M_mc[0], M_mc[1], 'ro', markersize=6, lw=2) ax[0].plot(X_mc[0, :], X_mc[1, :], 'r--', lw=2, label='MC') # SR and SR-T mean and covariance for various state dimensions # TODO: it's more effective to plot SKL between the transformed and baseline covariances. for i, d in enumerate(dim): ax[0].plot(M[0, i, 0], M[1, i, 0], 'b+', markersize=10, lw=2) ax[0].plot(X[0, :, i, 0], X[1, :, i, 0], color='b', label='mt-trunc (d={})'.format(d)) for i, d in enumerate(dim): ax[0].plot(M[0, i, 1], M[1, i, 1], 'go', markersize=6) ax[0].plot(X[0, :, i, 1], X[1, :, i, 1], color='g', label='mt (d={})'.format(d)) ax[0].set_aspect('equal') plt.legend() # symmetrized KL-divergence skl = np.zeros((len(dim), 2)) for i, d in enumerate(dim): skl[i, 0] = symmetrized_kl_divergence(M_mc, C_mc, M[:, i, 0], C[..., i, 0]) skl[i, 1] = symmetrized_kl_divergence(M_mc, C_mc, M[:, i, 1], C[..., i, 1]) plt_opt = {'lw': 2, 'marker': 'o'} ax[1].plot(dim, skl[:, 0], label='truncated', **plt_opt) ax[1].plot(dim, skl[:, 1], label='original', **plt_opt) ax[1].set_xticks(dim) ax[1].set_xlabel('Dimension') ax[1].set_ylabel('SKL') plt.legend() plt.show()
def mt_trunc_demo(mt_trunc, mt, dim=None, full_input_cov=True, **kwargs): """ Comparison of truncated MT and vanilla MT on polar2cartesian transform for increasing state dimensions. The truncated MT is aware of the effective dimension, so we expect it to be closer to the true covariance Observation: Output covariance of the Truncated UT stays closer to the MC baseline than the covariance produced by the vanilla UT. There's some merit to the idea, but the problem is with computing the input-output cross-covariance. Parameters ---------- mt_trunc mt dim full_input_cov : boolean If `False`, a diagonal input covariance is used, otherwise a full covariance is used. kwargs Returns ------- """ assert issubclass(mt_trunc, MomentTransform) and issubclass(mt, MomentTransform) # state dimensions and effective dimension dim = [2, 3, 4, 5] if dim is None else dim d_eff = 2 # nonlinear integrand f = polar2cartesian # input mean and covariance mean_eff, cov_eff = np.array([1, np.pi / 2]), np.diag([0.05 ** 2, (np.pi / 10) ** 2]) if full_input_cov: A = np.random.rand(d_eff, d_eff) cov_eff = A.dot(cov_eff).dot(A.T) # use MC transform with lots of samples to compute the true transformed moments tmc = MonteCarloTransform(d_eff, n=1e4) M_mc, C_mc, cc_mc = tmc.apply(f, mean_eff, cov_eff, None) # transformed samples x = np.random.multivariate_normal(mean_eff, cov_eff, size=int(1e3)).T fx = np.apply_along_axis(f, 0, x, None) X_mc = ellipse_points(M_mc, C_mc) M = np.zeros((2, len(dim), 2)) C = np.zeros((2, 2, len(dim), 2)) X = np.zeros((2, 50, len(dim), 2)) for i, d in enumerate(dim): t = mt_trunc(d, d_eff, **kwargs) s = mt(d, **kwargs) # input mean and covariance mean, cov = np.zeros(d), np.eye(d) mean[:d_eff], cov[:d_eff, :d_eff] = mean_eff, cov_eff # transformed moments (w/o cross-covariance) M[:, i, 0], C[..., i, 0], cc = t.apply(f, mean, cov, None) M[:, i, 1], C[..., i, 1], cc = s.apply(f, mean, cov, None) # points on the ellipse defined by the transformed mean and covariance for plotting X[..., i, 0] = ellipse_points(M[:, i, 0], C[..., i, 0]) X[..., i, 1] = ellipse_points(M[:, i, 1], C[..., i, 1]) # PLOTS: transformed samples, MC mean and covariance ground truth fig, ax = plt.subplots(1, 2) ax[0].plot(fx[0, :], fx[1, :], 'k.', alpha=0.15) ax[0].plot(M_mc[0], M_mc[1], 'ro', markersize=6, lw=2) ax[0].plot(X_mc[0, :], X_mc[1, :], 'r--', lw=2, label='MC') # SR and SR-T mean and covariance for various state dimensions # TODO: it's more effective to plot SKL between the transformed and baseline covariances. for i, d in enumerate(dim): ax[0].plot(M[0, i, 0], M[1, i, 0], 'b+', markersize=10, lw=2) ax[0].plot(X[0, :, i, 0], X[1, :, i, 0], color='b', label='mt-trunc (d={})'.format(d)) for i, d in enumerate(dim): ax[0].plot(M[0, i, 1], M[1, i, 1], 'go', markersize=6) ax[0].plot(X[0, :, i, 1], X[1, :, i, 1], color='g', label='mt (d={})'.format(d)) ax[0].set_aspect('equal') plt.legend() # symmetrized KL-divergence skl = np.zeros((len(dim), 2)) for i, d in enumerate(dim): skl[i, 0] = symmetrized_kl_divergence(M_mc, C_mc, M[:, i, 0], C[..., i, 0]) skl[i, 1] = symmetrized_kl_divergence(M_mc, C_mc, M[:, i, 1], C[..., i, 1]) plt_opt = {'lw': 2, 'marker': 'o'} ax[1].plot(dim, skl[:, 0], label='truncated', **plt_opt) ax[1].plot(dim, skl[:, 1], label='original', **plt_opt) ax[1].set_xticks(dim) ax[1].set_xlabel('Dimension') ax[1].set_ylabel('SKL') plt.legend() plt.show()
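The demo relies on a symmetrized_kl_divergence helper that is not shown here. Assuming it computes the symmetrized Kullback-Leibler divergence between the two Gaussian approximations, a minimal sketch could look like the following; the function names are illustrative.

import numpy as np
from numpy.linalg import inv, slogdet

def gauss_kl(m0, P0, m1, P1):
    # KL( N(m0, P0) || N(m1, P1) )
    dim = m0.shape[0]
    dm = m1 - m0
    P1_inv = inv(P1)
    return 0.5 * (np.trace(P1_inv @ P0) + dm @ P1_inv @ dm - dim
                  + slogdet(P1)[1] - slogdet(P0)[1])

def symmetrized_kl(m0, P0, m1, P1):
    # symmetrize by averaging the two directions
    return 0.5 * (gauss_kl(m0, P0, m1, P1) + gauss_kl(m1, P1, m0, P0))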
Python
def _fcn_eval(self, fcn, x, fcn_par):
    """
    Evaluations of the integrand, which can comprise function observations as well as derivative observations.

    Parameters
    ----------
    fcn : func
        Integrand as a function handle, which is expected to behave a certain way.

    x : ndarray
        Argument (input) of the integrand.

    fcn_par :
        Parameters of the integrand.

    Returns
    -------
    : ndarray
        Function evaluations of shape (out_dim, num_pts).

    Notes
    -----
    Methods in derived subclasses decide whether to also return derivative evaluations.
    """
    # should return as many columns as output dims, one column includes function and derivative evaluations
    # for every sigma-point, thus it is (n + n*d,); n = # sigma-points, d = sigma-point dimensionality
    # returned array should be (n + n*d, e); e = output dimensionality
    # evaluate function at sigmas (e, n)
    fx = np.apply_along_axis(fcn, 0, x, fcn_par)
    # Jacobians evaluated only at sigmas specified by which_der array (e * d, n)
    dfx = np.apply_along_axis(fcn, 0, x[:, self.model.which_der], fcn_par, dx=True)
    # stack function values and derivative values into one column
    return np.vstack((fx.T, dfx.T.reshape(self.model.dim_in * len(self.model.which_der), -1))).T
def _fcn_eval(self, fcn, x, fcn_par):
    """
    Evaluations of the integrand, which can comprise function observations as well as derivative observations.

    Parameters
    ----------
    fcn : func
        Integrand as a function handle, which is expected to behave a certain way.

    x : ndarray
        Argument (input) of the integrand.

    fcn_par :
        Parameters of the integrand.

    Returns
    -------
    : ndarray
        Function evaluations of shape (out_dim, num_pts).

    Notes
    -----
    Methods in derived subclasses decide whether to also return derivative evaluations.
    """
    # should return as many columns as output dims, one column includes function and derivative evaluations
    # for every sigma-point, thus it is (n + n*d,); n = # sigma-points, d = sigma-point dimensionality
    # returned array should be (n + n*d, e); e = output dimensionality
    # evaluate function at sigmas (e, n)
    fx = np.apply_along_axis(fcn, 0, x, fcn_par)
    # Jacobians evaluated only at sigmas specified by which_der array (e * d, n)
    dfx = np.apply_along_axis(fcn, 0, x[:, self.model.which_der], fcn_par, dx=True)
    # stack function values and derivative values into one column
    return np.vstack((fx.T, dfx.T.reshape(self.model.dim_in * len(self.model.which_der), -1))).T
Python
def reset(self): """Reset internal variables and flags.""" self.x_mean_pr, self.x_cov_pr = None, None self.x_mean_sm, self.x_cov_sm = None, None self.xx_cov, self.xy_cov = None, None self.pr_mean, self.pr_cov, self.pr_xx_cov = None, None, None self.fi_mean, self.fi_cov = None, None self.sm_mean, self.sm_cov = None, None self.D, self.N = None, None self.flags = {'filtered': False, 'smoothed': False}
def reset(self): """Reset internal variables and flags.""" self.x_mean_pr, self.x_cov_pr = None, None self.x_mean_sm, self.x_cov_sm = None, None self.xx_cov, self.xy_cov = None, None self.pr_mean, self.pr_cov, self.pr_xx_cov = None, None, None self.fi_mean, self.fi_cov = None, None self.sm_mean, self.sm_cov = None, None self.D, self.N = None, None self.flags = {'filtered': False, 'smoothed': False}
Python
def _time_update(self, time, theta_dyn=None, theta_obs=None):
    """
    Abstract method for time update, which computes predictive moments of state and measurement.

    Parameters
    ----------
    time : int
        Time step. Important for t-variant systems.

    theta_dyn :
        Parameters of the moment transform computing the predictive state moments.

    theta_obs :
        Parameters of the moment transform computing the predictive measurement moments.

    Returns
    -------
    """
    pass
def _time_update(self, time, theta_dyn=None, theta_obs=None):
    """
    Abstract method for time update, which computes predictive moments of state and measurement.

    Parameters
    ----------
    time : int
        Time step. Important for t-variant systems.

    theta_dyn :
        Parameters of the moment transform computing the predictive state moments.

    theta_obs :
        Parameters of the moment transform computing the predictive measurement moments.

    Returns
    -------
    """
    pass
Python
def _measurement_update(self, y, time=None): """ Abstract method for measurement update, which takes predictive state and measurement moments and produces filtered state mean and covariance. Parameters ---------- y : (dim, ) ndarray Measurement vector. time : int Time step. Important for t-variant systems. Returns ------- """ pass
def _measurement_update(self, y, time=None): """ Abstract method for measurement update, which takes predictive state and measurement moments and produces filtered state mean and covariance. Parameters ---------- y : (dim, ) ndarray Measurement vector. time : int Time step. Important for t-variant systems. Returns ------- """ pass
Python
def _smoothing_update(self): """ Abstract method for smoothing update, which takes filtered states and predictive states from the forward pass and goes backward in time producing smoothed moments of the system state. Returns ------- """ pass
def _smoothing_update(self): """ Abstract method for smoothing update, which takes filtered states and predictive states from the forward pass and goes backward in time producing smoothed moments of the system state. Returns ------- """ pass
Python
def _time_update(self, time, theta_dyn=None, theta_obs=None): """ Time update for Gaussian filters and smoothers, computing predictive moments of state and measurement. Parameters ---------- time : int Time step. Important for t-variant systems. theta_dyn : ndarray Parameters of the moment transform computing the predictive state moments. theta_obs : ndarray Parameters of the moment transform computing the predictive measurement moments. """ # in non-additive case, augment mean and covariance mean = self.x_mean_fi if self.mod_dyn.noise_additive else np.hstack((self.x_mean_fi, self.q_mean)) cov = self.x_cov_fi if self.mod_dyn.noise_additive else block_diag(self.x_cov_fi, self.q_cov) assert mean.ndim == 1 and cov.ndim == 2 # apply moment transform to compute predicted state mean, covariance self.x_mean_pr, self.x_cov_pr, self.xx_cov = self.tf_dyn.apply(self.mod_dyn.dyn_eval, mean, cov, np.atleast_1d(time), theta_dyn) if self.mod_dyn.noise_additive: self.x_cov_pr += self.G.dot(self.q_cov).dot(self.G.T) # in non-additive case, augment mean and covariance mean = self.x_mean_pr if self.mod_obs.noise_additive else np.hstack((self.x_mean_pr, self.r_mean)) cov = self.x_cov_pr if self.mod_obs.noise_additive else block_diag(self.x_cov_pr, self.r_cov) assert mean.ndim == 1 and cov.ndim == 2 # apply moment transform to compute measurement mean, covariance self.y_mean_pr, self.y_cov_pr, self.xy_cov = self.tf_obs.apply(self.mod_obs.meas_eval, mean, cov, np.atleast_1d(time), theta_obs) # in additive case, noise covariances need to be added if self.mod_obs.noise_additive: self.y_cov_pr += self.r_cov # in non-additive case, cross-covariances must be trimmed (has no effect in additive case) self.xy_cov = self.xy_cov[:, :self.mod_dyn.dim_state] self.xx_cov = self.xx_cov[:, :self.mod_dyn.dim_state]
def _time_update(self, time, theta_dyn=None, theta_obs=None): """ Time update for Gaussian filters and smoothers, computing predictive moments of state and measurement. Parameters ---------- time : int Time step. Important for t-variant systems. theta_dyn : ndarray Parameters of the moment transform computing the predictive state moments. theta_obs : ndarray Parameters of the moment transform computing the predictive measurement moments. """ # in non-additive case, augment mean and covariance mean = self.x_mean_fi if self.mod_dyn.noise_additive else np.hstack((self.x_mean_fi, self.q_mean)) cov = self.x_cov_fi if self.mod_dyn.noise_additive else block_diag(self.x_cov_fi, self.q_cov) assert mean.ndim == 1 and cov.ndim == 2 # apply moment transform to compute predicted state mean, covariance self.x_mean_pr, self.x_cov_pr, self.xx_cov = self.tf_dyn.apply(self.mod_dyn.dyn_eval, mean, cov, np.atleast_1d(time), theta_dyn) if self.mod_dyn.noise_additive: self.x_cov_pr += self.G.dot(self.q_cov).dot(self.G.T) # in non-additive case, augment mean and covariance mean = self.x_mean_pr if self.mod_obs.noise_additive else np.hstack((self.x_mean_pr, self.r_mean)) cov = self.x_cov_pr if self.mod_obs.noise_additive else block_diag(self.x_cov_pr, self.r_cov) assert mean.ndim == 1 and cov.ndim == 2 # apply moment transform to compute measurement mean, covariance self.y_mean_pr, self.y_cov_pr, self.xy_cov = self.tf_obs.apply(self.mod_obs.meas_eval, mean, cov, np.atleast_1d(time), theta_obs) # in additive case, noise covariances need to be added if self.mod_obs.noise_additive: self.y_cov_pr += self.r_cov # in non-additive case, cross-covariances must be trimmed (has no effect in additive case) self.xy_cov = self.xy_cov[:, :self.mod_dyn.dim_state] self.xx_cov = self.xx_cov[:, :self.mod_dyn.dim_state]
Python
def _measurement_update(self, y, time=None): """ Measurement update for Gaussian filters, which takes predictive state and measurement moments and produces filtered state mean and covariance. Parameters ---------- y : (dim, ) ndarray Measurement vector. time : int Time step. Important for t-variant systems. Notes ----- Implements general Gaussian filter measurement update in the form .. math:: \[ G_k = P^{xy}_{k|k-1}(P^y_{k|k-1})^{-1} m^x_{k|k} = m^x_{k|k-1} + G_k (y_k - m^y_{k|k-1}) P^x_{k|k} = P^x_{k|k-1} - G_k P^y_{k|k-1} G^T_k \] """ gain = cho_solve(cho_factor(self.y_cov_pr), self.xy_cov).T self.x_mean_fi = self.x_mean_pr + gain.dot(y - self.y_mean_pr) self.x_cov_fi = self.x_cov_pr - gain.dot(self.y_cov_pr).dot(gain.T)
def _measurement_update(self, y, time=None): """ Measurement update for Gaussian filters, which takes predictive state and measurement moments and produces filtered state mean and covariance. Parameters ---------- y : (dim, ) ndarray Measurement vector. time : int Time step. Important for t-variant systems. Notes ----- Implements general Gaussian filter measurement update in the form .. math:: \[ G_k = P^{xy}_{k|k-1}(P^y_{k|k-1})^{-1} m^x_{k|k} = m^x_{k|k-1} + G_k (y_k - m^y_{k|k-1}) P^x_{k|k} = P^x_{k|k-1} - G_k P^y_{k|k-1} G^T_k \] """ gain = cho_solve(cho_factor(self.y_cov_pr), self.xy_cov).T self.x_mean_fi = self.x_mean_pr + gain.dot(y - self.y_mean_pr) self.x_cov_fi = self.x_cov_pr - gain.dot(self.y_cov_pr).dot(gain.T)
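A small numerical illustration of the gain computation used above: cho_solve(cho_factor(P_y), P_yx) solves P_y Z = P_yx via a Cholesky factorization, and the transpose gives the Kalman gain K = P_xy P_y^{-1} without forming the inverse explicitly. The numbers below are purely illustrative.

import numpy as np
from scipy.linalg import cho_factor, cho_solve

# illustrative predictive moments for a 2D state and 1D measurement
x_mean_pr = np.array([0.0, 1.0])
x_cov_pr = np.array([[1.0, 0.1],
                     [0.1, 0.5]])
y_mean_pr = np.array([0.1])
y_cov_pr = np.array([[0.3]])
xy_cov = np.array([[0.2, 0.05]])   # cov(y, x), one row per measurement dimension
y = np.array([0.4])

gain = cho_solve(cho_factor(y_cov_pr), xy_cov).T       # K = P_xy P_y^{-1}
x_mean_fi = x_mean_pr + gain @ (y - y_mean_pr)
x_cov_fi = x_cov_pr - gain @ y_cov_pr @ gain.T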
Python
def _smoothing_update(self):
    """
    Smoothing update, which takes filtered states and predictive states from the forward pass and goes backward
    in time producing moments of the smoothed system state.

    Notes
    -----
    Implements general Gaussian Rauch-Tung-Striebel smoothing update equations in the form

    .. math::
        \[
            D_{k+1} = P^{xx}_{k+1|K} (P^x_{k+1|k})^{-1}
            m^x_{k|K} = m^x_{k|k} + D_{k+1} (m^x_{k+1|K} - m^x_{k+1|k})
            P^x_{k|K} = P^x_{k|k} + D_{k+1} (P^x_{k+1|K} - P^x_{k+1|k}) D^T_{k+1}
        \]
    """
    gain = cho_solve(cho_factor(self.x_cov_pr), self.xx_cov).T
    self.x_mean_sm = self.x_mean_fi + gain.dot(self.x_mean_sm - self.x_mean_pr)
    self.x_cov_sm = self.x_cov_fi + gain.dot(self.x_cov_sm - self.x_cov_pr).dot(gain.T)
def _smoothing_update(self):
    """
    Smoothing update, which takes filtered states and predictive states from the forward pass and goes backward
    in time producing moments of the smoothed system state.

    Notes
    -----
    Implements general Gaussian Rauch-Tung-Striebel smoothing update equations in the form

    .. math::
        \[
            D_{k+1} = P^{xx}_{k+1|K} (P^x_{k+1|k})^{-1}
            m^x_{k|K} = m^x_{k|k} + D_{k+1} (m^x_{k+1|K} - m^x_{k+1|k})
            P^x_{k|K} = P^x_{k|k} + D_{k+1} (P^x_{k+1|K} - P^x_{k+1|k}) D^T_{k+1}
        \]
    """
    gain = cho_solve(cho_factor(self.x_cov_pr), self.xx_cov).T
    self.x_mean_sm = self.x_mean_fi + gain.dot(self.x_mean_sm - self.x_mean_pr)
    self.x_cov_sm = self.x_cov_fi + gain.dot(self.x_cov_sm - self.x_cov_pr).dot(gain.T)
Python
def reset(self): """Reset internal variables and flags.""" self.x_mean_fi, self.x_cov_fi, self.dof_fi = self.x0_mean, self.x0_cov, self.x0_dof scale = (self.dof - 2) / self.dof self.x_smat_fi = scale * self.x_cov_fi self.x_smat_pr, self.y_smat_pr, self.xy_smat = None, None, None super(StudentianInference, self).reset()
def reset(self): """Reset internal variables and flags.""" self.x_mean_fi, self.x_cov_fi, self.dof_fi = self.x0_mean, self.x0_cov, self.x0_dof scale = (self.dof - 2) / self.dof self.x_smat_fi = scale * self.x_cov_fi self.x_smat_pr, self.y_smat_pr, self.xy_smat = None, None, None super(StudentianInference, self).reset()
Python
def _time_update(self, time, theta_dyn=None, theta_obs=None): """ Time update for Studentian filters and smoothers, computing predictive moments of state and measurement. Parameters ---------- time : int Time step. Important for t-variant systems. theta_dyn : ndarray Parameters of the moment transform computing the predictive state moments. theta_obs : ndarray Parameters of the moment transform computing the predictive measurement moments. """ if self.fixed_dof: # fixed-DOF version # pick the smallest DOF dof_pr = np.min((self.dof_fi, self.q_dof, self.r_dof)) # rescale filtered scale matrix? scale = (dof_pr - 2) / dof_pr # self.x_smat_fi = self.x_smat_fi * scale * self.dof_fi / (self.dof_fi - 2) else: # increasing DOF version scale = (self.dof - 2) / self.dof # in non-additive case, augment mean and covariance mean = self.x_mean_fi if self.mod_dyn.noise_additive else np.hstack((self.x_mean_fi, self.q_mean)) smat = self.x_smat_fi if self.mod_dyn.noise_additive else block_diag(self.x_smat_fi, self.q_smat) assert mean.ndim == 1 and smat.ndim == 2 # predicted state statistics # TODO: make the moment transforms take covariance matrix (instead of scale) self.x_mean_pr, self.x_cov_pr, self.xx_cov = self.tf_dyn.apply(self.mod_dyn.dyn_eval, mean, smat, np.atleast_1d(time), theta_dyn) # predicted covariance -> predicted scale matrix self.x_smat_pr = scale * self.x_cov_pr if self.mod_dyn.noise_additive: self.x_cov_pr += self.q_gain.dot(self.q_cov).dot(self.q_gain.T) self.x_smat_pr += self.q_gain.dot(self.q_smat).dot(self.q_gain.T) # in non-additive case, augment mean and covariance mean = self.x_mean_pr if self.mod_obs.noise_additive else np.hstack((self.x_mean_pr, self.r_mean)) smat = self.x_smat_pr if self.mod_obs.noise_additive else block_diag(self.x_smat_pr, self.r_smat) assert mean.ndim == 1 and smat.ndim == 2 # predicted measurement statistics self.y_mean_pr, self.y_cov_pr, self.xy_cov = self.tf_obs.apply(self.mod_obs.meas_eval, mean, smat, np.atleast_1d(time), theta_obs) # turn covariance to scale matrix self.y_smat_pr = scale * self.y_cov_pr self.xy_smat = scale * self.xy_cov # in additive case, noise covariances need to be added if self.mod_obs.noise_additive: self.y_cov_pr += self.r_cov self.y_smat_pr += self.r_smat # in non-additive case, cross-covariances must be trimmed (has no effect in additive case) self.xy_cov = self.xy_cov[:, :self.mod_dyn.dim_in] self.xx_cov = self.xx_cov[:, :self.mod_dyn.dim_in] self.xy_smat = self.xy_smat[:, :self.mod_dyn.dim_in]
def _time_update(self, time, theta_dyn=None, theta_obs=None): """ Time update for Studentian filters and smoothers, computing predictive moments of state and measurement. Parameters ---------- time : int Time step. Important for t-variant systems. theta_dyn : ndarray Parameters of the moment transform computing the predictive state moments. theta_obs : ndarray Parameters of the moment transform computing the predictive measurement moments. """ if self.fixed_dof: # fixed-DOF version # pick the smallest DOF dof_pr = np.min((self.dof_fi, self.q_dof, self.r_dof)) # rescale filtered scale matrix? scale = (dof_pr - 2) / dof_pr # self.x_smat_fi = self.x_smat_fi * scale * self.dof_fi / (self.dof_fi - 2) else: # increasing DOF version scale = (self.dof - 2) / self.dof # in non-additive case, augment mean and covariance mean = self.x_mean_fi if self.mod_dyn.noise_additive else np.hstack((self.x_mean_fi, self.q_mean)) smat = self.x_smat_fi if self.mod_dyn.noise_additive else block_diag(self.x_smat_fi, self.q_smat) assert mean.ndim == 1 and smat.ndim == 2 # predicted state statistics # TODO: make the moment transforms take covariance matrix (instead of scale) self.x_mean_pr, self.x_cov_pr, self.xx_cov = self.tf_dyn.apply(self.mod_dyn.dyn_eval, mean, smat, np.atleast_1d(time), theta_dyn) # predicted covariance -> predicted scale matrix self.x_smat_pr = scale * self.x_cov_pr if self.mod_dyn.noise_additive: self.x_cov_pr += self.q_gain.dot(self.q_cov).dot(self.q_gain.T) self.x_smat_pr += self.q_gain.dot(self.q_smat).dot(self.q_gain.T) # in non-additive case, augment mean and covariance mean = self.x_mean_pr if self.mod_obs.noise_additive else np.hstack((self.x_mean_pr, self.r_mean)) smat = self.x_smat_pr if self.mod_obs.noise_additive else block_diag(self.x_smat_pr, self.r_smat) assert mean.ndim == 1 and smat.ndim == 2 # predicted measurement statistics self.y_mean_pr, self.y_cov_pr, self.xy_cov = self.tf_obs.apply(self.mod_obs.meas_eval, mean, smat, np.atleast_1d(time), theta_obs) # turn covariance to scale matrix self.y_smat_pr = scale * self.y_cov_pr self.xy_smat = scale * self.xy_cov # in additive case, noise covariances need to be added if self.mod_obs.noise_additive: self.y_cov_pr += self.r_cov self.y_smat_pr += self.r_smat # in non-additive case, cross-covariances must be trimmed (has no effect in additive case) self.xy_cov = self.xy_cov[:, :self.mod_dyn.dim_in] self.xx_cov = self.xx_cov[:, :self.mod_dyn.dim_in] self.xy_smat = self.xy_smat[:, :self.mod_dyn.dim_in]
Python
def _measurement_update(self, y, time=None): """ Measurement update for Studentian filters, which takes predictive state and measurement moments and produces filtered state mean and covariance. Parameters ---------- y : (dim, ) ndarray Measurement vector. time : int Time step. Important for t-variant systems. Notes ----- Implements general Studentian filter measurement update. """ # scale the covariance matrices # scale = (self.dof - 2) / self.dof # self.y_cov_pr *= scale # self.xy_cov *= scale # Kalman update gain = cho_solve(cho_factor(self.y_smat_pr), self.xy_smat).T self.x_mean_fi = self.x_mean_pr + gain.dot(y - self.y_mean_pr) # FIXME: this isn't covariance (shouldn't be saved in x_cov_fi) self.x_cov_fi = self.x_smat_pr - gain.dot(self.y_smat_pr).dot(gain.T) # filtered covariance to filtered scale matrix # delta = cho_solve(cho_factor(self.y_smat_pr), y - self.y_mean_pr) delta = la.solve(la.cholesky(self.y_smat_pr), y - self.y_mean_pr) scale = (self.dof + delta.T.dot(delta)) / (self.dof + self.mod_obs.dim_out) self.x_smat_fi = scale * self.x_cov_fi # update degrees of freedom self.dof_fi += self.mod_obs.dim_out
def _measurement_update(self, y, time=None): """ Measurement update for Studentian filters, which takes predictive state and measurement moments and produces filtered state mean and covariance. Parameters ---------- y : (dim, ) ndarray Measurement vector. time : int Time step. Important for t-variant systems. Notes ----- Implements general Studentian filter measurement update. """ # scale the covariance matrices # scale = (self.dof - 2) / self.dof # self.y_cov_pr *= scale # self.xy_cov *= scale # Kalman update gain = cho_solve(cho_factor(self.y_smat_pr), self.xy_smat).T self.x_mean_fi = self.x_mean_pr + gain.dot(y - self.y_mean_pr) # FIXME: this isn't covariance (shouldn't be saved in x_cov_fi) self.x_cov_fi = self.x_smat_pr - gain.dot(self.y_smat_pr).dot(gain.T) # filtered covariance to filtered scale matrix # delta = cho_solve(cho_factor(self.y_smat_pr), y - self.y_mean_pr) delta = la.solve(la.cholesky(self.y_smat_pr), y - self.y_mean_pr) scale = (self.dof + delta.T.dot(delta)) / (self.dof + self.mod_obs.dim_out) self.x_smat_fi = scale * self.x_cov_fi # update degrees of freedom self.dof_fi += self.mod_obs.dim_out
Python
def reset(self): """Reset internal variables and flags.""" super(MarginalInference, self).reset() # Reset parameter moments to prior moments self.param_mean = self.param_prior_mean self.param_cov = self.param_prior_cov
def reset(self): """Reset internal variables and flags.""" super(MarginalInference, self).reset() # Reset parameter moments to prior moments self.param_mean = self.param_prior_mean self.param_cov = self.param_prior_cov
Python
def _measurement_update(self, y, time=None): """ Computes the posterior state mean and covariance by marginalizing out the moment transform parameters. Procedure has two steps: 1. Compute Laplace approximation of the GPQ parameter posterior 2. Use fully-symmetric quadrature rule to compute posterior state mean and covariance by marginalizing out the GPQ-parameters over the approximated posterior. Parameters ---------- y: (dim, ) ndarray Measurement vector. time : int Time step. Important for t-variant systems. """ # Mean and covariance of the parameter posterior by Laplace approximation self._param_posterior_moments(y, time) # Marginalization of moment transform parameters param_cov_chol = la.cholesky(self.param_cov) param_pts = self.param_mean[:, na] + param_cov_chol.dot(self.param_upts) mean = np.zeros((self.mod_dyn.dim_in, self.param_pts_num)) cov = np.zeros((self.mod_dyn.dim_in, self.mod_dyn.dim_in, self.param_pts_num)) # Evaluate state posterior with different values of transform parameters for i in range(self.param_pts_num): # FIXME: fcn recomputes predictive estimates (x_mean_pr, x_cov_pr, ...) # FIXME: predictive moments should be computed by quadrature, based on param prior mean[:, i], cov[:, :, i] = self._state_posterior_moments(param_pts[:, i], y, time) # Weighted sum of means and covariances approximates Gaussian mixture state posterior self.x_mean_fi = np.einsum('ij, j -> i', mean, self.param_wts) self.x_cov_fi = np.einsum('ijk, k -> ij', cov, self.param_wts)
def _measurement_update(self, y, time=None): """ Computes the posterior state mean and covariance by marginalizing out the moment transform parameters. Procedure has two steps: 1. Compute Laplace approximation of the GPQ parameter posterior 2. Use fully-symmetric quadrature rule to compute posterior state mean and covariance by marginalizing out the GPQ-parameters over the approximated posterior. Parameters ---------- y: (dim, ) ndarray Measurement vector. time : int Time step. Important for t-variant systems. """ # Mean and covariance of the parameter posterior by Laplace approximation self._param_posterior_moments(y, time) # Marginalization of moment transform parameters param_cov_chol = la.cholesky(self.param_cov) param_pts = self.param_mean[:, na] + param_cov_chol.dot(self.param_upts) mean = np.zeros((self.mod_dyn.dim_in, self.param_pts_num)) cov = np.zeros((self.mod_dyn.dim_in, self.mod_dyn.dim_in, self.param_pts_num)) # Evaluate state posterior with different values of transform parameters for i in range(self.param_pts_num): # FIXME: fcn recomputes predictive estimates (x_mean_pr, x_cov_pr, ...) # FIXME: predictive moments should be computed by quadrature, based on param prior mean[:, i], cov[:, :, i] = self._state_posterior_moments(param_pts[:, i], y, time) # Weighted sum of means and covariances approximates Gaussian mixture state posterior self.x_mean_fi = np.einsum('ij, j -> i', mean, self.param_wts) self.x_cov_fi = np.einsum('ijk, k -> ij', cov, self.param_wts)
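The two einsum contractions at the end implement a weighted sum over the parameter points. A small illustration on made-up shapes (the dimensions and weights below are purely illustrative):

import numpy as np

num_pts, dim = 5, 2
mean = np.random.randn(dim, num_pts)                 # column i: state mean for the i-th parameter point
cov = np.stack(num_pts * [np.eye(dim)], axis=2)      # per-point covariances, shape (dim, dim, num_pts)
wts = np.full(num_pts, 1.0 / num_pts)

x_mean = np.einsum('ij, j -> i', mean, wts)          # same as mean @ wts
x_cov = np.einsum('ijk, k -> ij', cov, wts)          # weighted sum of covariances over the last axis

assert np.allclose(x_mean, mean @ wts)
assert np.allclose(x_cov, np.tensordot(cov, wts, axes=([2], [0])))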
Python
def _param_log_prior(self, theta):
    """
    Prior on transform parameters :math:`p(\\theta) = N(\\theta | m^{\\theta}_{k-1}, P^{\\theta}_{k-1})`.

    Parameters
    ----------
    theta : ndarray
        Vector of transform parameters.

    Returns
    -------
    log_prob : (ndarray or scalar)
        Log of the probability density function evaluated at theta.

    Notes
    -----
    At the moment, only Gaussian prior is supported. Student-t prior might be implemented in the future.
    """
    return multivariate_normal.logpdf(theta, self.param_mean, self.param_cov)
def _param_log_prior(self, theta):
    """
    Prior on transform parameters :math:`p(\\theta) = N(\\theta | m^{\\theta}_{k-1}, P^{\\theta}_{k-1})`.

    Parameters
    ----------
    theta : ndarray
        Vector of transform parameters.

    Returns
    -------
    log_prob : (ndarray or scalar)
        Log of the probability density function evaluated at theta.

    Notes
    -----
    At the moment, only Gaussian prior is supported. Student-t prior might be implemented in the future.
    """
    return multivariate_normal.logpdf(theta, self.param_mean, self.param_cov)
Python
def weights(self, par, *args): """ Bayesian quadrature weights. Parameters ---------- par : ndarray Kernel parameters to use in computation of the weights. args : tuple Other relevant parameters. Returns ------- : tuple Weights for the mean, covariance and cross-covariance quadrature approximations. """ wm, wc, wcc, emv, ivar = self.model.bq_weights(par, *args) return wm, wc, wcc
def weights(self, par, *args): """ Bayesian quadrature weights. Parameters ---------- par : ndarray Kernel parameters to use in computation of the weights. args : tuple Other relevant parameters. Returns ------- : tuple Weights for the mean, covariance and cross-covariance quadrature approximations. """ wm, wc, wcc, emv, ivar = self.model.bq_weights(par, *args) return wm, wc, wcc
Python
def _fcn_eval(self, fcn, x, fcn_par):
    """
    Evaluations of the integrand, which can comprise function observations as well as derivative observations.

    Parameters
    ----------
    fcn : func
        Integrand as a function handle, which is expected to behave a certain way.

    x : ndarray
        Argument (input) of the integrand.

    fcn_par :
        Parameters of the integrand.

    Returns
    -------
    : ndarray
        Function evaluations of shape (out_dim, num_pts).

    Notes
    -----
    Methods in derived subclasses decide whether to also return derivative evaluations.
    """
    return np.apply_along_axis(fcn, 0, x, fcn_par)
def _fcn_eval(self, fcn, x, fcn_par):
    """
    Evaluations of the integrand, which can comprise function observations as well as derivative observations.

    Parameters
    ----------
    fcn : func
        Integrand as a function handle, which is expected to behave a certain way.

    x : ndarray
        Argument (input) of the integrand.

    fcn_par :
        Parameters of the integrand.

    Returns
    -------
    : ndarray
        Function evaluations of shape (out_dim, num_pts).

    Notes
    -----
    Methods in derived subclasses decide whether to also return derivative evaluations.
    """
    return np.apply_along_axis(fcn, 0, x, fcn_par)
Python
def _mean(self, weights, fcn_evals): """ Transformed mean for the multi-output GPQ. Parameters ---------- weights : numpy.ndarray fcn_evals : numpy.ndarray Notes ----- Problems with implementation. Can't get the results to match the results of single-output GPQ transform. I strongly suspect this is caused by the inconsistent results from numpy.einsum and numpy.dot. Returns ------- : numpy.ndarray """ # return np.einsum('ij, ji -> i', fcn_evals, weights) for i in range(self.model.dim_out): self.tmean[i] = fcn_evals[i, :].dot(weights[:, i]) return self.tmean
def _mean(self, weights, fcn_evals): """ Transformed mean for the multi-output GPQ. Parameters ---------- weights : numpy.ndarray fcn_evals : numpy.ndarray Notes ----- Problems with implementation. Can't get the results to match the results of single-output GPQ transform. I strongly suspect this is caused by the inconsistent results from numpy.einsum and numpy.dot. Returns ------- : numpy.ndarray """ # return np.einsum('ij, ji -> i', fcn_evals, weights) for i in range(self.model.dim_out): self.tmean[i] = fcn_evals[i, :].dot(weights[:, i]) return self.tmean
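Given the note above about numpy.einsum and numpy.dot apparently disagreeing, a quick sanity check on random inputs is easy to run; this is a diagnostic sketch, not part of the project, and on current NumPy versions the commented-out einsum path and the explicit loop are expected to produce the same numbers.

import numpy as np

dim_out, num_pts = 3, 7
fcn_evals = np.random.randn(dim_out, num_pts)
weights = np.random.randn(num_pts, dim_out)

# loop path, as in the method above
loop = np.array([fcn_evals[i, :].dot(weights[:, i]) for i in range(dim_out)])
# einsum path, as in the commented-out line
ein = np.einsum('ij, ji -> i', fcn_evals, weights)
print(np.allclose(loop, ein))   # expected: True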
Python
def expected_eval(xs, x, alpha=10.0, el=0.7, which_der=None): """RBF kernel w/ derivatives.""" x, xs = np.atleast_2d(x), np.atleast_2d(xs) D, N = x.shape Ds, Ns = xs.shape assert Ds == D which_der = np.arange(N) if which_der is None else which_der Nd = len(which_der) # points w/ derivative observations # extract hypers # alpha, el, jitter = hypers['sig_var'], hypers['lengthscale'], hypers['noise_var'] iLam = np.diag(el ** -1 * np.ones(D)) iiLam = np.diag(el ** -2 * np.ones(D)) x = iLam.dot(x) # sqrt(Lambda^-1) * X xs = iLam.dot(xs) Kff = np.exp(2 * np.log(alpha) - 0.5 * maha(xs.T, x.T)) # cov(f(xi), f(xj)) x = iLam.dot(x) # Lambda^-1 * X xs = iLam.dot(xs) XmX = xs[..., na] - x[:, na, :] # pair-wise differences Kfd = np.zeros((Ns, D * Nd)) # cov(f(xi), df(xj)) Kdd = np.zeros((D * Nd, D * Nd)) # cov(df(xi), df(xj)) for i in range(Ns): for j in range(Nd): jstart, jend = j * D, j * D + D j_d = which_der[j] Kfd[i, jstart:jend] = Kff[i, j_d] * XmX[:, i, j_d] for i in range(Nd): for j in range(Nd): istart, iend = i * D, i * D + D jstart, jend = j * D, j * D + D i_d, j_d = which_der[i], which_der[j] # indices of points with derivatives Kdd[istart:iend, jstart:jend] = Kff[i_d, j_d] * (iiLam - np.outer(XmX[:, i_d, j_d], XmX[:, i_d, j_d])) if Ns == N: return np.vstack((np.hstack((Kff, Kfd)), np.hstack((Kfd.T, Kdd)))) else: return np.hstack((Kff, Kfd))
def expected_eval(xs, x, alpha=10.0, el=0.7, which_der=None): """RBF kernel w/ derivatives.""" x, xs = np.atleast_2d(x), np.atleast_2d(xs) D, N = x.shape Ds, Ns = xs.shape assert Ds == D which_der = np.arange(N) if which_der is None else which_der Nd = len(which_der) # points w/ derivative observations # extract hypers # alpha, el, jitter = hypers['sig_var'], hypers['lengthscale'], hypers['noise_var'] iLam = np.diag(el ** -1 * np.ones(D)) iiLam = np.diag(el ** -2 * np.ones(D)) x = iLam.dot(x) # sqrt(Lambda^-1) * X xs = iLam.dot(xs) Kff = np.exp(2 * np.log(alpha) - 0.5 * maha(xs.T, x.T)) # cov(f(xi), f(xj)) x = iLam.dot(x) # Lambda^-1 * X xs = iLam.dot(xs) XmX = xs[..., na] - x[:, na, :] # pair-wise differences Kfd = np.zeros((Ns, D * Nd)) # cov(f(xi), df(xj)) Kdd = np.zeros((D * Nd, D * Nd)) # cov(df(xi), df(xj)) for i in range(Ns): for j in range(Nd): jstart, jend = j * D, j * D + D j_d = which_der[j] Kfd[i, jstart:jend] = Kff[i, j_d] * XmX[:, i, j_d] for i in range(Nd): for j in range(Nd): istart, iend = i * D, i * D + D jstart, jend = j * D, j * D + D i_d, j_d = which_der[i], which_der[j] # indices of points with derivatives Kdd[istart:iend, jstart:jend] = Kff[i_d, j_d] * (iiLam - np.outer(XmX[:, i_d, j_d], XmX[:, i_d, j_d])) if Ns == N: return np.vstack((np.hstack((Kff, Kfd)), np.hstack((Kfd.T, Kdd)))) else: return np.hstack((Kff, Kfd))
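The kernel above calls a maha helper that is not shown; presumably it computes pairwise squared (Mahalanobis or Euclidean) distances between the rows of its two arguments. Under that assumption, a standalone sketch of the helper and of the plain RBF block it produces:

import numpy as np

def maha(x, y, V=None):
    # pairwise squared Mahalanobis distances between rows of x and rows of y;
    # with V=None this is the plain squared Euclidean distance
    if V is None:
        V = np.eye(x.shape[1])
    return (np.sum(x @ V * x, axis=1)[:, None]
            + np.sum(y @ V * y, axis=1)[None, :]
            - 2 * x @ V @ y.T)

# plain RBF block of the kernel: k(x_i, x_j) = alpha^2 * exp(-0.5 * ||(x_i - x_j) / el||^2)
alpha, el = 10.0, 0.7
X = np.random.randn(2, 5)                       # (dim, num_pts) convention used above
Xs = X / el                                     # sqrt(Lambda^-1) * X for an isotropic lengthscale
Kff = np.exp(2 * np.log(alpha) - 0.5 * maha(Xs.T, Xs.T))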
Python
def gauss_mixture(means, covs, alphas, size):
    """
    Draw samples from a Gaussian mixture.

    Parameters
    ----------
    means : tuple of ndarrays
        Mean for each of the mixture components.

    covs : tuple of ndarrays
        Covariance for each of the mixture components.

    alphas : 1d ndarray
        Mixing proportions, must have the same length as means and covs.

    size : int or tuple of ints  # TODO: tuple of ints not yet handled.
        Number of samples to draw or shape of the output array containing samples.

    Returns
    -------
    samples : ndarray
        Samples from the Gaussian mixture.

    indexes : ndarray
        Component indices corresponding to the samples in `samples`.
    """
    if len(means) != len(covs) or len(covs) != len(alphas):
        raise ValueError('means, covs and alphas need to have the same length.')

    n_samples = np.prod(size)
    n_dim = len(means[0])

    # draw from discrete distribution according to the mixing proportions
    ci = np.random.choice(np.arange(len(alphas)), p=alphas, size=size)
    ci_counts = np.unique(ci, return_counts=True)[1]

    # draw samples from each of the component Gaussians
    samples = np.empty((n_samples, n_dim))
    indexes = np.empty(n_samples, dtype=int)
    start = 0
    for ind, c in enumerate(ci_counts):
        end = start + c
        samples[start:end, :] = np.random.multivariate_normal(means[ind], covs[ind], size=c)
        indexes[start:end] = ind
        start = end
    from sklearn.utils import shuffle
    return shuffle(samples, indexes)
def gauss_mixture(means, covs, alphas, size):
    """
    Draw samples from a Gaussian mixture.

    Parameters
    ----------
    means : tuple of ndarrays
        Mean for each of the mixture components.

    covs : tuple of ndarrays
        Covariance for each of the mixture components.

    alphas : 1d ndarray
        Mixing proportions, must have the same length as means and covs.

    size : int or tuple of ints  # TODO: tuple of ints not yet handled.
        Number of samples to draw or shape of the output array containing samples.

    Returns
    -------
    samples : ndarray
        Samples from the Gaussian mixture.

    indexes : ndarray
        Component indices corresponding to the samples in `samples`.
    """
    if len(means) != len(covs) or len(covs) != len(alphas):
        raise ValueError('means, covs and alphas need to have the same length.')

    n_samples = np.prod(size)
    n_dim = len(means[0])

    # draw from discrete distribution according to the mixing proportions
    ci = np.random.choice(np.arange(len(alphas)), p=alphas, size=size)
    ci_counts = np.unique(ci, return_counts=True)[1]

    # draw samples from each of the component Gaussians
    samples = np.empty((n_samples, n_dim))
    indexes = np.empty(n_samples, dtype=int)
    start = 0
    for ind, c in enumerate(ci_counts):
        end = start + c
        samples[start:end, :] = np.random.multivariate_normal(means[ind], covs[ind], size=c)
        indexes[start:end] = ind
        start = end
    from sklearn.utils import shuffle
    return shuffle(samples, indexes)
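Assuming the function above is importable (and that scikit-learn is installed for the final shuffle), a typical call looks like this; the component proportions of the returned indexes should roughly match alphas.

import numpy as np

means = (np.array([0.0, 0.0]), np.array([3.0, 3.0]))
covs = (np.eye(2), 0.5 * np.eye(2))
alphas = np.array([0.3, 0.7])

samples, indexes = gauss_mixture(means, covs, alphas, size=500)
print(samples.shape)                              # (500, 2)
print(np.bincount(indexes) / len(indexes))        # roughly [0.3, 0.7]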
Python
def multivariate_t(mean, scale, nu, size):
    """
    Samples from a multivariate Student's t-distribution.

    Samples of a random variable :math:`X` following a multivariate t-distribution
    :math:`X \\sim \\mathrm{St}(\\mu, \\Sigma, \\nu)`.

    Parameters
    ----------
    mean : (dim_x, ) ndarray
        Mean vector.

    scale : (dim_x, dim_x) ndarray
        Scale matrix.

    nu : float
        Degrees of freedom.

    size : int or tuple of ints
        Number of samples to draw, gets passed into NumPy's random number generators.

    Returns
    -------
    : ndarray
        Samples of the multivariate Student's t-distribution.

    Notes
    -----
    If :math:`y \\sim \\mathrm{N}(0, \\Sigma)` and :math:`u \\sim \\mathrm{Gamma}(k=\\nu/2, \\theta=2/\\nu)`,
    then :math:`x \\sim \\mathrm{St}(\\mu, \\Sigma, \\nu)`, where :math:`x = \\mu + y\\frac{1}{\\sqrt{u}}`.
    """
    v = np.random.gamma(nu / 2, 2 / nu, size)[:, na]
    n = np.random.multivariate_normal(np.zeros_like(mean), scale, size)
    return mean[na, :] + n / np.sqrt(v)
def multivariate_t(mean, scale, nu, size):
    """
    Samples from a multivariate Student's t-distribution.

    Samples of a random variable :math:`X` following a multivariate t-distribution
    :math:`X \\sim \\mathrm{St}(\\mu, \\Sigma, \\nu)`.

    Parameters
    ----------
    mean : (dim_x, ) ndarray
        Mean vector.

    scale : (dim_x, dim_x) ndarray
        Scale matrix.

    nu : float
        Degrees of freedom.

    size : int or tuple of ints
        Number of samples to draw, gets passed into NumPy's random number generators.

    Returns
    -------
    : ndarray
        Samples of the multivariate Student's t-distribution.

    Notes
    -----
    If :math:`y \\sim \\mathrm{N}(0, \\Sigma)` and :math:`u \\sim \\mathrm{Gamma}(k=\\nu/2, \\theta=2/\\nu)`,
    then :math:`x \\sim \\mathrm{St}(\\mu, \\Sigma, \\nu)`, where :math:`x = \\mu + y\\frac{1}{\\sqrt{u}}`.
    """
    v = np.random.gamma(nu / 2, 2 / nu, size)[:, na]
    n = np.random.multivariate_normal(np.zeros_like(mean), scale, size)
    return mean[na, :] + n / np.sqrt(v)
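A quick sanity check of the sampler: for nu > 2 the covariance of St(mean, scale, nu) is nu / (nu - 2) * scale, so the sample covariance should approach that value. The numbers are illustrative, and na is assumed to be numpy.newaxis as elsewhere in the module.

import numpy as np

mean = np.array([0.0, 0.0])
scale = np.array([[1.0, 0.3],
                  [0.3, 2.0]])
nu = 5.0

x = multivariate_t(mean, scale, nu, size=100000)
print(np.cov(x, rowvar=False))        # should be close to nu / (nu - 2) * scale
print(nu / (nu - 2) * scale)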
Python
def n_sum_k(n, k):
    """Generates all n-tuples summing to k."""
    assert k >= 0
    if k == 0:
        return np.zeros((n, 1), dtype=int)
    if k == 1:
        return np.eye(n, dtype=int)
    else:
        a = n_sum_k(n, k - 1)
        I = np.eye(n, dtype=int)
        temp = np.zeros((n, (n * (1 + n) // 2) - 1), dtype=int)
        tind = 0
        for i in range(n - 1):
            for j in range(i, n):
                temp[:, tind] = a[:, i] + I[:, j]
                tind = tind + 1
        return np.hstack((temp, a[:, n - 1:] + I[:, -1, None]))
def n_sum_k(n, k):
    """Generates all n-tuples summing to k."""
    assert k >= 0
    if k == 0:
        return np.zeros((n, 1), dtype=int)
    if k == 1:
        return np.eye(n, dtype=int)
    else:
        a = n_sum_k(n, k - 1)
        I = np.eye(n, dtype=int)
        temp = np.zeros((n, (n * (1 + n) // 2) - 1), dtype=int)
        tind = 0
        for i in range(n - 1):
            for j in range(i, n):
                temp[:, tind] = a[:, i] + I[:, j]
                tind = tind + 1
        return np.hstack((temp, a[:, n - 1:] + I[:, -1, None]))
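For example, the columns of n_sum_k(3, 2) enumerate all 3-tuples of non-negative integers summing to 2 (six in total):

# columns of the returned (3, 6) array:
# [2,0,0], [1,1,0], [1,0,1], [0,2,0], [0,1,1], [0,0,2]
print(n_sum_k(3, 2))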
Python
def load_from_json(self, file_name: str) -> bool: """ Loads a graph from a json file. @param file_name: The path to the json file @returns True if the loading was successful, False o.w. """ raise NotImplementedError
def load_from_json(self, file_name: str) -> bool: """ Loads a graph from a json file. @param file_name: The path to the json file @returns True if the loading was successful, False o.w. """ raise NotImplementedError
Python
def save_to_json(self, file_name: str) -> bool: """ Saves the graph in JSON format to a file @param file_name: The path to the out file @return: True if the save was successful, False o.w. """ raise NotImplementedError
def save_to_json(self, file_name: str) -> bool: """ Saves the graph in JSON format to a file @param file_name: The path to the out file @return: True if the save was successful, False o.w. """ raise NotImplementedError
Python
def TSP(self, node_lst: List[int]) -> (List[int], float):
    """
    Finds the shortest path that visits all the nodes in the list
    :param node_lst: A list of node ids
    :return: A list of the node ids in the path, and the overall distance
    """
def TSP(self, node_lst: List[int]) -> (List[int], float):
    """
    Finds the shortest path that visits all the nodes in the list
    :param node_lst: A list of node ids
    :return: A list of the node ids in the path, and the overall distance
    """
Python
def plot_graph(self) -> None: """ Plots the graph. If the nodes have a position, the nodes will be placed there. Otherwise, they will be placed in a random but elegant manner. @return: None """ raise NotImplementedError
def plot_graph(self) -> None: """ Plots the graph. If the nodes have a position, the nodes will be placed there. Otherwise, they will be placed in a random but elegant manner. @return: None """ raise NotImplementedError
Python
def load_from_file(self, file: str):
    """
    Load grid from the specified file
    :param file: relative path from __main__.py to the file
    """
    try:
        with open(file) as f:
            content = [[True if c in ('1', 'o', 'O') else False if c in ('0', '.', '_') else quit(
                f"There is an illegal character '{c}' in the '{file}' file!") for c in line.strip()] for line in f]
    except FileNotFoundError:
        quit(f"File '{file}' was not found!")

    lengths = [len(row) for row in content]
    max_len = max(lengths)
    for row in content:
        diff = max_len - len(row)
        if diff > 0:
            row.extend([False] * diff)
    content = list(map(list, zip(*content)))  # from [y][x] to [x][y]

    self.cell_size = int(min(self.width / len(content), (self.height - MENU_HEIGHT) / len(content[0])))
    if self.cell_size < MIN_CELL_SIZE:
        quit(f"Cell size is too small: '{self.cell_size}', change min: '{MIN_CELL_SIZE}' or modify the number of rows/cols!")
    self.grid_width = int(self.width / self.cell_size)
    self.grid_height = int((self.height - MENU_HEIGHT) / self.cell_size)

    # adding cols and rows to center loaded pattern
    [content.insert(0, [False] * len(content[0])) for _ in range((self.grid_width - len(content)) // 2)]
    [[content[x].insert(0, False) for _ in range((self.grid_height - len(content[-1])) // 2)]
     for x in range(len(content))]

    self.generation = 0
    self.sprites = pg.sprite.Group()
    self.cells = [[Cell(self, self.cell_size, x, y, color=BLACK, alive=True)
                   if x < len(content) and y < len(content[0]) and content[x][y]
                   else Cell(self, self.cell_size, x, y, color=WHITE)
                   for y in range(self.grid_height)] for x in range(self.grid_width)]
def load_from_file(self, file: str):
    """
    Load grid from the specified file
    :param file: relative path from __main__.py to the file
    """
    try:
        with open(file) as f:
            content = [[True if c in ('1', 'o', 'O') else False if c in ('0', '.', '_') else quit(
                f"There is an illegal character '{c}' in the '{file}' file!") for c in line.strip()] for line in f]
    except FileNotFoundError:
        quit(f"File '{file}' was not found!")

    lengths = [len(row) for row in content]
    max_len = max(lengths)
    for row in content:
        diff = max_len - len(row)
        if diff > 0:
            row.extend([False] * diff)
    content = list(map(list, zip(*content)))  # from [y][x] to [x][y]

    self.cell_size = int(min(self.width / len(content), (self.height - MENU_HEIGHT) / len(content[0])))
    if self.cell_size < MIN_CELL_SIZE:
        quit(f"Cell size is too small: '{self.cell_size}', change min: '{MIN_CELL_SIZE}' or modify the number of rows/cols!")
    self.grid_width = int(self.width / self.cell_size)
    self.grid_height = int((self.height - MENU_HEIGHT) / self.cell_size)

    # adding cols and rows to center loaded pattern
    [content.insert(0, [False] * len(content[0])) for _ in range((self.grid_width - len(content)) // 2)]
    [[content[x].insert(0, False) for _ in range((self.grid_height - len(content[-1])) // 2)]
     for x in range(len(content))]

    self.generation = 0
    self.sprites = pg.sprite.Group()
    self.cells = [[Cell(self, self.cell_size, x, y, color=BLACK, alive=True)
                   if x < len(content) and y < len(content[0]) and content[x][y]
                   else Cell(self, self.cell_size, x, y, color=WHITE)
                   for y in range(self.grid_height)] for x in range(self.grid_width)]
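For reference, a tiny hypothetical pattern file accepted by this loader (a glider): '1'/'o'/'O' mark live cells, '0'/'.'/'_' mark dead cells, and shorter rows are padded with dead cells. The file name and the `game` instance below are illustrative.

# create a minimal pattern file
with open("glider.txt", "w") as f:
    f.write(".1.\n"
            "..1\n"
            "111\n")

# game.load_from_file("glider.txt")   # assuming `game` is an instance of the class above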
Python
def save_to_file(self) -> str: """ Create a file to which the current grid will be saved :return: name of the created file """ Path(SAVES).mkdir(parents=True, exist_ok=True) filename = SAVES + datetime.now().strftime('%Y-%m-%dT%H-%M-%S-%f')[:-3] + ".txt" with open(filename, 'w') as f: for y in range(len(self.cells[0])): for x in range(len(self.cells)): f.write('1' if self.cells[x][y].alive else '.') f.write('\n') return filename
def save_to_file(self) -> str: """ Create a file to which the current grid will be saved :return: name of the created file """ Path(SAVES).mkdir(parents=True, exist_ok=True) filename = SAVES + datetime.now().strftime('%Y-%m-%dT%H-%M-%S-%f')[:-3] + ".txt" with open(filename, 'w') as f: for y in range(len(self.cells[0])): for x in range(len(self.cells)): f.write('1' if self.cells[x][y].alive else '.') f.write('\n') return filename
Python
def new(self, action: Action = Action.INIT, file: str = None): """ Called when it is necessary to recreate the grid :param action: This parameter is just passed to the create_list function :param file: path to the pattern file or None """ if not file: self.sprites = pg.sprite.Group() self.grid_width = int(self.width / self.cell_size) self.grid_height = int((self.height - MENU_HEIGHT) / self.cell_size) self.create_list(action) self.margin_x = int((self.width - self.grid_width * self.cell_size) / 2) self.grid_image = pg.Surface([self.grid_width * self.cell_size + 1, self.grid_height * self.cell_size + 1]) self.draw_grid(self.grid_color) self.grid_image.fill(WHITE) self.screen.fill(WHITE)
def new(self, action: Action = Action.INIT, file: str = None): """ Called when it is necessary to recreate the grid :param action: This parameter is just passed to the create_list function :param file: path to the pattern file or None """ if not file: self.sprites = pg.sprite.Group() self.grid_width = int(self.width / self.cell_size) self.grid_height = int((self.height - MENU_HEIGHT) / self.cell_size) self.create_list(action) self.margin_x = int((self.width - self.grid_width * self.cell_size) / 2) self.grid_image = pg.Surface([self.grid_width * self.cell_size + 1, self.grid_height * self.cell_size + 1]) self.draw_grid(self.grid_color) self.grid_image.fill(WHITE) self.screen.fill(WHITE)
Python
def create_list(self, action: Action):
    """
    Creates a list of Cell type objects; depending on the action, the old list may be copied
    :param action: DECREASE - when the new grid will be smaller, INCREASE - when the new grid will be bigger,
        or INIT - if there is no need to copy cell states from the old grid.
    """
    if action is Action.INIT:
        # just create new list of cells with random states
        self.cells = [[Cell(self, self.cell_size, x, y)
                       for y in range(self.grid_height)] for x in range(self.grid_width)]
        self.fill_grid()
    elif action is Action.INCREASE:
        # extend the existing list by copying cells from the old list and adding new dead cells to the rest of
        # the indexes
        self.cells = [[Cell(self, self.cell_size, x, y, color=self.cells[x][y].color, alive=self.cells[x][y].alive)
                       if x < len(self.cells) and y < len(self.cells[0])
                       else Cell(self, self.cell_size, x, y, color=WHITE)
                       for y in range(self.grid_height)] for x in range(self.grid_width)]
    elif action is Action.DECREASE:
        # copy bigger list to the smaller one (so copies only cells which will fit into the new one) - by new lower
        # indexes - grid_width and grid_height
        self.cells = [[Cell(self, self.cell_size, x, y, color=self.cells[x][y].color, alive=self.cells[x][y].alive)
                       for y in range(self.grid_height)] for x in range(self.grid_width)]
def create_list(self, action: Action):
    """
    Creates a list of Cell type objects; depending on the action, the old list may be copied
    :param action: DECREASE - when the new grid will be smaller, INCREASE - when the new grid will be bigger,
        or INIT - if there is no need to copy cell states from the old grid.
    """
    if action is Action.INIT:
        # just create new list of cells with random states
        self.cells = [[Cell(self, self.cell_size, x, y)
                       for y in range(self.grid_height)] for x in range(self.grid_width)]
        self.fill_grid()
    elif action is Action.INCREASE:
        # extend the existing list by copying cells from the old list and adding new dead cells to the rest of
        # the indexes
        self.cells = [[Cell(self, self.cell_size, x, y, color=self.cells[x][y].color, alive=self.cells[x][y].alive)
                       if x < len(self.cells) and y < len(self.cells[0])
                       else Cell(self, self.cell_size, x, y, color=WHITE)
                       for y in range(self.grid_height)] for x in range(self.grid_width)]
    elif action is Action.DECREASE:
        # copy bigger list to the smaller one (so copies only cells which will fit into the new one) - by new lower
        # indexes - grid_width and grid_height
        self.cells = [[Cell(self, self.cell_size, x, y, color=self.cells[x][y].color, alive=self.cells[x][y].alive)
                       for y in range(self.grid_height)] for x in range(self.grid_width)]
Python
def draw_grid(self, color=GREY): """ Draw the additional grid/net :param color: color of the drawn lines """ self.grid_lines = pg.Surface([self.grid_width * self.cell_size + 1, self.grid_height * self.cell_size + 1], SRCALPHA) self.grid_lines.fill(WHITE + (0,)) width, height = self.grid_width * self.cell_size, self.grid_height * self.cell_size pg.draw.lines(self.grid_lines, GREY, True, ((0, 0), (width, 0), (width, height), (0, height))) # border if color is None: return for x in range(self.cell_size, width, self.cell_size): pg.draw.line(self.grid_lines, color, (x, 1), (x, height - 1)) for y in range(self.cell_size, height, self.cell_size): pg.draw.line(self.grid_lines, color, (1, y), (width - 1, y))
def draw_grid(self, color=GREY): """ Draw the additional grid/net :param color: color of the drawn lines """ self.grid_lines = pg.Surface([self.grid_width * self.cell_size + 1, self.grid_height * self.cell_size + 1], SRCALPHA) self.grid_lines.fill(WHITE + (0,)) width, height = self.grid_width * self.cell_size, self.grid_height * self.cell_size pg.draw.lines(self.grid_lines, GREY, True, ((0, 0), (width, 0), (width, height), (0, height))) # border if color is None: return for x in range(self.cell_size, width, self.cell_size): pg.draw.line(self.grid_lines, color, (x, 1), (x, height - 1)) for y in range(self.cell_size, height, self.cell_size): pg.draw.line(self.grid_lines, color, (1, y), (width - 1, y))
Python
def draw_info(self, color=BLACK, background=WHITE): """ Displaying information about generation and alive cells :param color: color of the drawn text :param background: color of the drawn background """ render = lambda txt: self.font_info.render(txt, False, color, background) text = render(f'Generation: {self.generation}') text2 = render(f'Alive cells: {self.count_alive_cells()}') pg.draw.rect(self.screen, WHITE, (0, self.height - MENU_HEIGHT + 1, self.width, MENU_HEIGHT)) self.screen.blits([(text, (0, self.height - MENU_HEIGHT + 1)), (text2, (self.width - text2.get_size()[0], self.height - MENU_HEIGHT + 1))])
def draw_info(self, color=BLACK, background=WHITE): """ Displaying information about generation and alive cells :param color: color of the drawn text :param background: color of the drawn background """ render = lambda txt: self.font_info.render(txt, False, color, background) text = render(f'Generation: {self.generation}') text2 = render(f'Alive cells: {self.count_alive_cells()}') pg.draw.rect(self.screen, WHITE, (0, self.height - MENU_HEIGHT + 1, self.width, MENU_HEIGHT)) self.screen.blits([(text, (0, self.height - MENU_HEIGHT + 1)), (text2, (self.width - text2.get_size()[0], self.height - MENU_HEIGHT + 1))])