Dataset columns (name, type, observed value range):

repo              string, 7-55 chars
path              string, 4-127 chars
func_name         string, 1-88 chars
original_string   string, 75-19.8k chars
language          string, 1 distinct value
code              string, 75-19.8k chars
code_tokens       list
docstring         string, 3-17.3k chars
docstring_tokens  list
sha               string, 40 chars
url               string, 87-242 chars
partition         string, 1 distinct value
BerkeleyAutomation/autolab_core
autolab_core/experiment_logger.py
ExperimentLogger.gen_experiment_ref
python
def gen_experiment_ref(experiment_tag, n=10):
    """ Generate a random string for naming.

    Parameters
    ----------
    experiment_tag : :obj:`str`
        tag to prefix name with
    n : int
        number of random chars to use

    Returns
    -------
    :obj:`str`
        string experiment ref
    """
    experiment_id = gen_experiment_id(n=n)
    return '{0}_{1}'.format(experiment_tag, experiment_id)
[ "def", "gen_experiment_ref", "(", "experiment_tag", ",", "n", "=", "10", ")", ":", "experiment_id", "=", "gen_experiment_id", "(", "n", "=", "n", ")", "return", "'{0}_{1}'", ".", "format", "(", "experiment_tag", ",", "experiment_id", ")" ]
Generate a random string for naming.

Parameters
----------
experiment_tag : :obj:`str`
    tag to prefix name with
n : int
    number of random chars to use

Returns
-------
:obj:`str`
    string experiment ref
[ "Generate", "a", "random", "string", "for", "naming", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/experiment_logger.py#L82-L98
train
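A minimal usage sketch for the function above (the import path and static-method-style call are assumptions based on the repo and func_name fields; gen_experiment_id is a helper from the same module and the printed suffix is illustrative):

from autolab_core.experiment_logger import ExperimentLogger

# Hypothetically yields something like 'grasp_trial_k3j2h9d8s7':
# the tag plus an underscore plus a 10-character random id.
ref = ExperimentLogger.gen_experiment_ref('grasp_trial', n=10)
print(ref)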
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
Tensor.add
python
def add(self, datapoint):
    """ Adds the datapoint to the tensor if room is available. """
    if not self.is_full:
        self.set_datapoint(self.cur_index, datapoint)
        self.cur_index += 1
[ "def", "add", "(", "self", ",", "datapoint", ")", ":", "if", "not", "self", ".", "is_full", ":", "self", ".", "set_datapoint", "(", "self", ".", "cur_index", ",", "datapoint", ")", "self", ".", "cur_index", "+=", "1" ]
Adds the datapoint to the tensor if room is available.
[ "Adds", "the", "datapoint", "to", "the", "tensor", "if", "room", "is", "available", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L121-L125
train
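A hedged sketch of the write path above. The Tensor(shape, dtype) constructor signature is inferred from Tensor.load and TensorDataset._allocate_tensors later in this section; the capacity and datapoint shape are made up.

import numpy as np
from autolab_core.tensor_dataset import Tensor

# The first shape entry is the capacity (datapoints per file); the rest is the datapoint shape.
poses = Tensor([100, 3], np.float32)
poses.add(np.array([0.1, 0.2, 0.3], dtype=np.float32))  # silently ignored once the tensor is full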
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
Tensor.add_batch
python
def add_batch(self, datapoints):
    """ Adds a batch of datapoints to the tensor if room is available. """
    num_datapoints_to_add = datapoints.shape[0]
    end_index = self.cur_index + num_datapoints_to_add
    if end_index <= self.num_datapoints:
        self.data[self.cur_index:end_index,...] = datapoints
        self.cur_index = end_index
[ "def", "add_batch", "(", "self", ",", "datapoints", ")", ":", "num_datapoints_to_add", "=", "datapoints", ".", "shape", "[", "0", "]", "end_index", "=", "self", ".", "cur_index", "+", "num_datapoints_to_add", "if", "end_index", "<=", "self", ".", "num_datapoints", ":", "self", ".", "data", "[", "self", ".", "cur_index", ":", "end_index", ",", "...", "]", "=", "datapoints", "self", ".", "cur_index", "=", "end_index" ]
Adds a batch of datapoints to the tensor if room is available.
[ "Adds", "a", "batch", "of", "datapoints", "to", "the", "tensor", "if", "room", "is", "available", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L127-L133
train
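Under the same assumptions as the previous sketch, a batched insert only lands if the whole batch fits in the remaining capacity:

import numpy as np
from autolab_core.tensor_dataset import Tensor

poses = Tensor([100, 3], np.float32)
poses.add_batch(np.zeros((10, 3), dtype=np.float32))  # writes rows 0-9 with a single slice assignment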
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
Tensor.datapoint
python
def datapoint(self, ind):
    """ Returns the datapoint at the given index. """
    if self.height is None:
        return self.data[ind]
    return self.data[ind, ...].copy()
[ "def", "datapoint", "(", "self", ",", "ind", ")", ":", "if", "self", ".", "height", "is", "None", ":", "return", "self", ".", "data", "[", "ind", "]", "return", "self", ".", "data", "[", "ind", ",", "...", "]", ".", "copy", "(", ")" ]
Returns the datapoint at the given index.
[ "Returns", "the", "datapoint", "at", "the", "given", "index", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L141-L145
train
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
Tensor.set_datapoint
python
def set_datapoint(self, ind, datapoint):
    """ Sets the value of the datapoint at the given index. """
    if ind >= self.num_datapoints:
        raise ValueError('Index %d out of bounds! Tensor has %d datapoints' %(ind, self.num_datapoints))
    self.data[ind, ...] = np.array(datapoint).astype(self.dtype)
[ "def", "set_datapoint", "(", "self", ",", "ind", ",", "datapoint", ")", ":", "if", "ind", ">=", "self", ".", "num_datapoints", ":", "raise", "ValueError", "(", "'Index %d out of bounds! Tensor has %d datapoints'", "%", "(", "ind", ",", "self", ".", "num_datapoints", ")", ")", "self", ".", "data", "[", "ind", ",", "...", "]", "=", "np", ".", "array", "(", "datapoint", ")", ".", "astype", "(", "self", ".", "dtype", ")" ]
Sets the value of the datapoint at the given index.
[ "Sets", "the", "value", "of", "the", "datapoint", "at", "the", "given", "index", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L147-L151
train
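A round-trip sketch combining the two accessors above (same assumed constructor as in the earlier Tensor sketches; index and values are arbitrary):

import numpy as np
from autolab_core.tensor_dataset import Tensor

poses = Tensor([100, 3], np.float32)
poses.set_datapoint(5, [1.0, 2.0, 3.0])  # cast to the tensor dtype and stored at index 5
row = poses.datapoint(5)                 # returned as a copy when the tensor has a non-scalar datapoint shape
# poses.set_datapoint(100, ...) would raise ValueError: index out of bounds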
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
Tensor.data_slice
python
def data_slice(self, slice_ind):
    """ Returns a slice of datapoints """
    if self.height is None:
        return self.data[slice_ind]
    return self.data[slice_ind, ...]
[ "def", "data_slice", "(", "self", ",", "slice_ind", ")", ":", "if", "self", ".", "height", "is", "None", ":", "return", "self", ".", "data", "[", "slice_ind", "]", "return", "self", ".", "data", "[", "slice_ind", ",", "...", "]" ]
Returns a slice of datapoints
[ "Returns", "a", "slice", "of", "datapoints" ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L153-L157
train
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
Tensor.save
python
def save(self, filename, compressed=True):
    """ Save a tensor to disk. """
    # check for data
    if not self.has_data:
        return False

    # read ext and save accordingly
    _, file_ext = os.path.splitext(filename)
    if compressed:
        if file_ext != COMPRESSED_TENSOR_EXT:
            raise ValueError('Can only save compressed tensor with %s extension' %(COMPRESSED_TENSOR_EXT))
        np.savez_compressed(filename, self.data[:self.cur_index,...])
    else:
        if file_ext != TENSOR_EXT:
            raise ValueError('Can only save tensor with .npy extension')
        np.save(filename, self.data[:self.cur_index,...])
    return True
[ "def", "save", "(", "self", ",", "filename", ",", "compressed", "=", "True", ")", ":", "# check for data", "if", "not", "self", ".", "has_data", ":", "return", "False", "# read ext and save accordingly", "_", ",", "file_ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "compressed", ":", "if", "file_ext", "!=", "COMPRESSED_TENSOR_EXT", ":", "raise", "ValueError", "(", "'Can only save compressed tensor with %s extension'", "%", "(", "COMPRESSED_TENSOR_EXT", ")", ")", "np", ".", "savez_compressed", "(", "filename", ",", "self", ".", "data", "[", ":", "self", ".", "cur_index", ",", "...", "]", ")", "else", ":", "if", "file_ext", "!=", "TENSOR_EXT", ":", "raise", "ValueError", "(", "'Can only save tensor with .npy extension'", ")", "np", ".", "save", "(", "filename", ",", "self", ".", "data", "[", ":", "self", ".", "cur_index", ",", "...", "]", ")", "return", "True" ]
Save a tensor to disk.
[ "Save", "a", "tensor", "to", "disk", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L159-L176
train
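A hedged save example. COMPRESSED_TENSOR_EXT and TENSOR_EXT are module constants not shown in this section; '.npz' and '.npy' are assumed from the numpy calls used.

import numpy as np
from autolab_core.tensor_dataset import Tensor

poses = Tensor([100, 3], np.float32)
poses.add(np.array([0.1, 0.2, 0.3], dtype=np.float32))
poses.save('poses_00000.npz', compressed=True)    # writes only the filled rows; returns False if the tensor is empty
# poses.save('poses_00000.npy', compressed=False) # would use plain np.save instead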
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
Tensor.load
python
def load(filename, compressed=True, prealloc=None):
    """ Loads a tensor from disk. """
    # switch load based on file ext
    _, file_ext = os.path.splitext(filename)
    if compressed:
        if file_ext != COMPRESSED_TENSOR_EXT:
            raise ValueError('Can only load compressed tensor with %s extension' %(COMPRESSED_TENSOR_EXT))
        data = np.load(filename)['arr_0']
    else:
        if file_ext != TENSOR_EXT:
            raise ValueError('Can only load tensor with .npy extension')
        data = np.load(filename)

    # fill prealloc tensor
    if prealloc is not None:
        prealloc.reset()
        prealloc.add_batch(data)
        return prealloc

    # init new tensor
    tensor = Tensor(data.shape, data.dtype, data=data)
    return tensor
[ "def", "load", "(", "filename", ",", "compressed", "=", "True", ",", "prealloc", "=", "None", ")", ":", "# switch load based on file ext", "_", ",", "file_ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "compressed", ":", "if", "file_ext", "!=", "COMPRESSED_TENSOR_EXT", ":", "raise", "ValueError", "(", "'Can only load compressed tensor with %s extension'", "%", "(", "COMPRESSED_TENSOR_EXT", ")", ")", "data", "=", "np", ".", "load", "(", "filename", ")", "[", "'arr_0'", "]", "else", ":", "if", "file_ext", "!=", "TENSOR_EXT", ":", "raise", "ValueError", "(", "'Can only load tensor with .npy extension'", ")", "data", "=", "np", ".", "load", "(", "filename", ")", "# fill prealloc tensor", "if", "prealloc", "is", "not", "None", ":", "prealloc", ".", "reset", "(", ")", "prealloc", ".", "add_batch", "(", "data", ")", "return", "prealloc", "# init new tensor", "tensor", "=", "Tensor", "(", "data", ".", "shape", ",", "data", ".", "dtype", ",", "data", "=", "data", ")", "return", "tensor" ]
Loads a tensor from disk.
[ "Loads", "a", "tensor", "from", "disk", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L179-L200
train
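The matching load path under the same extension assumptions (a sketch, not verified against the repo):

from autolab_core.tensor_dataset import Tensor

# Returns a new Tensor unless prealloc is given, in which case that tensor is
# reset, refilled via add_batch, and returned.
poses = Tensor.load('poses_00000.npz', compressed=True)
print(poses.data.shape)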
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.datapoint_indices_for_tensor
python
def datapoint_indices_for_tensor(self, tensor_index):
    """ Returns the indices for all datapoints in the given tensor. """
    if tensor_index >= self._num_tensors:
        raise ValueError('Tensor index %d is greater than the number of tensors (%d)' %(tensor_index, self._num_tensors))
    return self._file_num_to_indices[tensor_index]
[ "def", "datapoint_indices_for_tensor", "(", "self", ",", "tensor_index", ")", ":", "if", "tensor_index", ">=", "self", ".", "_num_tensors", ":", "raise", "ValueError", "(", "'Tensor index %d is greater than the number of tensors (%d)'", "%", "(", "tensor_index", ",", "self", ".", "_num_tensors", ")", ")", "return", "self", ".", "_file_num_to_indices", "[", "tensor_index", "]" ]
Returns the indices for all datapoints in the given tensor.
[ "Returns", "the", "indices", "for", "all", "datapoints", "in", "the", "given", "tensor", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L415-L419
train
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.tensor_index
python
def tensor_index(self, datapoint_index):
    """ Returns the index of the tensor containing the referenced datapoint. """
    if datapoint_index >= self._num_datapoints:
        raise ValueError('Datapoint index %d is greater than the number of datapoints (%d)' %(datapoint_index, self._num_datapoints))
    return self._index_to_file_num[datapoint_index]
[ "def", "tensor_index", "(", "self", ",", "datapoint_index", ")", ":", "if", "datapoint_index", ">=", "self", ".", "_num_datapoints", ":", "raise", "ValueError", "(", "'Datapoint index %d is greater than the number of datapoints (%d)'", "%", "(", "datapoint_index", ",", "self", ".", "_num_datapoints", ")", ")", "return", "self", ".", "_index_to_file_num", "[", "datapoint_index", "]" ]
Returns the index of the tensor containing the referenced datapoint.
[ "Returns", "the", "index", "of", "the", "tensor", "containing", "the", "referenced", "datapoint", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L421-L425
train
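The two lookups above are inverses over the dataset's global indexing. An illustrative sketch, assuming a TensorDataset named dataset that was opened as shown further down and that stores 100 datapoints per file (both assumptions, not values from the repo):

tensor_ind = dataset.tensor_index(250)                       # 250 // 100 -> 2
indices = dataset.datapoint_indices_for_tensor(tensor_ind)   # global indices 200..299 when that file is full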
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.generate_tensor_filename
python
def generate_tensor_filename(self, field_name, file_num, compressed=True):
    """ Generate a filename for a tensor. """
    file_ext = TENSOR_EXT
    if compressed:
        file_ext = COMPRESSED_TENSOR_EXT
    filename = os.path.join(self.filename, 'tensors', '%s_%05d%s' %(field_name, file_num, file_ext))
    return filename
[ "def", "generate_tensor_filename", "(", "self", ",", "field_name", ",", "file_num", ",", "compressed", "=", "True", ")", ":", "file_ext", "=", "TENSOR_EXT", "if", "compressed", ":", "file_ext", "=", "COMPRESSED_TENSOR_EXT", "filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "filename", ",", "'tensors'", ",", "'%s_%05d%s'", "%", "(", "field_name", ",", "file_num", ",", "file_ext", ")", ")", "return", "filename" ]
Generate a filename for a tensor.
[ "Generate", "a", "filename", "for", "a", "tensor", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L427-L433
train
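Given the '%s_%05d%s' pattern above, a call like this would produce a path of the following shape (field name and dataset root are hypothetical; '.npz' assumes COMPRESSED_TENSOR_EXT from the earlier records):

filename = dataset.generate_tensor_filename('depth_ims', 3, compressed=True)
# e.g. '<dataset_root>/tensors/depth_ims_00003.npz'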
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset._allocate_tensors
python
def _allocate_tensors(self):
    """ Allocates the tensors in the dataset. """
    # init tensors dict
    self._tensors = {}

    # allocate tensor for each data field
    for field_name, field_spec in self._config['fields'].items():
        # parse attributes
        field_dtype = np.dtype(field_spec['dtype'])

        # parse shape
        field_shape = [self._datapoints_per_file]
        if 'height' in field_spec.keys():
            field_shape.append(field_spec['height'])
        if 'width' in field_spec.keys():
            field_shape.append(field_spec['width'])
        if 'channels' in field_spec.keys():
            field_shape.append(field_spec['channels'])

        # create tensor
        self._tensors[field_name] = Tensor(field_shape, field_dtype)
[ "def", "_allocate_tensors", "(", "self", ")", ":", "# init tensors dict", "self", ".", "_tensors", "=", "{", "}", "# allocate tensor for each data field", "for", "field_name", ",", "field_spec", "in", "self", ".", "_config", "[", "'fields'", "]", ".", "items", "(", ")", ":", "# parse attributes", "field_dtype", "=", "np", ".", "dtype", "(", "field_spec", "[", "'dtype'", "]", ")", "# parse shape", "field_shape", "=", "[", "self", ".", "_datapoints_per_file", "]", "if", "'height'", "in", "field_spec", ".", "keys", "(", ")", ":", "field_shape", ".", "append", "(", "field_spec", "[", "'height'", "]", ")", "if", "'width'", "in", "field_spec", ".", "keys", "(", ")", ":", "field_shape", ".", "append", "(", "field_spec", "[", "'width'", "]", ")", "if", "'channels'", "in", "field_spec", ".", "keys", "(", ")", ":", "field_shape", ".", "append", "(", "field_spec", "[", "'channels'", "]", ")", "# create tensor", "self", ".", "_tensors", "[", "field_name", "]", "=", "Tensor", "(", "field_shape", ",", "field_dtype", ")" ]
Allocates the tensors in the dataset.
[ "Allocates", "the", "tensors", "in", "the", "dataset", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L459-L479
train
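The loop above implies a 'fields' section in the dataset configuration roughly like the following. This is a hypothetical example: 'fields', 'dtype', 'height', 'width' and 'channels' come from the code above, while the other key name and all values are guesses.

config = {
    'datapoints_per_file': 100,   # key name is a guess; the code only shows self._datapoints_per_file
    'fields': {
        'depth_ims': {'dtype': 'float32', 'height': 32, 'width': 32, 'channels': 1},
        'grasp_metrics': {'dtype': 'float32'},
    },
}
# 'depth_ims' would be allocated as a Tensor of shape [100, 32, 32, 1], 'grasp_metrics' as shape [100].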
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.add
python
def add(self, datapoint):
    """ Adds a datapoint to the file. """
    # check access level
    if self._access_mode == READ_ONLY_ACCESS:
        raise ValueError('Cannot add datapoints with read-only access')

    # read tensor datapoint ind
    tensor_ind = self._num_datapoints // self._datapoints_per_file

    # check datapoint fields
    for field_name in datapoint.keys():
        if field_name not in self.field_names:
            raise ValueError('Field %s not specified in dataset' %(field_name))

    # store data in tensor
    cur_num_tensors = self._num_tensors
    new_num_tensors = cur_num_tensors
    for field_name in self.field_names:
        if tensor_ind < cur_num_tensors:
            # load tensor if it was previously allocated
            self._tensors[field_name] = self.tensor(field_name, tensor_ind)
        else:
            # clear tensor if this is a new tensor
            self._tensors[field_name].reset()
            self._tensor_cache_file_num[field_name] = tensor_ind
            new_num_tensors = cur_num_tensors + 1
            self._has_unsaved_data = True
        self._tensors[field_name].add(datapoint[field_name])
    cur_size = self._tensors[field_name].size

    # update num tensors
    if new_num_tensors > cur_num_tensors:
        self._num_tensors = new_num_tensors

    # update file indices
    self._index_to_file_num[self._num_datapoints] = tensor_ind
    self._file_num_to_indices[tensor_ind] = tensor_ind * self._datapoints_per_file + np.arange(cur_size)

    # save if tensors are full
    field_name = self.field_names[0]
    if self._tensors[field_name].is_full:
        # save next tensors to file
        logging.info('Dataset %s: Writing tensor %d to disk' %(self.filename, tensor_ind))
        self.write()

    # increment num datapoints
    self._num_datapoints += 1
[ "def", "add", "(", "self", ",", "datapoint", ")", ":", "# check access level", "if", "self", ".", "_access_mode", "==", "READ_ONLY_ACCESS", ":", "raise", "ValueError", "(", "'Cannot add datapoints with read-only access'", ")", "# read tensor datapoint ind", "tensor_ind", "=", "self", ".", "_num_datapoints", "//", "self", ".", "_datapoints_per_file", "# check datapoint fields", "for", "field_name", "in", "datapoint", ".", "keys", "(", ")", ":", "if", "field_name", "not", "in", "self", ".", "field_names", ":", "raise", "ValueError", "(", "'Field %s not specified in dataset'", "%", "(", "field_name", ")", ")", "# store data in tensor", "cur_num_tensors", "=", "self", ".", "_num_tensors", "new_num_tensors", "=", "cur_num_tensors", "for", "field_name", "in", "self", ".", "field_names", ":", "if", "tensor_ind", "<", "cur_num_tensors", ":", "# load tensor if it was previously allocated", "self", ".", "_tensors", "[", "field_name", "]", "=", "self", ".", "tensor", "(", "field_name", ",", "tensor_ind", ")", "else", ":", "# clear tensor if this is a new tensor", "self", ".", "_tensors", "[", "field_name", "]", ".", "reset", "(", ")", "self", ".", "_tensor_cache_file_num", "[", "field_name", "]", "=", "tensor_ind", "new_num_tensors", "=", "cur_num_tensors", "+", "1", "self", ".", "_has_unsaved_data", "=", "True", "self", ".", "_tensors", "[", "field_name", "]", ".", "add", "(", "datapoint", "[", "field_name", "]", ")", "cur_size", "=", "self", ".", "_tensors", "[", "field_name", "]", ".", "size", "# update num tensors", "if", "new_num_tensors", ">", "cur_num_tensors", ":", "self", ".", "_num_tensors", "=", "new_num_tensors", "# update file indices", "self", ".", "_index_to_file_num", "[", "self", ".", "_num_datapoints", "]", "=", "tensor_ind", "self", ".", "_file_num_to_indices", "[", "tensor_ind", "]", "=", "tensor_ind", "*", "self", ".", "_datapoints_per_file", "+", "np", ".", "arange", "(", "cur_size", ")", "# save if tensors are full", "field_name", "=", "self", ".", "field_names", "[", "0", "]", "if", "self", ".", "_tensors", "[", "field_name", "]", ".", "is_full", ":", "# save next tensors to file", "logging", ".", "info", "(", "'Dataset %s: Writing tensor %d to disk'", "%", "(", "self", ".", "filename", ",", "tensor_ind", ")", ")", "self", ".", "write", "(", ")", "# increment num datapoints", "self", ".", "_num_datapoints", "+=", "1" ]
Adds a datapoint to the file.
[ "Adds", "a", "datapoint", "to", "the", "file", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L481-L527
train
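A hedged sketch of the write loop above. It assumes dataset is a TensorDataset created with write access (constructing one is not shown in this section) and reuses the hypothetical field layout from the config sketch above.

import numpy as np

for _ in range(5):
    dataset.add({'depth_ims': np.zeros((32, 32, 1), dtype=np.float32),
                 'grasp_metrics': np.float32(0.5)})
# Once the in-memory tensors fill up, add() logs a message, calls write(), and the
# tensors are flushed to <dataset_root>/tensors/ automatically.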
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.datapoint
python
def datapoint(self, ind, field_names=None):
    """ Loads a tensor datapoint for a given global index.

    Parameters
    ----------
    ind : int
        global index in the tensor
    field_names : :obj:`list` of str
        field names to load

    Returns
    -------
    :obj:`TensorDatapoint`
        the desired tensor datapoint
    """
    # flush if necessary
    if self._has_unsaved_data:
        self.flush()

    # check valid input
    if ind >= self._num_datapoints:
        raise ValueError('Index %d larger than the number of datapoints in the dataset (%d)' %(ind, self._num_datapoints))

    # load the field names
    if field_names is None:
        field_names = self.field_names

    # return the datapoint
    datapoint = TensorDatapoint(field_names)
    file_num = self._index_to_file_num[ind]
    for field_name in field_names:
        tensor = self.tensor(field_name, file_num)
        tensor_index = ind % self._datapoints_per_file
        datapoint[field_name] = tensor.datapoint(tensor_index)
    return datapoint
[ "def", "datapoint", "(", "self", ",", "ind", ",", "field_names", "=", "None", ")", ":", "# flush if necessary", "if", "self", ".", "_has_unsaved_data", ":", "self", ".", "flush", "(", ")", "# check valid input", "if", "ind", ">=", "self", ".", "_num_datapoints", ":", "raise", "ValueError", "(", "'Index %d larger than the number of datapoints in the dataset (%d)'", "%", "(", "ind", ",", "self", ".", "_num_datapoints", ")", ")", "# load the field names", "if", "field_names", "is", "None", ":", "field_names", "=", "self", ".", "field_names", "# return the datapoint", "datapoint", "=", "TensorDatapoint", "(", "field_names", ")", "file_num", "=", "self", ".", "_index_to_file_num", "[", "ind", "]", "for", "field_name", "in", "field_names", ":", "tensor", "=", "self", ".", "tensor", "(", "field_name", ",", "file_num", ")", "tensor_index", "=", "ind", "%", "self", ".", "_datapoints_per_file", "datapoint", "[", "field_name", "]", "=", "tensor", ".", "datapoint", "(", "tensor_index", ")", "return", "datapoint" ]
Loads a tensor datapoint for a given global index.

Parameters
----------
ind : int
    global index in the tensor
field_names : :obj:`list` of str
    field names to load

Returns
-------
:obj:`TensorDatapoint`
    the desired tensor datapoint
[ "Loads", "a", "tensor", "datapoint", "for", "a", "given", "global", "index", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L533-L567
train
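Reading a datapoint back, per the record above (again a sketch; the field name follows the earlier hypothetical config):

dp = dataset.datapoint(3, field_names=['depth_ims'])  # flushes unsaved data first if necessary
depth = dp['depth_ims']                               # TensorDatapoint is indexed by field name, as in the loop above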
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.tensor
python
def tensor(self, field_name, tensor_ind):
    """ Returns the tensor for a given field and tensor index.

    Parameters
    ----------
    field_name : str
        the name of the field to load
    tensor_index : int
        the index of the tensor

    Returns
    -------
    :obj:`Tensor`
        the desired tensor
    """
    if tensor_ind == self._tensor_cache_file_num[field_name]:
        return self._tensors[field_name]
    filename = self.generate_tensor_filename(field_name, tensor_ind, compressed=True)
    Tensor.load(filename, compressed=True, prealloc=self._tensors[field_name])
    self._tensor_cache_file_num[field_name] = tensor_ind
    return self._tensors[field_name]
[ "def", "tensor", "(", "self", ",", "field_name", ",", "tensor_ind", ")", ":", "if", "tensor_ind", "==", "self", ".", "_tensor_cache_file_num", "[", "field_name", "]", ":", "return", "self", ".", "_tensors", "[", "field_name", "]", "filename", "=", "self", ".", "generate_tensor_filename", "(", "field_name", ",", "tensor_ind", ",", "compressed", "=", "True", ")", "Tensor", ".", "load", "(", "filename", ",", "compressed", "=", "True", ",", "prealloc", "=", "self", ".", "_tensors", "[", "field_name", "]", ")", "self", ".", "_tensor_cache_file_num", "[", "field_name", "]", "=", "tensor_ind", "return", "self", ".", "_tensors", "[", "field_name", "]" ]
Returns the tensor for a given field and tensor index.

Parameters
----------
field_name : str
    the name of the field to load
tensor_index : int
    the index of the tensor

Returns
-------
:obj:`Tensor`
    the desired tensor
[ "Returns", "the", "tensor", "for", "a", "given", "field", "and", "tensor", "index", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L569-L590
train
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.delete_last
python
def delete_last(self, num_to_delete=1):
    """ Deletes the last N datapoints from the dataset.

    Parameters
    ----------
    num_to_delete : int
        the number of datapoints to remove from the end of the dataset
    """
    # check access level
    if self._access_mode == READ_ONLY_ACCESS:
        raise ValueError('Cannot delete datapoints with read-only access')

    # check num to delete
    if num_to_delete > self._num_datapoints:
        raise ValueError('Cannot remove more than the number of datapoints in the dataset')

    # compute indices
    last_datapoint_ind = self._num_datapoints - 1
    last_tensor_ind = last_datapoint_ind // self._datapoints_per_file

    new_last_datapoint_ind = self._num_datapoints - 1 - num_to_delete
    new_num_datapoints = new_last_datapoint_ind + 1
    new_last_datapoint_ind = max(new_last_datapoint_ind, 0)
    new_last_tensor_ind = new_last_datapoint_ind // self._datapoints_per_file

    # delete all but the last tensor
    delete_tensor_ind = range(new_last_tensor_ind+1, last_tensor_ind+1)
    for tensor_ind in delete_tensor_ind:
        for field_name in self.field_names:
            filename = self.generate_tensor_filename(field_name, tensor_ind)
            os.remove(filename)

    # update last tensor
    dataset_empty = False
    target_tensor_size = new_num_datapoints % self._datapoints_per_file
    if target_tensor_size == 0:
        if new_num_datapoints > 0:
            target_tensor_size = self._datapoints_per_file
        else:
            dataset_empty = True
    for field_name in self.field_names:
        new_last_tensor = self.tensor(field_name, new_last_tensor_ind)
        while new_last_tensor.size > target_tensor_size:
            new_last_tensor.delete_last()
        filename = self.generate_tensor_filename(field_name, new_last_tensor_ind)
        new_last_tensor.save(filename, compressed=True)
        if not new_last_tensor.has_data:
            os.remove(filename)
            new_last_tensor.reset()

    # update num datapoints
    if self._num_datapoints - 1 - num_to_delete >= 0:
        self._num_datapoints = new_num_datapoints
    else:
        self._num_datapoints = 0

    # handle deleted tensor
    self._num_tensors = new_last_tensor_ind + 1
    if dataset_empty:
        self._num_tensors = 0
[ "def", "delete_last", "(", "self", ",", "num_to_delete", "=", "1", ")", ":", "# check access level", "if", "self", ".", "_access_mode", "==", "READ_ONLY_ACCESS", ":", "raise", "ValueError", "(", "'Cannot delete datapoints with read-only access'", ")", "# check num to delete", "if", "num_to_delete", ">", "self", ".", "_num_datapoints", ":", "raise", "ValueError", "(", "'Cannot remove more than the number of datapoints in the dataset'", ")", "# compute indices", "last_datapoint_ind", "=", "self", ".", "_num_datapoints", "-", "1", "last_tensor_ind", "=", "last_datapoint_ind", "//", "self", ".", "_datapoints_per_file", "new_last_datapoint_ind", "=", "self", ".", "_num_datapoints", "-", "1", "-", "num_to_delete", "new_num_datapoints", "=", "new_last_datapoint_ind", "+", "1", "new_last_datapoint_ind", "=", "max", "(", "new_last_datapoint_ind", ",", "0", ")", "new_last_tensor_ind", "=", "new_last_datapoint_ind", "//", "self", ".", "_datapoints_per_file", "# delete all but the last tensor", "delete_tensor_ind", "=", "range", "(", "new_last_tensor_ind", "+", "1", ",", "last_tensor_ind", "+", "1", ")", "for", "tensor_ind", "in", "delete_tensor_ind", ":", "for", "field_name", "in", "self", ".", "field_names", ":", "filename", "=", "self", ".", "generate_tensor_filename", "(", "field_name", ",", "tensor_ind", ")", "os", ".", "remove", "(", "filename", ")", "# update last tensor", "dataset_empty", "=", "False", "target_tensor_size", "=", "new_num_datapoints", "%", "self", ".", "_datapoints_per_file", "if", "target_tensor_size", "==", "0", ":", "if", "new_num_datapoints", ">", "0", ":", "target_tensor_size", "=", "self", ".", "_datapoints_per_file", "else", ":", "dataset_empty", "=", "True", "for", "field_name", "in", "self", ".", "field_names", ":", "new_last_tensor", "=", "self", ".", "tensor", "(", "field_name", ",", "new_last_tensor_ind", ")", "while", "new_last_tensor", ".", "size", ">", "target_tensor_size", ":", "new_last_tensor", ".", "delete_last", "(", ")", "filename", "=", "self", ".", "generate_tensor_filename", "(", "field_name", ",", "new_last_tensor_ind", ")", "new_last_tensor", ".", "save", "(", "filename", ",", "compressed", "=", "True", ")", "if", "not", "new_last_tensor", ".", "has_data", ":", "os", ".", "remove", "(", "filename", ")", "new_last_tensor", ".", "reset", "(", ")", "# update num datapoints ", "if", "self", ".", "_num_datapoints", "-", "1", "-", "num_to_delete", ">=", "0", ":", "self", ".", "_num_datapoints", "=", "new_num_datapoints", "else", ":", "self", ".", "_num_datapoints", "=", "0", "# handle deleted tensor", "self", ".", "_num_tensors", "=", "new_last_tensor_ind", "+", "1", "if", "dataset_empty", ":", "self", ".", "_num_tensors", "=", "0" ]
Deletes the last N datapoints from the dataset.

Parameters
----------
num_to_delete : int
    the number of datapoints to remove from the end of the dataset
[ "Deletes", "the", "last", "N", "datapoints", "from", "the", "dataset", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L617-L677
train
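A short usage sketch for delete_last (requires write access; the count is arbitrary):

dataset.delete_last(num_to_delete=10)  # trims the trailing datapoints, rewrites the new last tensor file,
                                       # and removes tensor files that became empty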
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.write
python
def write(self):
    """ Writes all tensors to the next file number. """
    # write the next file for all fields
    for field_name in self.field_names:
        filename = self.generate_tensor_filename(field_name, self._num_tensors-1)
        self._tensors[field_name].save(filename, compressed=True)

    # write the current metadata to file
    json.dump(self._metadata, open(self.metadata_filename, 'w'),
              indent=JSON_INDENT,
              sort_keys=True)

    # update
    self._has_unsaved_data = False
[ "def", "write", "(", "self", ")", ":", "# write the next file for all fields", "for", "field_name", "in", "self", ".", "field_names", ":", "filename", "=", "self", ".", "generate_tensor_filename", "(", "field_name", ",", "self", ".", "_num_tensors", "-", "1", ")", "self", ".", "_tensors", "[", "field_name", "]", ".", "save", "(", "filename", ",", "compressed", "=", "True", ")", "# write the current metadata to file", "json", ".", "dump", "(", "self", ".", "_metadata", ",", "open", "(", "self", ".", "metadata_filename", ",", "'w'", ")", ",", "indent", "=", "JSON_INDENT", ",", "sort_keys", "=", "True", ")", "# update", "self", ".", "_has_unsaved_data", "=", "False" ]
Writes all tensors to the next file number.
[ "Writes", "all", "tensors", "to", "the", "next", "file", "number", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L696-L709
train
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.open
python
def open(dataset_dir, access_mode=READ_ONLY_ACCESS):
    """ Opens a tensor dataset. """
    # check access mode
    if access_mode == WRITE_ACCESS:
        raise ValueError('Cannot open a dataset with write-only access')

    # read config
    try:
        # json load
        config_filename = os.path.join(dataset_dir, 'config.json')
        config = json.load(open(config_filename, 'r'))
    except:
        # YAML load
        config_filename = os.path.join(dataset_dir, 'config.yaml')
        config = YamlConfig(config_filename)

    # open dataset
    dataset = TensorDataset(dataset_dir, config, access_mode=access_mode)
    return dataset
[ "def", "open", "(", "dataset_dir", ",", "access_mode", "=", "READ_ONLY_ACCESS", ")", ":", "# check access mode", "if", "access_mode", "==", "WRITE_ACCESS", ":", "raise", "ValueError", "(", "'Cannot open a dataset with write-only access'", ")", "# read config", "try", ":", "# json load", "config_filename", "=", "os", ".", "path", ".", "join", "(", "dataset_dir", ",", "'config.json'", ")", "config", "=", "json", ".", "load", "(", "open", "(", "config_filename", ",", "'r'", ")", ")", "except", ":", "# YAML load", "config_filename", "=", "os", ".", "path", ".", "join", "(", "dataset_dir", ",", "'config.yaml'", ")", "config", "=", "YamlConfig", "(", "config_filename", ")", "# open dataset", "dataset", "=", "TensorDataset", "(", "dataset_dir", ",", "config", ",", "access_mode", "=", "access_mode", ")", "return", "dataset" ]
Opens a tensor dataset.
[ "Opens", "a", "tensor", "dataset", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L716-L734
train
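Opening an existing dataset, per the record above (the directory path is a placeholder; config.json is tried first and config.yaml is the fallback):

from autolab_core.tensor_dataset import TensorDataset

dataset = TensorDataset.open('/path/to/my_dataset')  # read-only by default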
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.split
python
def split(self, split_name):
    """ Return the training and validation indices for the requested split.

    Parameters
    ----------
    split_name : str
        name of the split

    Returns
    -------
    :obj:`numpy.ndarray`
        array of training indices in the global dataset
    :obj:`numpy.ndarray`
        array of validation indices in the global dataset
    dict
        metadata about the split
    """
    if not self.has_split(split_name):
        raise ValueError('Split %s does not exist!' %(split_name))
    metadata_filename = self.split_metadata_filename(split_name)
    train_filename = self.train_indices_filename(split_name)
    val_filename = self.val_indices_filename(split_name)
    metadata = json.load(open(metadata_filename, 'r'))
    train_indices = np.load(train_filename)['arr_0']
    val_indices = np.load(val_filename)['arr_0']
    return train_indices, val_indices, metadata
[ "def", "split", "(", "self", ",", "split_name", ")", ":", "if", "not", "self", ".", "has_split", "(", "split_name", ")", ":", "raise", "ValueError", "(", "'Split %s does not exist!'", "%", "(", "split_name", ")", ")", "metadata_filename", "=", "self", ".", "split_metadata_filename", "(", "split_name", ")", "train_filename", "=", "self", ".", "train_indices_filename", "(", "split_name", ")", "val_filename", "=", "self", ".", "val_indices_filename", "(", "split_name", ")", "metadata", "=", "json", ".", "load", "(", "open", "(", "metadata_filename", ",", "'r'", ")", ")", "train_indices", "=", "np", ".", "load", "(", "train_filename", ")", "[", "'arr_0'", "]", "val_indices", "=", "np", ".", "load", "(", "val_filename", ")", "[", "'arr_0'", "]", "return", "train_indices", ",", "val_indices", ",", "metadata" ]
Return the training and validation indices for the requested split.

Parameters
----------
split_name : str
    name of the split

Returns
-------
:obj:`numpy.ndarray`
    array of training indices in the global dataset
:obj:`numpy.ndarray`
    array of validation indices in the global dataset
dict
    metadata about the split
[ "Return", "the", "training", "and", "validation", "indices", "for", "the", "requested", "split", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L736-L762
train
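Reading back a saved split (a sketch; the split name is hypothetical and must already exist, otherwise split() raises ValueError):

train_indices, val_indices, split_metadata = dataset.split('image_wise')
print(len(train_indices), len(val_indices))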
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.delete_split
python
def delete_split(self, split_name):
    """ Delete a split of the dataset.

    Parameters
    ----------
    split_name : str
        name of the split to delete
    """
    if self.has_split(split_name):
        shutil.rmtree(os.path.join(self.split_dir, split_name))
[ "def", "delete_split", "(", "self", ",", "split_name", ")", ":", "if", "self", ".", "has_split", "(", "split_name", ")", ":", "shutil", ".", "rmtree", "(", "os", ".", "path", ".", "join", "(", "self", ".", "split_dir", ",", "split_name", ")", ")" ]
Delete a split of the dataset.

Parameters
----------
split_name : str
    name of the split to delete
[ "Delete", "a", "split", "of", "the", "dataset", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L878-L887
train
BerkeleyAutomation/autolab_core
autolab_core/yaml_config.py
YamlConfig._load_config
python
def _load_config(self, filename):
    """Loads a yaml configuration file from the given filename.

    Parameters
    ----------
    filename : :obj:`str`
        The filename of the .yaml file that contains the configuration.
    """
    # Read entire file for metadata
    fh = open(filename, 'r')
    self.file_contents = fh.read()

    # Replace !include directives with content
    config_dir = os.path.split(filename)[0]
    include_re = re.compile('^(.*)!include\s+(.*)$', re.MULTILINE)

    def recursive_load(matchobj, path):
        first_spacing = matchobj.group(1)
        other_spacing = first_spacing.replace('-', ' ')

        fname = os.path.join(path, matchobj.group(2))
        new_path, _ = os.path.split(fname)
        new_path = os.path.realpath(new_path)

        text = ''
        with open(fname) as f:
            text = f.read()
        text = first_spacing + text
        text = text.replace('\n', '\n{}'.format(other_spacing), text.count('\n') - 1)

        return re.sub(include_re, lambda m : recursive_load(m, new_path), text)

    # def include_repl(matchobj):
    #     first_spacing = matchobj.group(1)
    #     other_spacing = first_spacing.replace('-', ' ')
    #     fname = os.path.join(config_dir, matchobj.group(2))
    #     text = ''
    #     with open(fname) as f:
    #         text = f.read()
    #     text = first_spacing + text
    #     text = text.replace('\n', '\n{}'.format(other_spacing), text.count('\n') - 1)
    #     return text

    self.file_contents = re.sub(include_re, lambda m : recursive_load(m, config_dir), self.file_contents)

    # Read in dictionary
    self.config = self.__ordered_load(self.file_contents)

    # Convert functions of other params to true expressions
    for k in self.config.keys():
        self.config[k] = YamlConfig.__convert_key(self.config[k])

    fh.close()

    # Load core configuration
    return self.config
[ "def", "_load_config", "(", "self", ",", "filename", ")", ":", "# Read entire file for metadata", "fh", "=", "open", "(", "filename", ",", "'r'", ")", "self", ".", "file_contents", "=", "fh", ".", "read", "(", ")", "# Replace !include directives with content", "config_dir", "=", "os", ".", "path", ".", "split", "(", "filename", ")", "[", "0", "]", "include_re", "=", "re", ".", "compile", "(", "'^(.*)!include\\s+(.*)$'", ",", "re", ".", "MULTILINE", ")", "def", "recursive_load", "(", "matchobj", ",", "path", ")", ":", "first_spacing", "=", "matchobj", ".", "group", "(", "1", ")", "other_spacing", "=", "first_spacing", ".", "replace", "(", "'-'", ",", "' '", ")", "fname", "=", "os", ".", "path", ".", "join", "(", "path", ",", "matchobj", ".", "group", "(", "2", ")", ")", "new_path", ",", "_", "=", "os", ".", "path", ".", "split", "(", "fname", ")", "new_path", "=", "os", ".", "path", ".", "realpath", "(", "new_path", ")", "text", "=", "''", "with", "open", "(", "fname", ")", "as", "f", ":", "text", "=", "f", ".", "read", "(", ")", "text", "=", "first_spacing", "+", "text", "text", "=", "text", ".", "replace", "(", "'\\n'", ",", "'\\n{}'", ".", "format", "(", "other_spacing", ")", ",", "text", ".", "count", "(", "'\\n'", ")", "-", "1", ")", "return", "re", ".", "sub", "(", "include_re", ",", "lambda", "m", ":", "recursive_load", "(", "m", ",", "new_path", ")", ",", "text", ")", "# def include_repl(matchobj):", "# first_spacing = matchobj.group(1)", "# other_spacing = first_spacing.replace('-', ' ')", "# fname = os.path.join(config_dir, matchobj.group(2))", "# text = ''", "# with open(fname) as f:", "# text = f.read()", "# text = first_spacing + text", "# text = text.replace('\\n', '\\n{}'.format(other_spacing), text.count('\\n') - 1)", "# return text", "self", ".", "file_contents", "=", "re", ".", "sub", "(", "include_re", ",", "lambda", "m", ":", "recursive_load", "(", "m", ",", "config_dir", ")", ",", "self", ".", "file_contents", ")", "# Read in dictionary", "self", ".", "config", "=", "self", ".", "__ordered_load", "(", "self", ".", "file_contents", ")", "# Convert functions of other params to true expressions", "for", "k", "in", "self", ".", "config", ".", "keys", "(", ")", ":", "self", ".", "config", "[", "k", "]", "=", "YamlConfig", ".", "__convert_key", "(", "self", ".", "config", "[", "k", "]", ")", "fh", ".", "close", "(", ")", "# Load core configuration", "return", "self", ".", "config" ]
Loads a yaml configuration file from the given filename.

Parameters
----------
filename : :obj:`str`
    The filename of the .yaml file that contains the configuration.
[ "Loads", "a", "yaml", "configuration", "file", "from", "the", "given", "filename", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/yaml_config.py#L75-L126
train
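A hypothetical illustration of the !include handling above, written as Python for self-containment. The file names and keys are made up; only the textual replacement of the !include line before YAML parsing comes from the code.

from autolab_core.yaml_config import YamlConfig

# camera.yaml contributes its keys in place of the directive in config.yaml.
with open('camera.yaml', 'w') as f:
    f.write('frame: primesense_overhead\nfocal_length: 525.0\n')
with open('config.yaml', 'w') as f:
    f.write('dataset_name: grasps\n!include camera.yaml\n')

cfg = YamlConfig('config.yaml')  # the directive is expanded inline before the YAML is parsed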
BerkeleyAutomation/autolab_core
autolab_core/yaml_config.py
YamlConfig.__convert_key
python
def __convert_key(expression):
    """Converts keys in YAML that reference other keys.
    """
    if type(expression) is str and len(expression) > 2 and expression[1] == '!':
        expression = eval(expression[2:-1])
    return expression
[ "def", "__convert_key", "(", "expression", ")", ":", "if", "type", "(", "expression", ")", "is", "str", "and", "len", "(", "expression", ")", ">", "2", "and", "expression", "[", "1", "]", "==", "'!'", ":", "expression", "=", "eval", "(", "expression", "[", "2", ":", "-", "1", "]", ")", "return", "expression" ]
Converts keys in YAML that reference other keys.
[ "Converts", "keys", "in", "YAML", "that", "reference", "other", "keys", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/yaml_config.py#L129-L134
train
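Mechanically, the branch above fires for strings whose second character is '!'; the text between the first two characters and the last character is passed to eval(). The wrapper characters below are a guess for illustration, only the slicing comes from the code, and the use of eval() means configuration files must be trusted input.

# "(!2 * 320)"[2:-1] == "2 * 320"  ->  eval("2 * 320") == 640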
BerkeleyAutomation/autolab_core
autolab_core/learning_analysis.py
ClassificationResult.make_summary_table
python
def make_summary_table(train_result, val_result, plot=True, save_dir=None, prepend="", save=False):
    """
    Makes a matplotlib table object with relevant data.
    Thanks to Lucas Manuelli for the contribution.

    Parameters
    ----------
    train_result: ClassificationResult
        result on train split
    val_result: ClassificationResult
        result on validation split
    save_dir: str
        path pointing to where to save results

    Returns
    -------
    dict
        dict with stored values, can be saved to a yaml file
    :obj:`matplotlibt.pyplot.fig`
        a figure containing the table
    """
    table_key_list = ['error_rate', 'recall_at_99_precision', 'average_precision', 'precision', 'recall']
    num_fields = len(table_key_list)

    import matplotlib.pyplot as plt
    ax = plt.subplot(111, frame_on=False)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)

    data = np.zeros([num_fields, 2])
    data_dict = dict()
    names = ['train', 'validation']

    for name, result in zip(names, [train_result, val_result]):
        data_dict[name] = {}
        data_dict[name]['error_rate'] = result.error_rate
        data_dict[name]['average_precision'] = result.ap_score * 100
        data_dict[name]['precision'] = result.precision * 100
        data_dict[name]['recall'] = result.recall * 100

        precision_array, recall_array, _ = result.precision_recall_curve()
        recall_at_99_precision = recall_array[np.argmax(precision_array > 0.99)] * 100  # to put it in percentage terms
        data_dict[name]['recall_at_99_precision'] = recall_at_99_precision

        for i, key in enumerate(table_key_list):
            data_dict[name][key] = float("{0:.2f}".format(data_dict[name][key]))
            j = names.index(name)
            data[i, j] = data_dict[name][key]

    table = plt.table(cellText=data, rowLabels=table_key_list, colLabels=names)
    fig = plt.gcf()
    fig.subplots_adjust(bottom=0.15)
    if plot:
        plt.show()

    # save the results
    if save_dir is not None and save:
        fig_filename = os.path.join(save_dir, prepend + 'summary.png')
        yaml_filename = os.path.join(save_dir, prepend + 'summary.yaml')
        yaml.dump(data_dict, open(yaml_filename, 'w'), default_flow_style=False)
        fig.savefig(fig_filename, bbox_inches="tight")

    return data_dict, fig
[ "def", "make_summary_table", "(", "train_result", ",", "val_result", ",", "plot", "=", "True", ",", "save_dir", "=", "None", ",", "prepend", "=", "\"\"", ",", "save", "=", "False", ")", ":", "table_key_list", "=", "[", "'error_rate'", ",", "'recall_at_99_precision'", ",", "'average_precision'", ",", "'precision'", ",", "'recall'", "]", "num_fields", "=", "len", "(", "table_key_list", ")", "import", "matplotlib", ".", "pyplot", "as", "plt", "ax", "=", "plt", ".", "subplot", "(", "111", ",", "frame_on", "=", "False", ")", "ax", ".", "xaxis", ".", "set_visible", "(", "False", ")", "ax", ".", "yaxis", ".", "set_visible", "(", "False", ")", "data", "=", "np", ".", "zeros", "(", "[", "num_fields", ",", "2", "]", ")", "data_dict", "=", "dict", "(", ")", "names", "=", "[", "'train'", ",", "'validation'", "]", "for", "name", ",", "result", "in", "zip", "(", "names", ",", "[", "train_result", ",", "val_result", "]", ")", ":", "data_dict", "[", "name", "]", "=", "{", "}", "data_dict", "[", "name", "]", "[", "'error_rate'", "]", "=", "result", ".", "error_rate", "data_dict", "[", "name", "]", "[", "'average_precision'", "]", "=", "result", ".", "ap_score", "*", "100", "data_dict", "[", "name", "]", "[", "'precision'", "]", "=", "result", ".", "precision", "*", "100", "data_dict", "[", "name", "]", "[", "'recall'", "]", "=", "result", ".", "recall", "*", "100", "precision_array", ",", "recall_array", ",", "_", "=", "result", ".", "precision_recall_curve", "(", ")", "recall_at_99_precision", "=", "recall_array", "[", "np", ".", "argmax", "(", "precision_array", ">", "0.99", ")", "]", "*", "100", "# to put it in percentage terms", "data_dict", "[", "name", "]", "[", "'recall_at_99_precision'", "]", "=", "recall_at_99_precision", "for", "i", ",", "key", "in", "enumerate", "(", "table_key_list", ")", ":", "data_dict", "[", "name", "]", "[", "key", "]", "=", "float", "(", "\"{0:.2f}\"", ".", "format", "(", "data_dict", "[", "name", "]", "[", "key", "]", ")", ")", "j", "=", "names", ".", "index", "(", "name", ")", "data", "[", "i", ",", "j", "]", "=", "data_dict", "[", "name", "]", "[", "key", "]", "table", "=", "plt", ".", "table", "(", "cellText", "=", "data", ",", "rowLabels", "=", "table_key_list", ",", "colLabels", "=", "names", ")", "fig", "=", "plt", ".", "gcf", "(", ")", "fig", ".", "subplots_adjust", "(", "bottom", "=", "0.15", ")", "if", "plot", ":", "plt", ".", "show", "(", ")", "# save the results", "if", "save_dir", "is", "not", "None", "and", "save", ":", "fig_filename", "=", "os", ".", "path", ".", "join", "(", "save_dir", ",", "prepend", "+", "'summary.png'", ")", "yaml_filename", "=", "os", ".", "path", ".", "join", "(", "save_dir", ",", "prepend", "+", "'summary.yaml'", ")", "yaml", ".", "dump", "(", "data_dict", ",", "open", "(", "yaml_filename", ",", "'w'", ")", ",", "default_flow_style", "=", "False", ")", "fig", ".", "savefig", "(", "fig_filename", ",", "bbox_inches", "=", "\"tight\"", ")", "return", "data_dict", ",", "fig" ]
Makes a matplotlib table object with relevant data. Thanks to Lucas Manuelli
for the contribution.

Parameters
----------
train_result: ClassificationResult
    result on train split

val_result: ClassificationResult
    result on validation split

save_dir: str
    path pointing to where to save results

Returns
-------
dict
    dict with stored values, can be saved to a yaml file

:obj:`matplotlib.pyplot.fig`
    a figure containing the table
[ "Makes", "a", "matplotlib", "table", "object", "with", "relevant", "data", ".", "Thanks", "to", "Lucas", "Manuelli", "for", "the", "contribution", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/learning_analysis.py#L236-L304
train
BerkeleyAutomation/autolab_core
autolab_core/learning_analysis.py
BinaryClassificationResult.app_score
def app_score(self): """ Computes the area under the app curve. """ # compute curve precisions, pct_pred_pos, taus = self.precision_pct_pred_pos_curve(interval=False) # compute area app = 0 total = 0 for k in range(len(precisions)-1): # read cur data cur_prec = precisions[k] cur_pp = pct_pred_pos[k] cur_tau = taus[k] # read next data next_prec = precisions[k+1] next_pp = pct_pred_pos[k+1] next_tau = taus[k+1] # approximate with rectangles mid_prec = (cur_prec + next_prec) / 2.0 width_pp = np.abs(next_pp - cur_pp) app += mid_prec * width_pp total += width_pp return app
python
def app_score(self): """ Computes the area under the app curve. """ # compute curve precisions, pct_pred_pos, taus = self.precision_pct_pred_pos_curve(interval=False) # compute area app = 0 total = 0 for k in range(len(precisions)-1): # read cur data cur_prec = precisions[k] cur_pp = pct_pred_pos[k] cur_tau = taus[k] # read next data next_prec = precisions[k+1] next_pp = pct_pred_pos[k+1] next_tau = taus[k+1] # approximate with rectangles mid_prec = (cur_prec + next_prec) / 2.0 width_pp = np.abs(next_pp - cur_pp) app += mid_prec * width_pp total += width_pp return app
[ "def", "app_score", "(", "self", ")", ":", "# compute curve", "precisions", ",", "pct_pred_pos", ",", "taus", "=", "self", ".", "precision_pct_pred_pos_curve", "(", "interval", "=", "False", ")", "# compute area", "app", "=", "0", "total", "=", "0", "for", "k", "in", "range", "(", "len", "(", "precisions", ")", "-", "1", ")", ":", "# read cur data", "cur_prec", "=", "precisions", "[", "k", "]", "cur_pp", "=", "pct_pred_pos", "[", "k", "]", "cur_tau", "=", "taus", "[", "k", "]", "# read next data", "next_prec", "=", "precisions", "[", "k", "+", "1", "]", "next_pp", "=", "pct_pred_pos", "[", "k", "+", "1", "]", "next_tau", "=", "taus", "[", "k", "+", "1", "]", "# approximate with rectangles", "mid_prec", "=", "(", "cur_prec", "+", "next_prec", ")", "/", "2.0", "width_pp", "=", "np", ".", "abs", "(", "next_pp", "-", "cur_pp", ")", "app", "+=", "mid_prec", "*", "width_pp", "total", "+=", "width_pp", "return", "app" ]
Computes the area under the app curve.
[ "Computes", "the", "area", "under", "the", "app", "curve", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/learning_analysis.py#L467-L492
train
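As a quick illustration of the accumulation above: the loop averages each pair of adjacent precision samples and weights by the width of the corresponding percent-predicted-positive interval. A minimal numpy sketch of the same computation on made-up sample points (the values are illustrative only, not output of the class):

import numpy as np

# hypothetical samples of (percent predicted positive, precision)
pct_pred_pos = np.array([1.0, 0.6, 0.3, 0.0])
precisions = np.array([0.55, 0.70, 0.85, 1.00])

# average adjacent precisions, weight by the interval width, and sum,
# mirroring the mid_prec * width_pp accumulation in app_score
app = np.sum(0.5 * (precisions[:-1] + precisions[1:])
             * np.abs(np.diff(pct_pred_pos)))
print(app)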
BerkeleyAutomation/autolab_core
autolab_core/learning_analysis.py
BinaryClassificationResult.accuracy_curve
def accuracy_curve(self, delta_tau=0.01): """ Computes the relationship between probability threshold and classification accuracy. """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values scores = [] taus = [] tau = 0 for k in range(len(sorted_labels)): # compute new accuracy self.threshold = tau scores.append(self.accuracy) taus.append(tau) # update threshold tau = sorted_probs[k] # add last datapoint tau = 1.0 self.threshold = tau scores.append(self.accuracy) taus.append(tau) self.threshold = orig_thresh return scores, taus
python
def accuracy_curve(self, delta_tau=0.01): """ Computes the relationship between probability threshold and classification accuracy. """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values scores = [] taus = [] tau = 0 for k in range(len(sorted_labels)): # compute new accuracy self.threshold = tau scores.append(self.accuracy) taus.append(tau) # update threshold tau = sorted_probs[k] # add last datapoint tau = 1.0 self.threshold = tau scores.append(self.accuracy) taus.append(tau) self.threshold = orig_thresh return scores, taus
[ "def", "accuracy_curve", "(", "self", ",", "delta_tau", "=", "0.01", ")", ":", "# compute thresholds based on the sorted probabilities", "orig_thresh", "=", "self", ".", "threshold", "sorted_labels", ",", "sorted_probs", "=", "self", ".", "sorted_values", "scores", "=", "[", "]", "taus", "=", "[", "]", "tau", "=", "0", "for", "k", "in", "range", "(", "len", "(", "sorted_labels", ")", ")", ":", "# compute new accuracy", "self", ".", "threshold", "=", "tau", "scores", ".", "append", "(", "self", ".", "accuracy", ")", "taus", ".", "append", "(", "tau", ")", "# update threshold", "tau", "=", "sorted_probs", "[", "k", "]", "# add last datapoint", "tau", "=", "1.0", "self", ".", "threshold", "=", "tau", "scores", ".", "append", "(", "self", ".", "accuracy", ")", "taus", ".", "append", "(", "tau", ")", "self", ".", "threshold", "=", "orig_thresh", "return", "scores", ",", "taus" ]
Computes the relationship between probability threshold and classification accuracy.
[ "Computes", "the", "relationship", "between", "probability", "threshold", "and", "classification", "accuracy", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/learning_analysis.py#L515-L541
train
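The curve above comes from sweeping the decision threshold over the sorted predicted probabilities. A standalone numpy sketch of the same sweep on hypothetical labels and probabilities (this does not use the BinaryClassificationResult API, and the >= thresholding convention is an assumption):

import numpy as np

labels = np.array([0, 0, 1, 0, 1, 1])
probs = np.array([0.05, 0.30, 0.45, 0.60, 0.80, 0.95])

# evaluate accuracy at tau = 0, at each sorted probability, and at tau = 1
taus = np.concatenate(([0.0], np.sort(probs), [1.0]))
accuracies = [np.mean((probs >= t).astype(int) == labels) for t in taus]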
BerkeleyAutomation/autolab_core
autolab_core/learning_analysis.py
BinaryClassificationResult.f1_curve
def f1_curve(self, delta_tau=0.01): """ Computes the relationship between probability threshold and classification F1 score. """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values scores = [] taus = [] tau = 0 for k in range(len(sorted_labels)): # compute new accuracy self.threshold = tau scores.append(self.f1_score) taus.append(tau) # update threshold tau = sorted_probs[k] # add last datapoint tau = 1.0 self.threshold = tau scores.append(self.f1_score) taus.append(tau) self.threshold = orig_thresh return scores, taus
python
def f1_curve(self, delta_tau=0.01): """ Computes the relationship between probability threshold and classification F1 score. """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values scores = [] taus = [] tau = 0 for k in range(len(sorted_labels)): # compute new accuracy self.threshold = tau scores.append(self.f1_score) taus.append(tau) # update threshold tau = sorted_probs[k] # add last datapoint tau = 1.0 self.threshold = tau scores.append(self.f1_score) taus.append(tau) self.threshold = orig_thresh return scores, taus
[ "def", "f1_curve", "(", "self", ",", "delta_tau", "=", "0.01", ")", ":", "# compute thresholds based on the sorted probabilities", "orig_thresh", "=", "self", ".", "threshold", "sorted_labels", ",", "sorted_probs", "=", "self", ".", "sorted_values", "scores", "=", "[", "]", "taus", "=", "[", "]", "tau", "=", "0", "for", "k", "in", "range", "(", "len", "(", "sorted_labels", ")", ")", ":", "# compute new accuracy", "self", ".", "threshold", "=", "tau", "scores", ".", "append", "(", "self", ".", "f1_score", ")", "taus", ".", "append", "(", "tau", ")", "# update threshold", "tau", "=", "sorted_probs", "[", "k", "]", "# add last datapoint", "tau", "=", "1.0", "self", ".", "threshold", "=", "tau", "scores", ".", "append", "(", "self", ".", "f1_score", ")", "taus", ".", "append", "(", "tau", ")", "self", ".", "threshold", "=", "orig_thresh", "return", "scores", ",", "taus" ]
Computes the relationship between probability threshold and classification F1 score.
[ "Computes", "the", "relationship", "between", "probability", "threshold", "and", "classification", "F1", "score", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/learning_analysis.py#L599-L625
train
BerkeleyAutomation/autolab_core
autolab_core/learning_analysis.py
BinaryClassificationResult.phi_coef_curve
def phi_coef_curve(self, delta_tau=0.01): """ Computes the relationship between probability threshold and classification phi coefficient. """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values scores = [] taus = [] tau = 0 for k in range(len(sorted_labels)): # compute new accuracy self.threshold = tau scores.append(self.phi_coef) taus.append(tau) # update threshold tau = sorted_probs[k] # add last datapoint tau = 1.0 self.threshold = tau scores.append(self.phi_coef) taus.append(tau) self.threshold = orig_thresh return scores, taus
python
def phi_coef_curve(self, delta_tau=0.01): """ Computes the relationship between probability threshold and classification phi coefficient. """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values scores = [] taus = [] tau = 0 for k in range(len(sorted_labels)): # compute new accuracy self.threshold = tau scores.append(self.phi_coef) taus.append(tau) # update threshold tau = sorted_probs[k] # add last datapoint tau = 1.0 self.threshold = tau scores.append(self.phi_coef) taus.append(tau) self.threshold = orig_thresh return scores, taus
[ "def", "phi_coef_curve", "(", "self", ",", "delta_tau", "=", "0.01", ")", ":", "# compute thresholds based on the sorted probabilities", "orig_thresh", "=", "self", ".", "threshold", "sorted_labels", ",", "sorted_probs", "=", "self", ".", "sorted_values", "scores", "=", "[", "]", "taus", "=", "[", "]", "tau", "=", "0", "for", "k", "in", "range", "(", "len", "(", "sorted_labels", ")", ")", ":", "# compute new accuracy", "self", ".", "threshold", "=", "tau", "scores", ".", "append", "(", "self", ".", "phi_coef", ")", "taus", ".", "append", "(", "tau", ")", "# update threshold", "tau", "=", "sorted_probs", "[", "k", "]", "# add last datapoint", "tau", "=", "1.0", "self", ".", "threshold", "=", "tau", "scores", ".", "append", "(", "self", ".", "phi_coef", ")", "taus", ".", "append", "(", "tau", ")", "self", ".", "threshold", "=", "orig_thresh", "return", "scores", ",", "taus" ]
Computes the relationship between probability threshold and classification phi coefficient.
[ "Computes", "the", "relationship", "between", "probability", "threshold", "and", "classification", "phi", "coefficient", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/learning_analysis.py#L627-L653
train
BerkeleyAutomation/autolab_core
autolab_core/learning_analysis.py
BinaryClassificationResult.precision_pct_pred_pos_curve
def precision_pct_pred_pos_curve(self, interval=False, delta_tau=0.001): """ Computes the relationship between precision and the percent of positively classified datapoints . """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values precisions = [] pct_pred_pos = [] taus = [] tau = 0 if not interval: for k in range(len(sorted_labels)): # compute new accuracy self.threshold = tau precisions.append(self.precision) pct_pred_pos.append(self.pct_pred_pos) taus.append(tau) # update threshold tau = sorted_probs[k] else: while tau < 1.0: # compute new accuracy self.threshold = tau precisions.append(self.precision) pct_pred_pos.append(self.pct_pred_pos) taus.append(tau) # update threshold tau += delta_tau # add last datapoint tau = 1.0 self.threshold = tau precisions.append(self.precision) pct_pred_pos.append(self.pct_pred_pos) taus.append(tau) precisions.append(1.0) pct_pred_pos.append(0.0) taus.append(1.0 + 1e-12) self.threshold = orig_thresh return precisions, pct_pred_pos, taus
python
def precision_pct_pred_pos_curve(self, interval=False, delta_tau=0.001): """ Computes the relationship between precision and the percent of positively classified datapoints . """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values precisions = [] pct_pred_pos = [] taus = [] tau = 0 if not interval: for k in range(len(sorted_labels)): # compute new accuracy self.threshold = tau precisions.append(self.precision) pct_pred_pos.append(self.pct_pred_pos) taus.append(tau) # update threshold tau = sorted_probs[k] else: while tau < 1.0: # compute new accuracy self.threshold = tau precisions.append(self.precision) pct_pred_pos.append(self.pct_pred_pos) taus.append(tau) # update threshold tau += delta_tau # add last datapoint tau = 1.0 self.threshold = tau precisions.append(self.precision) pct_pred_pos.append(self.pct_pred_pos) taus.append(tau) precisions.append(1.0) pct_pred_pos.append(0.0) taus.append(1.0 + 1e-12) self.threshold = orig_thresh return precisions, pct_pred_pos, taus
[ "def", "precision_pct_pred_pos_curve", "(", "self", ",", "interval", "=", "False", ",", "delta_tau", "=", "0.001", ")", ":", "# compute thresholds based on the sorted probabilities", "orig_thresh", "=", "self", ".", "threshold", "sorted_labels", ",", "sorted_probs", "=", "self", ".", "sorted_values", "precisions", "=", "[", "]", "pct_pred_pos", "=", "[", "]", "taus", "=", "[", "]", "tau", "=", "0", "if", "not", "interval", ":", "for", "k", "in", "range", "(", "len", "(", "sorted_labels", ")", ")", ":", "# compute new accuracy", "self", ".", "threshold", "=", "tau", "precisions", ".", "append", "(", "self", ".", "precision", ")", "pct_pred_pos", ".", "append", "(", "self", ".", "pct_pred_pos", ")", "taus", ".", "append", "(", "tau", ")", "# update threshold", "tau", "=", "sorted_probs", "[", "k", "]", "else", ":", "while", "tau", "<", "1.0", ":", "# compute new accuracy", "self", ".", "threshold", "=", "tau", "precisions", ".", "append", "(", "self", ".", "precision", ")", "pct_pred_pos", ".", "append", "(", "self", ".", "pct_pred_pos", ")", "taus", ".", "append", "(", "tau", ")", "# update threshold", "tau", "+=", "delta_tau", "# add last datapoint", "tau", "=", "1.0", "self", ".", "threshold", "=", "tau", "precisions", ".", "append", "(", "self", ".", "precision", ")", "pct_pred_pos", ".", "append", "(", "self", ".", "pct_pred_pos", ")", "taus", ".", "append", "(", "tau", ")", "precisions", ".", "append", "(", "1.0", ")", "pct_pred_pos", ".", "append", "(", "0.0", ")", "taus", ".", "append", "(", "1.0", "+", "1e-12", ")", "self", ".", "threshold", "=", "orig_thresh", "return", "precisions", ",", "pct_pred_pos", ",", "taus" ]
Computes the relationship between precision and the percent of positively classified datapoints .
[ "Computes", "the", "relationship", "between", "precision", "and", "the", "percent", "of", "positively", "classified", "datapoints", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/learning_analysis.py#L655-L700
train
BerkeleyAutomation/autolab_core
autolab_core/utils.py
gen_experiment_id
def gen_experiment_id(n=10): """Generate a random string with n characters. Parameters ---------- n : int The length of the string to be generated. Returns ------- :obj:`str` A string with only alphabetic characters. """ chrs = 'abcdefghijklmnopqrstuvwxyz' inds = np.random.randint(0,len(chrs), size=n) return ''.join([chrs[i] for i in inds])
python
def gen_experiment_id(n=10): """Generate a random string with n characters. Parameters ---------- n : int The length of the string to be generated. Returns ------- :obj:`str` A string with only alphabetic characters. """ chrs = 'abcdefghijklmnopqrstuvwxyz' inds = np.random.randint(0,len(chrs), size=n) return ''.join([chrs[i] for i in inds])
[ "def", "gen_experiment_id", "(", "n", "=", "10", ")", ":", "chrs", "=", "'abcdefghijklmnopqrstuvwxyz'", "inds", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "len", "(", "chrs", ")", ",", "size", "=", "n", ")", "return", "''", ".", "join", "(", "[", "chrs", "[", "i", "]", "for", "i", "in", "inds", "]", ")" ]
Generate a random string with n characters. Parameters ---------- n : int The length of the string to be generated. Returns ------- :obj:`str` A string with only alphabetic characters.
[ "Generate", "a", "random", "string", "with", "n", "characters", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L11-L26
train
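A small usage sketch, assuming the function is imported from autolab_core.utils as laid out in the record above (the tag string is made up):

from autolab_core.utils import gen_experiment_id

# 10 random lowercase letters, e.g. 'qzjxkwahrt'
experiment_id = gen_experiment_id(n=10)
experiment_ref = 'grasp_trial_{}'.format(experiment_id)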
BerkeleyAutomation/autolab_core
autolab_core/utils.py
histogram
def histogram(values, num_bins, bounds, normalized=True, plot=False, color='b'): """Generate a histogram plot. Parameters ---------- values : :obj:`numpy.ndarray` An array of values to put in the histogram. num_bins : int The number equal-width bins in the histogram. bounds : :obj:`tuple` of float Two floats - a min and a max - that define the lower and upper ranges of the histogram, respectively. normalized : bool If True, the bins will show the percentage of elements they contain rather than raw counts. plot : bool If True, this function uses pyplot to plot the histogram. color : :obj:`str` The color identifier for the plotted bins. Returns ------- :obj:`tuple of `:obj:`numpy.ndarray` The values of the histogram and the bin edges as ndarrays. """ hist, bins = np.histogram(values, bins=num_bins, range=bounds) width = (bins[1] - bins[0]) if normalized: if np.sum(hist) > 0: hist = hist.astype(np.float32) / np.sum(hist) if plot: import matplotlib.pyplot as plt plt.bar(bins[:-1], hist, width=width, color=color) return hist, bins
python
def histogram(values, num_bins, bounds, normalized=True, plot=False, color='b'): """Generate a histogram plot. Parameters ---------- values : :obj:`numpy.ndarray` An array of values to put in the histogram. num_bins : int The number equal-width bins in the histogram. bounds : :obj:`tuple` of float Two floats - a min and a max - that define the lower and upper ranges of the histogram, respectively. normalized : bool If True, the bins will show the percentage of elements they contain rather than raw counts. plot : bool If True, this function uses pyplot to plot the histogram. color : :obj:`str` The color identifier for the plotted bins. Returns ------- :obj:`tuple of `:obj:`numpy.ndarray` The values of the histogram and the bin edges as ndarrays. """ hist, bins = np.histogram(values, bins=num_bins, range=bounds) width = (bins[1] - bins[0]) if normalized: if np.sum(hist) > 0: hist = hist.astype(np.float32) / np.sum(hist) if plot: import matplotlib.pyplot as plt plt.bar(bins[:-1], hist, width=width, color=color) return hist, bins
[ "def", "histogram", "(", "values", ",", "num_bins", ",", "bounds", ",", "normalized", "=", "True", ",", "plot", "=", "False", ",", "color", "=", "'b'", ")", ":", "hist", ",", "bins", "=", "np", ".", "histogram", "(", "values", ",", "bins", "=", "num_bins", ",", "range", "=", "bounds", ")", "width", "=", "(", "bins", "[", "1", "]", "-", "bins", "[", "0", "]", ")", "if", "normalized", ":", "if", "np", ".", "sum", "(", "hist", ")", ">", "0", ":", "hist", "=", "hist", ".", "astype", "(", "np", ".", "float32", ")", "/", "np", ".", "sum", "(", "hist", ")", "if", "plot", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "plt", ".", "bar", "(", "bins", "[", ":", "-", "1", "]", ",", "hist", ",", "width", "=", "width", ",", "color", "=", "color", ")", "return", "hist", ",", "bins" ]
Generate a histogram plot.

Parameters
----------
values : :obj:`numpy.ndarray`
    An array of values to put in the histogram.

num_bins : int
    The number of equal-width bins in the histogram.

bounds : :obj:`tuple` of float
    Two floats - a min and a max - that define the lower and upper
    ranges of the histogram, respectively.

normalized : bool
    If True, the bins will show the percentage of elements they
    contain rather than raw counts.

plot : bool
    If True, this function uses pyplot to plot the histogram.

color : :obj:`str`
    The color identifier for the plotted bins.

Returns
-------
:obj:`tuple` of :obj:`numpy.ndarray`
    The values of the histogram and the bin edges as ndarrays.
[ "Generate", "a", "histogram", "plot", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L64-L102
train
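A short usage sketch for the helper above; the sample data is synthetic and plotting is left off so the snippet stays non-interactive:

import numpy as np
from autolab_core.utils import histogram

values = np.random.normal(loc=0.0, scale=1.0, size=1000)
hist, bin_edges = histogram(values, num_bins=20, bounds=(-4.0, 4.0),
                            normalized=True, plot=False)
# with normalized=True the bin heights sum to (approximately) 1.0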
BerkeleyAutomation/autolab_core
autolab_core/utils.py
skew
def skew(xi): """Return the skew-symmetric matrix that can be used to calculate cross-products with vector xi. Multiplying this matrix by a vector `v` gives the same result as `xi x v`. Parameters ---------- xi : :obj:`numpy.ndarray` of float A 3-entry vector. Returns ------- :obj:`numpy.ndarray` of float The 3x3 skew-symmetric cross product matrix for the vector. """ S = np.array([[0, -xi[2], xi[1]], [xi[2], 0, -xi[0]], [-xi[1], xi[0], 0]]) return S
python
def skew(xi): """Return the skew-symmetric matrix that can be used to calculate cross-products with vector xi. Multiplying this matrix by a vector `v` gives the same result as `xi x v`. Parameters ---------- xi : :obj:`numpy.ndarray` of float A 3-entry vector. Returns ------- :obj:`numpy.ndarray` of float The 3x3 skew-symmetric cross product matrix for the vector. """ S = np.array([[0, -xi[2], xi[1]], [xi[2], 0, -xi[0]], [-xi[1], xi[0], 0]]) return S
[ "def", "skew", "(", "xi", ")", ":", "S", "=", "np", ".", "array", "(", "[", "[", "0", ",", "-", "xi", "[", "2", "]", ",", "xi", "[", "1", "]", "]", ",", "[", "xi", "[", "2", "]", ",", "0", ",", "-", "xi", "[", "0", "]", "]", ",", "[", "-", "xi", "[", "1", "]", ",", "xi", "[", "0", "]", ",", "0", "]", "]", ")", "return", "S" ]
Return the skew-symmetric matrix that can be used to calculate cross-products with vector xi. Multiplying this matrix by a vector `v` gives the same result as `xi x v`. Parameters ---------- xi : :obj:`numpy.ndarray` of float A 3-entry vector. Returns ------- :obj:`numpy.ndarray` of float The 3x3 skew-symmetric cross product matrix for the vector.
[ "Return", "the", "skew", "-", "symmetric", "matrix", "that", "can", "be", "used", "to", "calculate", "cross", "-", "products", "with", "vector", "xi", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L104-L124
train
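The defining property is that the returned matrix reproduces the cross product; a minimal check, assuming the import path shown in the record:

import numpy as np
from autolab_core.utils import skew

x = np.array([1.0, 2.0, 3.0])
v = np.array([4.0, 5.0, 6.0])

# multiplying by the skew-symmetric matrix equals taking the cross product
assert np.allclose(skew(x).dot(v), np.cross(x, v))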
BerkeleyAutomation/autolab_core
autolab_core/utils.py
deskew
def deskew(S): """Converts a skew-symmetric cross-product matrix to its corresponding vector. Only works for 3x3 matrices. Parameters ---------- S : :obj:`numpy.ndarray` of float A 3x3 skew-symmetric matrix. Returns ------- :obj:`numpy.ndarray` of float A 3-entry vector that corresponds to the given cross product matrix. """ x = np.zeros(3) x[0] = S[2,1] x[1] = S[0,2] x[2] = S[1,0] return x
python
def deskew(S): """Converts a skew-symmetric cross-product matrix to its corresponding vector. Only works for 3x3 matrices. Parameters ---------- S : :obj:`numpy.ndarray` of float A 3x3 skew-symmetric matrix. Returns ------- :obj:`numpy.ndarray` of float A 3-entry vector that corresponds to the given cross product matrix. """ x = np.zeros(3) x[0] = S[2,1] x[1] = S[0,2] x[2] = S[1,0] return x
[ "def", "deskew", "(", "S", ")", ":", "x", "=", "np", ".", "zeros", "(", "3", ")", "x", "[", "0", "]", "=", "S", "[", "2", ",", "1", "]", "x", "[", "1", "]", "=", "S", "[", "0", ",", "2", "]", "x", "[", "2", "]", "=", "S", "[", "1", ",", "0", "]", "return", "x" ]
Converts a skew-symmetric cross-product matrix to its corresponding vector. Only works for 3x3 matrices. Parameters ---------- S : :obj:`numpy.ndarray` of float A 3x3 skew-symmetric matrix. Returns ------- :obj:`numpy.ndarray` of float A 3-entry vector that corresponds to the given cross product matrix.
[ "Converts", "a", "skew", "-", "symmetric", "cross", "-", "product", "matrix", "to", "its", "corresponding", "vector", ".", "Only", "works", "for", "3x3", "matrices", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L126-L144
train
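deskew inverts skew for 3-vectors, which gives a simple round-trip sketch:

import numpy as np
from autolab_core.utils import skew, deskew

x = np.array([0.5, -1.0, 2.0])
# recover the original vector from its cross-product matrix
assert np.allclose(deskew(skew(x)), x)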
BerkeleyAutomation/autolab_core
autolab_core/utils.py
reverse_dictionary
def reverse_dictionary(d): """ Reverses the key value pairs for a given dictionary. Parameters ---------- d : :obj:`dict` dictionary to reverse Returns ------- :obj:`dict` dictionary with keys and values swapped """ rev_d = {} [rev_d.update({v:k}) for k, v in d.items()] return rev_d
python
def reverse_dictionary(d): """ Reverses the key value pairs for a given dictionary. Parameters ---------- d : :obj:`dict` dictionary to reverse Returns ------- :obj:`dict` dictionary with keys and values swapped """ rev_d = {} [rev_d.update({v:k}) for k, v in d.items()] return rev_d
[ "def", "reverse_dictionary", "(", "d", ")", ":", "rev_d", "=", "{", "}", "[", "rev_d", ".", "update", "(", "{", "v", ":", "k", "}", ")", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", "]", "return", "rev_d" ]
Reverses the key value pairs for a given dictionary. Parameters ---------- d : :obj:`dict` dictionary to reverse Returns ------- :obj:`dict` dictionary with keys and values swapped
[ "Reverses", "the", "key", "value", "pairs", "for", "a", "given", "dictionary", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L146-L161
train
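A small usage sketch; note that duplicate values in the input collapse to a single key in the reversed dictionary, since later pairs overwrite earlier ones:

from autolab_core.utils import reverse_dictionary

label_to_index = {'cup': 0, 'bowl': 1, 'plate': 2}
index_to_label = reverse_dictionary(label_to_index)
# {0: 'cup', 1: 'bowl', 2: 'plate'}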
BerkeleyAutomation/autolab_core
autolab_core/utils.py
filenames
def filenames(directory, tag='', sorted=False, recursive=False): """ Reads in all filenames from a directory that contain a specified substring. Parameters ---------- directory : :obj:`str` the directory to read from tag : :obj:`str` optional tag to match in the filenames sorted : bool whether or not to sort the filenames recursive : bool whether or not to search for the files recursively Returns ------- :obj:`list` of :obj:`str` filenames to read from """ if recursive: f = [os.path.join(directory, f) for directory, _, filename in os.walk(directory) for f in filename if f.find(tag) > -1] else: f = [os.path.join(directory, f) for f in os.listdir(directory) if f.find(tag) > -1] if sorted: f.sort() return f
python
def filenames(directory, tag='', sorted=False, recursive=False): """ Reads in all filenames from a directory that contain a specified substring. Parameters ---------- directory : :obj:`str` the directory to read from tag : :obj:`str` optional tag to match in the filenames sorted : bool whether or not to sort the filenames recursive : bool whether or not to search for the files recursively Returns ------- :obj:`list` of :obj:`str` filenames to read from """ if recursive: f = [os.path.join(directory, f) for directory, _, filename in os.walk(directory) for f in filename if f.find(tag) > -1] else: f = [os.path.join(directory, f) for f in os.listdir(directory) if f.find(tag) > -1] if sorted: f.sort() return f
[ "def", "filenames", "(", "directory", ",", "tag", "=", "''", ",", "sorted", "=", "False", ",", "recursive", "=", "False", ")", ":", "if", "recursive", ":", "f", "=", "[", "os", ".", "path", ".", "join", "(", "directory", ",", "f", ")", "for", "directory", ",", "_", ",", "filename", "in", "os", ".", "walk", "(", "directory", ")", "for", "f", "in", "filename", "if", "f", ".", "find", "(", "tag", ")", ">", "-", "1", "]", "else", ":", "f", "=", "[", "os", ".", "path", ".", "join", "(", "directory", ",", "f", ")", "for", "f", "in", "os", ".", "listdir", "(", "directory", ")", "if", "f", ".", "find", "(", "tag", ")", ">", "-", "1", "]", "if", "sorted", ":", "f", ".", "sort", "(", ")", "return", "f" ]
Reads in all filenames from a directory that contain a specified substring. Parameters ---------- directory : :obj:`str` the directory to read from tag : :obj:`str` optional tag to match in the filenames sorted : bool whether or not to sort the filenames recursive : bool whether or not to search for the files recursively Returns ------- :obj:`list` of :obj:`str` filenames to read from
[ "Reads", "in", "all", "filenames", "from", "a", "directory", "that", "contain", "a", "specified", "substring", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L178-L203
train
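A usage sketch against a throwaway directory (the filenames and tag are made up); note that the match is a plain substring test, not a glob pattern:

import os
import tempfile
from autolab_core.utils import filenames

d = tempfile.mkdtemp()
for name in ['000_depth.npy', '001_depth.npy', '000_color.png']:
    open(os.path.join(d, name), 'w').close()

# returns the two absolute paths containing 'depth', in sorted order
depth_files = filenames(d, tag='depth', sorted=True)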
BerkeleyAutomation/autolab_core
autolab_core/utils.py
sph2cart
def sph2cart(r, az, elev): """ Convert spherical to cartesian coordinates. Attributes ---------- r : float radius az : float aziumth (angle about z axis) elev : float elevation from xy plane Returns ------- float x-coordinate float y-coordinate float z-coordinate """ x = r * np.cos(az) * np.sin(elev) y = r * np.sin(az) * np.sin(elev) z = r * np.cos(elev) return x, y, z
python
def sph2cart(r, az, elev): """ Convert spherical to cartesian coordinates. Attributes ---------- r : float radius az : float aziumth (angle about z axis) elev : float elevation from xy plane Returns ------- float x-coordinate float y-coordinate float z-coordinate """ x = r * np.cos(az) * np.sin(elev) y = r * np.sin(az) * np.sin(elev) z = r * np.cos(elev) return x, y, z
[ "def", "sph2cart", "(", "r", ",", "az", ",", "elev", ")", ":", "x", "=", "r", "*", "np", ".", "cos", "(", "az", ")", "*", "np", ".", "sin", "(", "elev", ")", "y", "=", "r", "*", "np", ".", "sin", "(", "az", ")", "*", "np", ".", "sin", "(", "elev", ")", "z", "=", "r", "*", "np", ".", "cos", "(", "elev", ")", "return", "x", ",", "y", ",", "z" ]
Convert spherical to cartesian coordinates.

Attributes
----------
r : float
    radius
az : float
    azimuth (angle about z axis)
elev : float
    elevation from xy plane

Returns
-------
float
    x-coordinate
float
    y-coordinate
float
    z-coordinate
[ "Convert", "spherical", "to", "cartesian", "coordinates", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L205-L229
train
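Note that the body measures elev from the +z axis (z = r * cos(elev)) rather than from the xy plane, so elev = pi/2 lands in the xy plane. A quick numeric check under that reading:

import numpy as np
from autolab_core.utils import sph2cart

# unit radius, azimuth 90 degrees, polar angle 90 degrees -> the +y axis
x, y, z = sph2cart(1.0, np.pi / 2, np.pi / 2)
assert np.allclose((x, y, z), (0.0, 1.0, 0.0))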
BerkeleyAutomation/autolab_core
autolab_core/utils.py
cart2sph
def cart2sph(x, y, z): """ Convert cartesian to spherical coordinates. Attributes ---------- x : float x-coordinate y : float y-coordinate z : float z-coordinate Returns ------- float radius float aziumth float elevation """ r = np.sqrt(x**2 + y**2 + z**2) if x > 0 and y > 0: az = np.arctan(y / x) elif x > 0 and y < 0: az = 2*np.pi - np.arctan(-y / x) elif x < 0 and y > 0: az = np.pi - np.arctan(-y / x) elif x < 0 and y < 0: az = np.pi + np.arctan(y / x) elif x == 0 and y > 0: az = np.pi / 2 elif x == 0 and y < 0: az = 3 * np.pi / 2 elif y == 0 and x > 0: az = 0 elif y == 0 and x < 0: az = np.pi elev = np.arccos(z / r) return r, az, elev
python
def cart2sph(x, y, z): """ Convert cartesian to spherical coordinates. Attributes ---------- x : float x-coordinate y : float y-coordinate z : float z-coordinate Returns ------- float radius float aziumth float elevation """ r = np.sqrt(x**2 + y**2 + z**2) if x > 0 and y > 0: az = np.arctan(y / x) elif x > 0 and y < 0: az = 2*np.pi - np.arctan(-y / x) elif x < 0 and y > 0: az = np.pi - np.arctan(-y / x) elif x < 0 and y < 0: az = np.pi + np.arctan(y / x) elif x == 0 and y > 0: az = np.pi / 2 elif x == 0 and y < 0: az = 3 * np.pi / 2 elif y == 0 and x > 0: az = 0 elif y == 0 and x < 0: az = np.pi elev = np.arccos(z / r) return r, az, elev
[ "def", "cart2sph", "(", "x", ",", "y", ",", "z", ")", ":", "r", "=", "np", ".", "sqrt", "(", "x", "**", "2", "+", "y", "**", "2", "+", "z", "**", "2", ")", "if", "x", ">", "0", "and", "y", ">", "0", ":", "az", "=", "np", ".", "arctan", "(", "y", "/", "x", ")", "elif", "x", ">", "0", "and", "y", "<", "0", ":", "az", "=", "2", "*", "np", ".", "pi", "-", "np", ".", "arctan", "(", "-", "y", "/", "x", ")", "elif", "x", "<", "0", "and", "y", ">", "0", ":", "az", "=", "np", ".", "pi", "-", "np", ".", "arctan", "(", "-", "y", "/", "x", ")", "elif", "x", "<", "0", "and", "y", "<", "0", ":", "az", "=", "np", ".", "pi", "+", "np", ".", "arctan", "(", "y", "/", "x", ")", "elif", "x", "==", "0", "and", "y", ">", "0", ":", "az", "=", "np", ".", "pi", "/", "2", "elif", "x", "==", "0", "and", "y", "<", "0", ":", "az", "=", "3", "*", "np", ".", "pi", "/", "2", "elif", "y", "==", "0", "and", "x", ">", "0", ":", "az", "=", "0", "elif", "y", "==", "0", "and", "x", "<", "0", ":", "az", "=", "np", ".", "pi", "elev", "=", "np", ".", "arccos", "(", "z", "/", "r", ")", "return", "r", ",", "az", ",", "elev" ]
Convert cartesian to spherical coordinates.

Attributes
----------
x : float
    x-coordinate
y : float
    y-coordinate
z : float
    z-coordinate

Returns
-------
float
    radius
float
    azimuth
float
    elevation
[ "Convert", "cartesian", "to", "spherical", "coordinates", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L231-L270
train
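cart2sph and sph2cart are intended as inverses away from the coordinate axes (the branch structure above does not cover x = y = 0), which suggests a simple round-trip sketch:

import numpy as np
from autolab_core.utils import sph2cart, cart2sph

x, y, z = 1.0, 2.0, 2.0
r, az, elev = cart2sph(x, y, z)
# converting back should recover the original point
assert np.allclose(sph2cart(r, az, elev), (x, y, z))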
BerkeleyAutomation/autolab_core
autolab_core/utils.py
keyboard_input
def keyboard_input(message, yesno=False): """ Get keyboard input from a human, optionally reasking for valid yes or no input. Parameters ---------- message : :obj:`str` the message to display to the user yesno : :obj:`bool` whether or not to enforce yes or no inputs Returns ------- :obj:`str` string input by the human """ # add space for readability message += ' ' # add yes or no to message if yesno: message += '[y/n] ' # ask human human_input = input(message) if yesno: while human_input.lower() != 'n' and human_input.lower() != 'y': logging.info('Did not understand input. Please answer \'y\' or \'n\'') human_input = input(message) return human_input
python
def keyboard_input(message, yesno=False): """ Get keyboard input from a human, optionally reasking for valid yes or no input. Parameters ---------- message : :obj:`str` the message to display to the user yesno : :obj:`bool` whether or not to enforce yes or no inputs Returns ------- :obj:`str` string input by the human """ # add space for readability message += ' ' # add yes or no to message if yesno: message += '[y/n] ' # ask human human_input = input(message) if yesno: while human_input.lower() != 'n' and human_input.lower() != 'y': logging.info('Did not understand input. Please answer \'y\' or \'n\'') human_input = input(message) return human_input
[ "def", "keyboard_input", "(", "message", ",", "yesno", "=", "False", ")", ":", "# add space for readability", "message", "+=", "' '", "# add yes or no to message", "if", "yesno", ":", "message", "+=", "'[y/n] '", "# ask human", "human_input", "=", "input", "(", "message", ")", "if", "yesno", ":", "while", "human_input", ".", "lower", "(", ")", "!=", "'n'", "and", "human_input", ".", "lower", "(", ")", "!=", "'y'", ":", "logging", ".", "info", "(", "'Did not understand input. Please answer \\'y\\' or \\'n\\''", ")", "human_input", "=", "input", "(", "message", ")", "return", "human_input" ]
Get keyboard input from a human, optionally reasking for valid yes or no input. Parameters ---------- message : :obj:`str` the message to display to the user yesno : :obj:`bool` whether or not to enforce yes or no inputs Returns ------- :obj:`str` string input by the human
[ "Get", "keyboard", "input", "from", "a", "human", "optionally", "reasking", "for", "valid", "yes", "or", "no", "input", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L272-L301
train
BerkeleyAutomation/autolab_core
autolab_core/dual_quaternion.py
DualQuaternion.interpolate
def interpolate(dq0, dq1, t): """Return the interpolation of two DualQuaternions. This uses the Dual Quaternion Linear Blending Method as described by Matthew Smith's 'Applications of Dual Quaternions in Three Dimensional Transformation and Interpolation' https://www.cosc.canterbury.ac.nz/research/reports/HonsReps/2013/hons_1305.pdf Parameters ---------- dq0 : :obj:`DualQuaternion` The first DualQuaternion. dq1 : :obj:`DualQuaternion` The second DualQuaternion. t : float The interpolation step in [0,1]. When t=0, this returns dq0, and when t=1, this returns dq1. Returns ------- :obj:`DualQuaternion` The interpolated DualQuaternion. Raises ------ ValueError If t isn't in [0,1]. """ if not 0 <= t <= 1: raise ValueError("Interpolation step must be between 0 and 1! Got {0}".format(t)) dqt = dq0 * (1-t) + dq1 * t return dqt.normalized
python
def interpolate(dq0, dq1, t): """Return the interpolation of two DualQuaternions. This uses the Dual Quaternion Linear Blending Method as described by Matthew Smith's 'Applications of Dual Quaternions in Three Dimensional Transformation and Interpolation' https://www.cosc.canterbury.ac.nz/research/reports/HonsReps/2013/hons_1305.pdf Parameters ---------- dq0 : :obj:`DualQuaternion` The first DualQuaternion. dq1 : :obj:`DualQuaternion` The second DualQuaternion. t : float The interpolation step in [0,1]. When t=0, this returns dq0, and when t=1, this returns dq1. Returns ------- :obj:`DualQuaternion` The interpolated DualQuaternion. Raises ------ ValueError If t isn't in [0,1]. """ if not 0 <= t <= 1: raise ValueError("Interpolation step must be between 0 and 1! Got {0}".format(t)) dqt = dq0 * (1-t) + dq1 * t return dqt.normalized
[ "def", "interpolate", "(", "dq0", ",", "dq1", ",", "t", ")", ":", "if", "not", "0", "<=", "t", "<=", "1", ":", "raise", "ValueError", "(", "\"Interpolation step must be between 0 and 1! Got {0}\"", ".", "format", "(", "t", ")", ")", "dqt", "=", "dq0", "*", "(", "1", "-", "t", ")", "+", "dq1", "*", "t", "return", "dqt", ".", "normalized" ]
Return the interpolation of two DualQuaternions. This uses the Dual Quaternion Linear Blending Method as described by Matthew Smith's 'Applications of Dual Quaternions in Three Dimensional Transformation and Interpolation' https://www.cosc.canterbury.ac.nz/research/reports/HonsReps/2013/hons_1305.pdf Parameters ---------- dq0 : :obj:`DualQuaternion` The first DualQuaternion. dq1 : :obj:`DualQuaternion` The second DualQuaternion. t : float The interpolation step in [0,1]. When t=0, this returns dq0, and when t=1, this returns dq1. Returns ------- :obj:`DualQuaternion` The interpolated DualQuaternion. Raises ------ ValueError If t isn't in [0,1].
[ "Return", "the", "interpolation", "of", "two", "DualQuaternions", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/dual_quaternion.py#L129-L162
train
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel._save
def _save(self): """Save the model to a .csv file """ # if not first time saving, copy .csv to a backup if os.path.isfile(self._full_filename): shutil.copyfile(self._full_filename, self._full_backup_filename) # write to csv with open(self._full_filename, 'w') as file: writer = csv.DictWriter(file, fieldnames=self._headers) writer.writeheader() for row in self._table: writer.writerow(row)
python
def _save(self): """Save the model to a .csv file """ # if not first time saving, copy .csv to a backup if os.path.isfile(self._full_filename): shutil.copyfile(self._full_filename, self._full_backup_filename) # write to csv with open(self._full_filename, 'w') as file: writer = csv.DictWriter(file, fieldnames=self._headers) writer.writeheader() for row in self._table: writer.writerow(row)
[ "def", "_save", "(", "self", ")", ":", "# if not first time saving, copy .csv to a backup", "if", "os", ".", "path", ".", "isfile", "(", "self", ".", "_full_filename", ")", ":", "shutil", ".", "copyfile", "(", "self", ".", "_full_filename", ",", "self", ".", "_full_backup_filename", ")", "# write to csv", "with", "open", "(", "self", ".", "_full_filename", ",", "'w'", ")", "as", "file", ":", "writer", "=", "csv", ".", "DictWriter", "(", "file", ",", "fieldnames", "=", "self", ".", "_headers", ")", "writer", ".", "writeheader", "(", ")", "for", "row", "in", "self", ".", "_table", ":", "writer", ".", "writerow", "(", "row", ")" ]
Save the model to a .csv file
[ "Save", "the", "model", "to", "a", ".", "csv", "file" ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L103-L115
train
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel.insert
def insert(self, data): """Insert a row into the .csv file. Parameters ---------- data : :obj:`dict` A dictionary mapping keys (header strings) to values. Returns ------- int The UID for the new row. Raises ------ Exception If the value for a given header is not of the appropriate type. """ row = {key:self._default_entry for key in self._headers} row['_uid'] = self._get_new_uid() for key, val in data.items(): if key in ('_uid', '_default'): logging.warn("Cannot manually set columns _uid or _default of a row! Given data: {0}".format(data)) continue if not isinstance(val, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]]): raise Exception('Data type mismatch for column {0}. Expected: {1}, got: {2}'.format(key, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]], type(val))) row[key] = val self._table.append(row) self._save() return row['_uid']
python
def insert(self, data): """Insert a row into the .csv file. Parameters ---------- data : :obj:`dict` A dictionary mapping keys (header strings) to values. Returns ------- int The UID for the new row. Raises ------ Exception If the value for a given header is not of the appropriate type. """ row = {key:self._default_entry for key in self._headers} row['_uid'] = self._get_new_uid() for key, val in data.items(): if key in ('_uid', '_default'): logging.warn("Cannot manually set columns _uid or _default of a row! Given data: {0}".format(data)) continue if not isinstance(val, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]]): raise Exception('Data type mismatch for column {0}. Expected: {1}, got: {2}'.format(key, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]], type(val))) row[key] = val self._table.append(row) self._save() return row['_uid']
[ "def", "insert", "(", "self", ",", "data", ")", ":", "row", "=", "{", "key", ":", "self", ".", "_default_entry", "for", "key", "in", "self", ".", "_headers", "}", "row", "[", "'_uid'", "]", "=", "self", ".", "_get_new_uid", "(", ")", "for", "key", ",", "val", "in", "data", ".", "items", "(", ")", ":", "if", "key", "in", "(", "'_uid'", ",", "'_default'", ")", ":", "logging", ".", "warn", "(", "\"Cannot manually set columns _uid or _default of a row! Given data: {0}\"", ".", "format", "(", "data", ")", ")", "continue", "if", "not", "isinstance", "(", "val", ",", "CSVModel", ".", "_KNOWN_TYPES_MAP", "[", "self", ".", "_headers_types", "[", "key", "]", "]", ")", ":", "raise", "Exception", "(", "'Data type mismatch for column {0}. Expected: {1}, got: {2}'", ".", "format", "(", "key", ",", "CSVModel", ".", "_KNOWN_TYPES_MAP", "[", "self", ".", "_headers_types", "[", "key", "]", "]", ",", "type", "(", "val", ")", ")", ")", "row", "[", "key", "]", "=", "val", "self", ".", "_table", ".", "append", "(", "row", ")", "self", ".", "_save", "(", ")", "return", "row", "[", "'_uid'", "]" ]
Insert a row into the .csv file. Parameters ---------- data : :obj:`dict` A dictionary mapping keys (header strings) to values. Returns ------- int The UID for the new row. Raises ------ Exception If the value for a given header is not of the appropriate type.
[ "Insert", "a", "row", "into", "the", ".", "csv", "file", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L117-L149
train
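A hedged usage sketch: the constructor signature (filename, list of (header, type-string) pairs, optional default_entry) is inferred from the CSVModel.load method further down in this file, and the 'str'/'float' type strings are assumed to be keys of CSVModel._KNOWN_TYPES_MAP:

import os
import tempfile
from autolab_core.csv_model import CSVModel

csv_path = os.path.join(tempfile.mkdtemp(), 'experiments.csv')
model = CSVModel(csv_path, [('experiment_id', 'str'), ('error_rate', 'float')])

# inserted values must match the declared column types, or insert raises
uid = model.insert({'experiment_id': 'abcdefghij', 'error_rate': 3.2})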
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel.update_by_uid
def update_by_uid(self, uid, data): """Update a row with the given data. Parameters ---------- uid : int The UID of the row to update. data : :obj:`dict` A dictionary mapping keys (header strings) to values. Raises ------ Exception If the value for a given header is not of the appropriate type. """ row = self._table[uid+1] for key, val in data.items(): if key == '_uid' or key == '_default': continue if key not in self._headers: logging.warn("Unknown column name: {0}".format(key)) continue if not isinstance(val, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]]): raise Exception('Data type mismatch for column {0}. Expected: {1}, got: {2}'.format(key, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]], type(val))) row[key] = val self._save()
python
def update_by_uid(self, uid, data): """Update a row with the given data. Parameters ---------- uid : int The UID of the row to update. data : :obj:`dict` A dictionary mapping keys (header strings) to values. Raises ------ Exception If the value for a given header is not of the appropriate type. """ row = self._table[uid+1] for key, val in data.items(): if key == '_uid' or key == '_default': continue if key not in self._headers: logging.warn("Unknown column name: {0}".format(key)) continue if not isinstance(val, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]]): raise Exception('Data type mismatch for column {0}. Expected: {1}, got: {2}'.format(key, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]], type(val))) row[key] = val self._save()
[ "def", "update_by_uid", "(", "self", ",", "uid", ",", "data", ")", ":", "row", "=", "self", ".", "_table", "[", "uid", "+", "1", "]", "for", "key", ",", "val", "in", "data", ".", "items", "(", ")", ":", "if", "key", "==", "'_uid'", "or", "key", "==", "'_default'", ":", "continue", "if", "key", "not", "in", "self", ".", "_headers", ":", "logging", ".", "warn", "(", "\"Unknown column name: {0}\"", ".", "format", "(", "key", ")", ")", "continue", "if", "not", "isinstance", "(", "val", ",", "CSVModel", ".", "_KNOWN_TYPES_MAP", "[", "self", ".", "_headers_types", "[", "key", "]", "]", ")", ":", "raise", "Exception", "(", "'Data type mismatch for column {0}. Expected: {1}, got: {2}'", ".", "format", "(", "key", ",", "CSVModel", ".", "_KNOWN_TYPES_MAP", "[", "self", ".", "_headers_types", "[", "key", "]", "]", ",", "type", "(", "val", ")", ")", ")", "row", "[", "key", "]", "=", "val", "self", ".", "_save", "(", ")" ]
Update a row with the given data. Parameters ---------- uid : int The UID of the row to update. data : :obj:`dict` A dictionary mapping keys (header strings) to values. Raises ------ Exception If the value for a given header is not of the appropriate type.
[ "Update", "a", "row", "with", "the", "given", "data", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L151-L178
train
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel.get_col
def get_col(self, col_name, filter = lambda _ : True): """Return all values in the column corresponding to col_name that satisfies filter, which is a function that takes in a value of the column's type and returns True or False Parameters ---------- col_name : str Name of desired column filter : function, optional A function that takes in a value of the column's type and returns True or False Defaults to a function that always returns True Returns ------- list A list of values in the desired columns by order of their storage in the model Raises ------ ValueError If the desired column name is not found in the model """ if col_name not in self._headers: raise ValueError("{} not found! Model has headers: {}".format(col_name, self._headers)) col = [] for i in range(self.num_rows): row = self._table[i + 1] val = row[col_name] if filter(val): col.append(val) return col
python
def get_col(self, col_name, filter = lambda _ : True): """Return all values in the column corresponding to col_name that satisfies filter, which is a function that takes in a value of the column's type and returns True or False Parameters ---------- col_name : str Name of desired column filter : function, optional A function that takes in a value of the column's type and returns True or False Defaults to a function that always returns True Returns ------- list A list of values in the desired columns by order of their storage in the model Raises ------ ValueError If the desired column name is not found in the model """ if col_name not in self._headers: raise ValueError("{} not found! Model has headers: {}".format(col_name, self._headers)) col = [] for i in range(self.num_rows): row = self._table[i + 1] val = row[col_name] if filter(val): col.append(val) return col
[ "def", "get_col", "(", "self", ",", "col_name", ",", "filter", "=", "lambda", "_", ":", "True", ")", ":", "if", "col_name", "not", "in", "self", ".", "_headers", ":", "raise", "ValueError", "(", "\"{} not found! Model has headers: {}\"", ".", "format", "(", "col_name", ",", "self", ".", "_headers", ")", ")", "col", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "num_rows", ")", ":", "row", "=", "self", ".", "_table", "[", "i", "+", "1", "]", "val", "=", "row", "[", "col_name", "]", "if", "filter", "(", "val", ")", ":", "col", ".", "append", "(", "val", ")", "return", "col" ]
Return all values in the column corresponding to col_name that satisfy
filter, which is a function that takes in a value of the column's type
and returns True or False.

Parameters
----------
col_name : str
    Name of desired column
filter : function, optional
    A function that takes in a value of the column's type and returns True or False.
    Defaults to a function that always returns True

Returns
-------
list
    A list of values in the desired column by order of their storage in the model

Raises
------
ValueError
    If the desired column name is not found in the model
[ "Return", "all", "values", "in", "the", "column", "corresponding", "to", "col_name", "that", "satisfies", "filter", "which", "is", "a", "function", "that", "takes", "in", "a", "value", "of", "the", "column", "s", "type", "and", "returns", "True", "or", "False" ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L212-L243
train
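Continuing the earlier CSVModel sketch, the optional filter collects only the column entries that pass a predicate (the threshold here is arbitrary):

# assuming `model` is the populated CSVModel from the earlier sketch
low_error = model.get_col('error_rate', filter=lambda e: e < 5.0)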
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel.get_by_cols
def get_by_cols(self, cols, direction=1): """Return the first or last row that satisfies the given col value constraints, or None if no row contains the given value. Parameters ---------- cols: :obj:'dict' Dictionary of col values for a specific row. direction: int, optional Either 1 or -1. 1 means find the first row, -1 means find the last row. Returns ------- :obj:`dict` A dictionary mapping keys (header strings) to values, which represents a row of the table. This row contains the given value in the specified column. """ if direction == 1: iterator = range(self.num_rows) elif direction == -1: iterator = range(self.num_rows-1, -1, -1) else: raise ValueError("Direction can only be 1 (first) or -1 (last). Got: {0}".format(direction)) for i in iterator: row = self._table[i+1] all_sat = True for key, val in cols.items(): if row[key] != val: all_sat = False break if all_sat: return row.copy() return None
python
def get_by_cols(self, cols, direction=1): """Return the first or last row that satisfies the given col value constraints, or None if no row contains the given value. Parameters ---------- cols: :obj:'dict' Dictionary of col values for a specific row. direction: int, optional Either 1 or -1. 1 means find the first row, -1 means find the last row. Returns ------- :obj:`dict` A dictionary mapping keys (header strings) to values, which represents a row of the table. This row contains the given value in the specified column. """ if direction == 1: iterator = range(self.num_rows) elif direction == -1: iterator = range(self.num_rows-1, -1, -1) else: raise ValueError("Direction can only be 1 (first) or -1 (last). Got: {0}".format(direction)) for i in iterator: row = self._table[i+1] all_sat = True for key, val in cols.items(): if row[key] != val: all_sat = False break if all_sat: return row.copy() return None
[ "def", "get_by_cols", "(", "self", ",", "cols", ",", "direction", "=", "1", ")", ":", "if", "direction", "==", "1", ":", "iterator", "=", "range", "(", "self", ".", "num_rows", ")", "elif", "direction", "==", "-", "1", ":", "iterator", "=", "range", "(", "self", ".", "num_rows", "-", "1", ",", "-", "1", ",", "-", "1", ")", "else", ":", "raise", "ValueError", "(", "\"Direction can only be 1 (first) or -1 (last). Got: {0}\"", ".", "format", "(", "direction", ")", ")", "for", "i", "in", "iterator", ":", "row", "=", "self", ".", "_table", "[", "i", "+", "1", "]", "all_sat", "=", "True", "for", "key", ",", "val", "in", "cols", ".", "items", "(", ")", ":", "if", "row", "[", "key", "]", "!=", "val", ":", "all_sat", "=", "False", "break", "if", "all_sat", ":", "return", "row", ".", "copy", "(", ")", "return", "None" ]
Return the first or last row that satisfies the given col value constraints, or None if no row contains the given value. Parameters ---------- cols: :obj:'dict' Dictionary of col values for a specific row. direction: int, optional Either 1 or -1. 1 means find the first row, -1 means find the last row. Returns ------- :obj:`dict` A dictionary mapping keys (header strings) to values, which represents a row of the table. This row contains the given value in the specified column.
[ "Return", "the", "first", "or", "last", "row", "that", "satisfies", "the", "given", "col", "value", "constraints", "or", "None", "if", "no", "row", "contains", "the", "given", "value", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L245-L282
train
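A minimal usage sketch for get_by_cols. The CSVModel instance `model` and the column names 'status' and 'trial' are illustrative assumptions, not taken from the source:

# first row whose 'status' and 'trial' columns both match the constraints
first_match = model.get_by_cols({'status': 'done', 'trial': 3})

# direction=-1 scans from the last row backwards and returns the last match
last_match = model.get_by_cols({'status': 'done', 'trial': 3}, direction=-1)

if first_match is None:
    print('no row satisfies the constraints')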
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel.get_rows_by_cols
def get_rows_by_cols(self, matching_dict): """Return all rows where the cols match the elements given in the matching_dict Parameters ---------- matching_dict: :obj:'dict' Desired dictionary of col values. Returns ------- :obj:`list` A list of rows that satisfy the matching_dict """ result = [] for i in range(self.num_rows): row = self._table[i+1] matching = True for key, val in matching_dict.items(): if row[key] != val: matching = False break if matching: result.append(row) return result
python
def get_rows_by_cols(self, matching_dict): """Return all rows where the cols match the elements given in the matching_dict Parameters ---------- matching_dict: :obj:'dict' Desired dictionary of col values. Returns ------- :obj:`list` A list of rows that satisfy the matching_dict """ result = [] for i in range(self.num_rows): row = self._table[i+1] matching = True for key, val in matching_dict.items(): if row[key] != val: matching = False break if matching: result.append(row) return result
[ "def", "get_rows_by_cols", "(", "self", ",", "matching_dict", ")", ":", "result", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "num_rows", ")", ":", "row", "=", "self", ".", "_table", "[", "i", "+", "1", "]", "matching", "=", "True", "for", "key", ",", "val", "in", "matching_dict", ".", "items", "(", ")", ":", "if", "row", "[", "key", "]", "!=", "val", ":", "matching", "=", "False", "break", "if", "matching", ":", "result", ".", "append", "(", "row", ")", "return", "result" ]
Return all rows where the cols match the elements given in the matching_dict Parameters ---------- matching_dict: :obj:`dict` Desired dictionary of col values. Returns ------- :obj:`list` A list of rows that satisfy the matching_dict
[ "Return", "all", "rows", "where", "the", "cols", "match", "the", "elements", "given", "in", "the", "matching_dict" ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L326-L351
train
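The plural variant returns every matching row rather than the first or last one; same hypothetical `model` and column names as in the sketch above:

# every row whose columns equal the values in the matching dict
matches = model.get_rows_by_cols({'status': 'done'})
for row in matches:
    print(row)  # each row is a dict keyed by the CSV headers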
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel.next
def next(self): """ Returns the next row in the CSV, for iteration """ if self._cur_row >= len(self._table): raise StopIteration data = self._table[self._cur_row].copy() self._cur_row += 1 return data
python
def next(self): """ Returns the next row in the CSV, for iteration """ if self._cur_row >= len(self._table): raise StopIteration data = self._table[self._cur_row].copy() self._cur_row += 1 return data
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "_cur_row", ">=", "len", "(", "self", ".", "_table", ")", ":", "raise", "StopIteration", "data", "=", "self", ".", "_table", "[", "self", ".", "_cur_row", "]", ".", "copy", "(", ")", "self", ".", "_cur_row", "+=", "1", "return", "data" ]
Returns the next row in the CSV, for iteration
[ "Returns", "the", "next", "row", "in", "the", "CSV", "for", "iteration" ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L358-L364
train
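Since next copies and returns rows in table order, a CSVModel can be walked like any Python iterator. This sketch assumes the class also defines __iter__, which the docstring implies but which is not shown here:

for row in model:
    print(row)  # a copied dict for each data row, in order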
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel.load
def load(full_filename): """Load a .csv file into a CSVModel. Parameters ---------- full_filename : :obj:`str` The file path to a .csv file. Returns ------- :obj:`CSVModel` The CSVModel initialized with the data in the given file. Raises ------ Excetpion If the CSV file does not exist or is malformed. """ with open(full_filename, 'r') as file: reader = csv.DictReader(file) headers = reader.fieldnames if '_uid' not in headers or '_default' not in headers: raise Exception("Malformed CSVModel file!") all_rows = [row for row in reader] types = all_rows[0] table = [types] default_entry = table[0]['_default'] for i in range(1, len(all_rows)): raw_row = all_rows[i] row = {} for column_name in headers: if raw_row[column_name] != default_entry and column_name != '': if types[column_name] == 'bool': row[column_name] = CSVModel._str_to_bool(raw_row[column_name]) else: try: row[column_name] = CSVModel._KNOWN_TYPES_MAP[types[column_name]](raw_row[column_name]) except: logging.error('{}, {}, {}'.format(column_name, types[column_name], raw_row[column_name])) row[column_name] = CSVModel._KNOWN_TYPES_MAP[types[column_name]](bool(raw_row[column_name])) else: row[column_name] = default_entry table.append(row) if len(table) == 1: next_valid_uid = 0 else: next_valid_uid = int(table[-1]['_uid']) + 1 headers_init = headers[1:-1] types_init = [types[column_name] for column_name in headers_init] headers_types_list = zip(headers_init, types_init) csv_model = CSVModel(full_filename, headers_types_list, default_entry=default_entry) csv_model._uid = next_valid_uid csv_model._table = table csv_model._save() return csv_model
python
def load(full_filename): """Load a .csv file into a CSVModel. Parameters ---------- full_filename : :obj:`str` The file path to a .csv file. Returns ------- :obj:`CSVModel` The CSVModel initialized with the data in the given file. Raises ------ Excetpion If the CSV file does not exist or is malformed. """ with open(full_filename, 'r') as file: reader = csv.DictReader(file) headers = reader.fieldnames if '_uid' not in headers or '_default' not in headers: raise Exception("Malformed CSVModel file!") all_rows = [row for row in reader] types = all_rows[0] table = [types] default_entry = table[0]['_default'] for i in range(1, len(all_rows)): raw_row = all_rows[i] row = {} for column_name in headers: if raw_row[column_name] != default_entry and column_name != '': if types[column_name] == 'bool': row[column_name] = CSVModel._str_to_bool(raw_row[column_name]) else: try: row[column_name] = CSVModel._KNOWN_TYPES_MAP[types[column_name]](raw_row[column_name]) except: logging.error('{}, {}, {}'.format(column_name, types[column_name], raw_row[column_name])) row[column_name] = CSVModel._KNOWN_TYPES_MAP[types[column_name]](bool(raw_row[column_name])) else: row[column_name] = default_entry table.append(row) if len(table) == 1: next_valid_uid = 0 else: next_valid_uid = int(table[-1]['_uid']) + 1 headers_init = headers[1:-1] types_init = [types[column_name] for column_name in headers_init] headers_types_list = zip(headers_init, types_init) csv_model = CSVModel(full_filename, headers_types_list, default_entry=default_entry) csv_model._uid = next_valid_uid csv_model._table = table csv_model._save() return csv_model
[ "def", "load", "(", "full_filename", ")", ":", "with", "open", "(", "full_filename", ",", "'r'", ")", "as", "file", ":", "reader", "=", "csv", ".", "DictReader", "(", "file", ")", "headers", "=", "reader", ".", "fieldnames", "if", "'_uid'", "not", "in", "headers", "or", "'_default'", "not", "in", "headers", ":", "raise", "Exception", "(", "\"Malformed CSVModel file!\"", ")", "all_rows", "=", "[", "row", "for", "row", "in", "reader", "]", "types", "=", "all_rows", "[", "0", "]", "table", "=", "[", "types", "]", "default_entry", "=", "table", "[", "0", "]", "[", "'_default'", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "all_rows", ")", ")", ":", "raw_row", "=", "all_rows", "[", "i", "]", "row", "=", "{", "}", "for", "column_name", "in", "headers", ":", "if", "raw_row", "[", "column_name", "]", "!=", "default_entry", "and", "column_name", "!=", "''", ":", "if", "types", "[", "column_name", "]", "==", "'bool'", ":", "row", "[", "column_name", "]", "=", "CSVModel", ".", "_str_to_bool", "(", "raw_row", "[", "column_name", "]", ")", "else", ":", "try", ":", "row", "[", "column_name", "]", "=", "CSVModel", ".", "_KNOWN_TYPES_MAP", "[", "types", "[", "column_name", "]", "]", "(", "raw_row", "[", "column_name", "]", ")", "except", ":", "logging", ".", "error", "(", "'{}, {}, {}'", ".", "format", "(", "column_name", ",", "types", "[", "column_name", "]", ",", "raw_row", "[", "column_name", "]", ")", ")", "row", "[", "column_name", "]", "=", "CSVModel", ".", "_KNOWN_TYPES_MAP", "[", "types", "[", "column_name", "]", "]", "(", "bool", "(", "raw_row", "[", "column_name", "]", ")", ")", "else", ":", "row", "[", "column_name", "]", "=", "default_entry", "table", ".", "append", "(", "row", ")", "if", "len", "(", "table", ")", "==", "1", ":", "next_valid_uid", "=", "0", "else", ":", "next_valid_uid", "=", "int", "(", "table", "[", "-", "1", "]", "[", "'_uid'", "]", ")", "+", "1", "headers_init", "=", "headers", "[", "1", ":", "-", "1", "]", "types_init", "=", "[", "types", "[", "column_name", "]", "for", "column_name", "in", "headers_init", "]", "headers_types_list", "=", "zip", "(", "headers_init", ",", "types_init", ")", "csv_model", "=", "CSVModel", "(", "full_filename", ",", "headers_types_list", ",", "default_entry", "=", "default_entry", ")", "csv_model", ".", "_uid", "=", "next_valid_uid", "csv_model", ".", "_table", "=", "table", "csv_model", ".", "_save", "(", ")", "return", "csv_model" ]
Load a .csv file into a CSVModel. Parameters ---------- full_filename : :obj:`str` The file path to a .csv file. Returns ------- :obj:`CSVModel` The CSVModel initialized with the data in the given file. Raises ------ Exception If the CSV file does not exist or is malformed.
[ "Load", "a", ".", "csv", "file", "into", "a", "CSVModel", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L379-L438
train
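A short sketch of loading an existing table from disk. The import path follows the file path shown above, the .csv path is hypothetical, and num_rows is the property already referenced by get_by_cols:

from autolab_core.csv_model import CSVModel

model = CSVModel.load('/tmp/experiments.csv')  # hypothetical path
print(model.num_rows)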
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel.get_or_create
def get_or_create(full_filename, headers_types=None, default_entry=''): """Load a .csv file into a CSVModel if the file exists, or create a new CSVModel with the given filename if the file does not exist. Parameters ---------- full_filename : :obj:`str` The file path to a .csv file. headers_types : :obj:`list` of :obj:`tuple` of :obj:`str`, :obj:`str` A list of tuples, where the first element in each tuple is the string header for a column and the second element is that column's data type as a string. default_entry : :obj:`str` The default entry for cells in the CSV. Returns ------- :obj:`CSVModel` The CSVModel initialized with the data in the given file, or a new CSVModel tied to the filename if the file doesn't currently exist. """ # convert dictionaries to list if isinstance(headers_types, dict): headers_types_list = [(k,v) for k,v in headers_types.items()] headers_types = headers_types_list if os.path.isfile(full_filename): return CSVModel.load(full_filename) else: return CSVModel(full_filename, headers_types, default_entry=default_entry)
python
def get_or_create(full_filename, headers_types=None, default_entry=''): """Load a .csv file into a CSVModel if the file exists, or create a new CSVModel with the given filename if the file does not exist. Parameters ---------- full_filename : :obj:`str` The file path to a .csv file. headers_types : :obj:`list` of :obj:`tuple` of :obj:`str`, :obj:`str` A list of tuples, where the first element in each tuple is the string header for a column and the second element is that column's data type as a string. default_entry : :obj:`str` The default entry for cells in the CSV. Returns ------- :obj:`CSVModel` The CSVModel initialized with the data in the given file, or a new CSVModel tied to the filename if the file doesn't currently exist. """ # convert dictionaries to list if isinstance(headers_types, dict): headers_types_list = [(k,v) for k,v in headers_types.items()] headers_types = headers_types_list if os.path.isfile(full_filename): return CSVModel.load(full_filename) else: return CSVModel(full_filename, headers_types, default_entry=default_entry)
[ "def", "get_or_create", "(", "full_filename", ",", "headers_types", "=", "None", ",", "default_entry", "=", "''", ")", ":", "# convert dictionaries to list", "if", "isinstance", "(", "headers_types", ",", "dict", ")", ":", "headers_types_list", "=", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "headers_types", ".", "items", "(", ")", "]", "headers_types", "=", "headers_types_list", "if", "os", ".", "path", ".", "isfile", "(", "full_filename", ")", ":", "return", "CSVModel", ".", "load", "(", "full_filename", ")", "else", ":", "return", "CSVModel", "(", "full_filename", ",", "headers_types", ",", "default_entry", "=", "default_entry", ")" ]
Load a .csv file into a CSVModel if the file exists, or create a new CSVModel with the given filename if the file does not exist. Parameters ---------- full_filename : :obj:`str` The file path to a .csv file. headers_types : :obj:`list` of :obj:`tuple` of :obj:`str`, :obj:`str` A list of tuples, where the first element in each tuple is the string header for a column and the second element is that column's data type as a string. default_entry : :obj:`str` The default entry for cells in the CSV. Returns ------- :obj:`CSVModel` The CSVModel initialized with the data in the given file, or a new CSVModel tied to the filename if the file doesn't currently exist.
[ "Load", "a", ".", "csv", "file", "into", "a", "CSVModel", "if", "the", "file", "exists", "or", "create", "a", "new", "CSVModel", "with", "the", "given", "filename", "if", "the", "file", "does", "not", "exist", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L441-L472
train
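A sketch of get_or_create with an explicit schema. The column names are illustrative, and the type strings are assumed to be keys of the CSVModel._KNOWN_TYPES_MAP used by load above (e.g. 'str', 'int', 'float', 'bool'):

from autolab_core.csv_model import CSVModel

headers_types = [('experiment_id', 'str'),
                 ('score', 'float'),
                 ('completed', 'bool')]

# loads /tmp/results.csv if it exists, otherwise creates a fresh table with this schema
model = CSVModel.get_or_create('/tmp/results.csv', headers_types=headers_types)

# a plain dict is also accepted; get_or_create converts it to a list of (header, type) tuples
model = CSVModel.get_or_create('/tmp/results.csv',
                               headers_types={'experiment_id': 'str', 'score': 'float'})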
BerkeleyAutomation/autolab_core
autolab_core/transformations.py
projection_matrix
def projection_matrix(point, normal, direction=None, perspective=None, pseudo=False): """Return matrix to project onto plane defined by point and normal. Using either perspective point, projection direction, or none of both. If pseudo is True, perspective projections will preserve relative depth such that Perspective = dot(Orthogonal, PseudoPerspective). >>> P = projection_matrix((0, 0, 0), (1, 0, 0)) >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) True >>> point = numpy.random.random(3) - 0.5 >>> normal = numpy.random.random(3) - 0.5 >>> direct = numpy.random.random(3) - 0.5 >>> persp = numpy.random.random(3) - 0.5 >>> P0 = projection_matrix(point, normal) >>> P1 = projection_matrix(point, normal, direction=direct) >>> P2 = projection_matrix(point, normal, perspective=persp) >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) >>> is_same_transform(P2, numpy.dot(P0, P3)) True >>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0)) >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0 >>> v0[3] = 1.0 >>> v1 = numpy.dot(P, v0) >>> numpy.allclose(v1[1], v0[1]) True >>> numpy.allclose(v1[0], 3.0-v1[1]) True """ M = numpy.identity(4) point = numpy.array(point[:3], dtype=numpy.float64, copy=False) normal = unit_vector(normal[:3]) if perspective is not None: # perspective projection perspective = numpy.array(perspective[:3], dtype=numpy.float64, copy=False) M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal) M[:3, :3] -= numpy.outer(perspective, normal) if pseudo: # preserve relative depth M[:3, :3] -= numpy.outer(normal, normal) M[:3, 3] = numpy.dot(point, normal) * (perspective+normal) else: M[:3, 3] = numpy.dot(point, normal) * perspective M[3, :3] = -normal M[3, 3] = numpy.dot(perspective, normal) elif direction is not None: # parallel projection direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False) scale = numpy.dot(direction, normal) M[:3, :3] -= numpy.outer(direction, normal) / scale M[:3, 3] = direction * (numpy.dot(point, normal) / scale) else: # orthogonal projection M[:3, :3] -= numpy.outer(normal, normal) M[:3, 3] = numpy.dot(point, normal) * normal return M
python
def projection_matrix(point, normal, direction=None, perspective=None, pseudo=False): """Return matrix to project onto plane defined by point and normal. Using either perspective point, projection direction, or none of both. If pseudo is True, perspective projections will preserve relative depth such that Perspective = dot(Orthogonal, PseudoPerspective). >>> P = projection_matrix((0, 0, 0), (1, 0, 0)) >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) True >>> point = numpy.random.random(3) - 0.5 >>> normal = numpy.random.random(3) - 0.5 >>> direct = numpy.random.random(3) - 0.5 >>> persp = numpy.random.random(3) - 0.5 >>> P0 = projection_matrix(point, normal) >>> P1 = projection_matrix(point, normal, direction=direct) >>> P2 = projection_matrix(point, normal, perspective=persp) >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) >>> is_same_transform(P2, numpy.dot(P0, P3)) True >>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0)) >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0 >>> v0[3] = 1.0 >>> v1 = numpy.dot(P, v0) >>> numpy.allclose(v1[1], v0[1]) True >>> numpy.allclose(v1[0], 3.0-v1[1]) True """ M = numpy.identity(4) point = numpy.array(point[:3], dtype=numpy.float64, copy=False) normal = unit_vector(normal[:3]) if perspective is not None: # perspective projection perspective = numpy.array(perspective[:3], dtype=numpy.float64, copy=False) M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal) M[:3, :3] -= numpy.outer(perspective, normal) if pseudo: # preserve relative depth M[:3, :3] -= numpy.outer(normal, normal) M[:3, 3] = numpy.dot(point, normal) * (perspective+normal) else: M[:3, 3] = numpy.dot(point, normal) * perspective M[3, :3] = -normal M[3, 3] = numpy.dot(perspective, normal) elif direction is not None: # parallel projection direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False) scale = numpy.dot(direction, normal) M[:3, :3] -= numpy.outer(direction, normal) / scale M[:3, 3] = direction * (numpy.dot(point, normal) / scale) else: # orthogonal projection M[:3, :3] -= numpy.outer(normal, normal) M[:3, 3] = numpy.dot(point, normal) * normal return M
[ "def", "projection_matrix", "(", "point", ",", "normal", ",", "direction", "=", "None", ",", "perspective", "=", "None", ",", "pseudo", "=", "False", ")", ":", "M", "=", "numpy", ".", "identity", "(", "4", ")", "point", "=", "numpy", ".", "array", "(", "point", "[", ":", "3", "]", ",", "dtype", "=", "numpy", ".", "float64", ",", "copy", "=", "False", ")", "normal", "=", "unit_vector", "(", "normal", "[", ":", "3", "]", ")", "if", "perspective", "is", "not", "None", ":", "# perspective projection", "perspective", "=", "numpy", ".", "array", "(", "perspective", "[", ":", "3", "]", ",", "dtype", "=", "numpy", ".", "float64", ",", "copy", "=", "False", ")", "M", "[", "0", ",", "0", "]", "=", "M", "[", "1", ",", "1", "]", "=", "M", "[", "2", ",", "2", "]", "=", "numpy", ".", "dot", "(", "perspective", "-", "point", ",", "normal", ")", "M", "[", ":", "3", ",", ":", "3", "]", "-=", "numpy", ".", "outer", "(", "perspective", ",", "normal", ")", "if", "pseudo", ":", "# preserve relative depth", "M", "[", ":", "3", ",", ":", "3", "]", "-=", "numpy", ".", "outer", "(", "normal", ",", "normal", ")", "M", "[", ":", "3", ",", "3", "]", "=", "numpy", ".", "dot", "(", "point", ",", "normal", ")", "*", "(", "perspective", "+", "normal", ")", "else", ":", "M", "[", ":", "3", ",", "3", "]", "=", "numpy", ".", "dot", "(", "point", ",", "normal", ")", "*", "perspective", "M", "[", "3", ",", ":", "3", "]", "=", "-", "normal", "M", "[", "3", ",", "3", "]", "=", "numpy", ".", "dot", "(", "perspective", ",", "normal", ")", "elif", "direction", "is", "not", "None", ":", "# parallel projection", "direction", "=", "numpy", ".", "array", "(", "direction", "[", ":", "3", "]", ",", "dtype", "=", "numpy", ".", "float64", ",", "copy", "=", "False", ")", "scale", "=", "numpy", ".", "dot", "(", "direction", ",", "normal", ")", "M", "[", ":", "3", ",", ":", "3", "]", "-=", "numpy", ".", "outer", "(", "direction", ",", "normal", ")", "/", "scale", "M", "[", ":", "3", ",", "3", "]", "=", "direction", "*", "(", "numpy", ".", "dot", "(", "point", ",", "normal", ")", "/", "scale", ")", "else", ":", "# orthogonal projection", "M", "[", ":", "3", ",", ":", "3", "]", "-=", "numpy", ".", "outer", "(", "normal", ",", "normal", ")", "M", "[", ":", "3", ",", "3", "]", "=", "numpy", ".", "dot", "(", "point", ",", "normal", ")", "*", "normal", "return", "M" ]
Return matrix to project onto plane defined by point and normal. Using either perspective point, projection direction, or none of both. If pseudo is True, perspective projections will preserve relative depth such that Perspective = dot(Orthogonal, PseudoPerspective). >>> P = projection_matrix((0, 0, 0), (1, 0, 0)) >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) True >>> point = numpy.random.random(3) - 0.5 >>> normal = numpy.random.random(3) - 0.5 >>> direct = numpy.random.random(3) - 0.5 >>> persp = numpy.random.random(3) - 0.5 >>> P0 = projection_matrix(point, normal) >>> P1 = projection_matrix(point, normal, direction=direct) >>> P2 = projection_matrix(point, normal, perspective=persp) >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) >>> is_same_transform(P2, numpy.dot(P0, P3)) True >>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0)) >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0 >>> v0[3] = 1.0 >>> v1 = numpy.dot(P, v0) >>> numpy.allclose(v1[1], v0[1]) True >>> numpy.allclose(v1[0], 3.0-v1[1]) True
[ "Return", "matrix", "to", "project", "onto", "plane", "defined", "by", "point", "and", "normal", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/transformations.py#L437-L496
train
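A self-contained numeric check of the orthogonal branch (no perspective point or projection direction given): a point projected onto the plane z = 1 keeps its x and y coordinates and lands on the plane. Only numpy and the import path implied by the file path above are assumed:

import numpy
from autolab_core.transformations import projection_matrix

point = numpy.array([0.0, 0.0, 1.0])    # a point on the plane z = 1
normal = numpy.array([0.0, 0.0, 1.0])   # plane normal

P = projection_matrix(point, normal)     # orthogonal projection onto the plane

v = numpy.array([0.3, -2.0, 5.0, 1.0])   # homogeneous test point
p = P.dot(v)
assert numpy.allclose(p, [0.3, -2.0, 1.0, 1.0])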
BerkeleyAutomation/autolab_core
autolab_core/transformations.py
projection_from_matrix
def projection_from_matrix(matrix, pseudo=False): """Return projection plane and perspective point from projection matrix. Return values are same as arguments for projection_matrix function: point, normal, direction, perspective, and pseudo. >>> point = numpy.random.random(3) - 0.5 >>> normal = numpy.random.random(3) - 0.5 >>> direct = numpy.random.random(3) - 0.5 >>> persp = numpy.random.random(3) - 0.5 >>> P0 = projection_matrix(point, normal) >>> result = projection_from_matrix(P0) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, direct) >>> result = projection_from_matrix(P0) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) >>> result = projection_from_matrix(P0, pseudo=False) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) >>> result = projection_from_matrix(P0, pseudo=True) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True """ M = numpy.array(matrix, dtype=numpy.float64, copy=False) M33 = M[:3, :3] l, V = numpy.linalg.eig(M) i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] if not pseudo and len(i): # point: any eigenvector corresponding to eigenvalue 1 point = numpy.real(V[:, i[-1]]).squeeze() point /= point[3] # direction: unit eigenvector corresponding to eigenvalue 0 l, V = numpy.linalg.eig(M33) i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] if not len(i): raise ValueError("no eigenvector corresponding to eigenvalue 0") direction = numpy.real(V[:, i[0]]).squeeze() direction /= vector_norm(direction) # normal: unit eigenvector of M33.T corresponding to eigenvalue 0 l, V = numpy.linalg.eig(M33.T) i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] if len(i): # parallel projection normal = numpy.real(V[:, i[0]]).squeeze() normal /= vector_norm(normal) return point, normal, direction, None, False else: # orthogonal projection, where normal equals direction vector return point, direction, None, None, False else: # perspective projection i = numpy.where(abs(numpy.real(l)) > 1e-8)[0] if not len(i): raise ValueError( "no eigenvector not corresponding to eigenvalue 0") point = numpy.real(V[:, i[-1]]).squeeze() point /= point[3] normal = - M[3, :3] perspective = M[:3, 3] / numpy.dot(point[:3], normal) if pseudo: perspective -= normal return point, normal, None, perspective, pseudo
python
def projection_from_matrix(matrix, pseudo=False): """Return projection plane and perspective point from projection matrix. Return values are same as arguments for projection_matrix function: point, normal, direction, perspective, and pseudo. >>> point = numpy.random.random(3) - 0.5 >>> normal = numpy.random.random(3) - 0.5 >>> direct = numpy.random.random(3) - 0.5 >>> persp = numpy.random.random(3) - 0.5 >>> P0 = projection_matrix(point, normal) >>> result = projection_from_matrix(P0) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, direct) >>> result = projection_from_matrix(P0) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) >>> result = projection_from_matrix(P0, pseudo=False) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) >>> result = projection_from_matrix(P0, pseudo=True) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True """ M = numpy.array(matrix, dtype=numpy.float64, copy=False) M33 = M[:3, :3] l, V = numpy.linalg.eig(M) i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] if not pseudo and len(i): # point: any eigenvector corresponding to eigenvalue 1 point = numpy.real(V[:, i[-1]]).squeeze() point /= point[3] # direction: unit eigenvector corresponding to eigenvalue 0 l, V = numpy.linalg.eig(M33) i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] if not len(i): raise ValueError("no eigenvector corresponding to eigenvalue 0") direction = numpy.real(V[:, i[0]]).squeeze() direction /= vector_norm(direction) # normal: unit eigenvector of M33.T corresponding to eigenvalue 0 l, V = numpy.linalg.eig(M33.T) i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] if len(i): # parallel projection normal = numpy.real(V[:, i[0]]).squeeze() normal /= vector_norm(normal) return point, normal, direction, None, False else: # orthogonal projection, where normal equals direction vector return point, direction, None, None, False else: # perspective projection i = numpy.where(abs(numpy.real(l)) > 1e-8)[0] if not len(i): raise ValueError( "no eigenvector not corresponding to eigenvalue 0") point = numpy.real(V[:, i[-1]]).squeeze() point /= point[3] normal = - M[3, :3] perspective = M[:3, 3] / numpy.dot(point[:3], normal) if pseudo: perspective -= normal return point, normal, None, perspective, pseudo
[ "def", "projection_from_matrix", "(", "matrix", ",", "pseudo", "=", "False", ")", ":", "M", "=", "numpy", ".", "array", "(", "matrix", ",", "dtype", "=", "numpy", ".", "float64", ",", "copy", "=", "False", ")", "M33", "=", "M", "[", ":", "3", ",", ":", "3", "]", "l", ",", "V", "=", "numpy", ".", "linalg", ".", "eig", "(", "M", ")", "i", "=", "numpy", ".", "where", "(", "abs", "(", "numpy", ".", "real", "(", "l", ")", "-", "1.0", ")", "<", "1e-8", ")", "[", "0", "]", "if", "not", "pseudo", "and", "len", "(", "i", ")", ":", "# point: any eigenvector corresponding to eigenvalue 1", "point", "=", "numpy", ".", "real", "(", "V", "[", ":", ",", "i", "[", "-", "1", "]", "]", ")", ".", "squeeze", "(", ")", "point", "/=", "point", "[", "3", "]", "# direction: unit eigenvector corresponding to eigenvalue 0", "l", ",", "V", "=", "numpy", ".", "linalg", ".", "eig", "(", "M33", ")", "i", "=", "numpy", ".", "where", "(", "abs", "(", "numpy", ".", "real", "(", "l", ")", ")", "<", "1e-8", ")", "[", "0", "]", "if", "not", "len", "(", "i", ")", ":", "raise", "ValueError", "(", "\"no eigenvector corresponding to eigenvalue 0\"", ")", "direction", "=", "numpy", ".", "real", "(", "V", "[", ":", ",", "i", "[", "0", "]", "]", ")", ".", "squeeze", "(", ")", "direction", "/=", "vector_norm", "(", "direction", ")", "# normal: unit eigenvector of M33.T corresponding to eigenvalue 0", "l", ",", "V", "=", "numpy", ".", "linalg", ".", "eig", "(", "M33", ".", "T", ")", "i", "=", "numpy", ".", "where", "(", "abs", "(", "numpy", ".", "real", "(", "l", ")", ")", "<", "1e-8", ")", "[", "0", "]", "if", "len", "(", "i", ")", ":", "# parallel projection", "normal", "=", "numpy", ".", "real", "(", "V", "[", ":", ",", "i", "[", "0", "]", "]", ")", ".", "squeeze", "(", ")", "normal", "/=", "vector_norm", "(", "normal", ")", "return", "point", ",", "normal", ",", "direction", ",", "None", ",", "False", "else", ":", "# orthogonal projection, where normal equals direction vector", "return", "point", ",", "direction", ",", "None", ",", "None", ",", "False", "else", ":", "# perspective projection", "i", "=", "numpy", ".", "where", "(", "abs", "(", "numpy", ".", "real", "(", "l", ")", ")", ">", "1e-8", ")", "[", "0", "]", "if", "not", "len", "(", "i", ")", ":", "raise", "ValueError", "(", "\"no eigenvector not corresponding to eigenvalue 0\"", ")", "point", "=", "numpy", ".", "real", "(", "V", "[", ":", ",", "i", "[", "-", "1", "]", "]", ")", ".", "squeeze", "(", ")", "point", "/=", "point", "[", "3", "]", "normal", "=", "-", "M", "[", "3", ",", ":", "3", "]", "perspective", "=", "M", "[", ":", "3", ",", "3", "]", "/", "numpy", ".", "dot", "(", "point", "[", ":", "3", "]", ",", "normal", ")", "if", "pseudo", ":", "perspective", "-=", "normal", "return", "point", ",", "normal", ",", "None", ",", "perspective", ",", "pseudo" ]
Return projection plane and perspective point from projection matrix. Return values are same as arguments for projection_matrix function: point, normal, direction, perspective, and pseudo. >>> point = numpy.random.random(3) - 0.5 >>> normal = numpy.random.random(3) - 0.5 >>> direct = numpy.random.random(3) - 0.5 >>> persp = numpy.random.random(3) - 0.5 >>> P0 = projection_matrix(point, normal) >>> result = projection_from_matrix(P0) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, direct) >>> result = projection_from_matrix(P0) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) >>> result = projection_from_matrix(P0, pseudo=False) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) >>> result = projection_from_matrix(P0, pseudo=True) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True
[ "Return", "projection", "plane", "and", "perspective", "point", "from", "projection", "matrix", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/transformations.py#L499-L569
train
BerkeleyAutomation/autolab_core
autolab_core/transformations.py
unit_vector
def unit_vector(data, axis=None, out=None): """Return ndarray normalized by length, i.e. eucledian norm, along axis. >>> v0 = numpy.random.random(3) >>> v1 = unit_vector(v0) >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) True >>> v0 = numpy.random.rand(5, 4, 3) >>> v1 = unit_vector(v0, axis=-1) >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) >>> numpy.allclose(v1, v2) True >>> v1 = unit_vector(v0, axis=1) >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) >>> numpy.allclose(v1, v2) True >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64) >>> unit_vector(v0, axis=1, out=v1) >>> numpy.allclose(v1, v2) True >>> list(unit_vector([])) [] >>> list(unit_vector([1.0])) [1.0] """ if out is None: data = numpy.array(data, dtype=numpy.float64, copy=True) if data.ndim == 1: data /= math.sqrt(numpy.dot(data, data)) return data else: if out is not data: out[:] = numpy.array(data, copy=False) data = out length = numpy.atleast_1d(numpy.sum(data*data, axis)) numpy.sqrt(length, length) if axis is not None: length = numpy.expand_dims(length, axis) data /= length if out is None: return data
python
def unit_vector(data, axis=None, out=None): """Return ndarray normalized by length, i.e. eucledian norm, along axis. >>> v0 = numpy.random.random(3) >>> v1 = unit_vector(v0) >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) True >>> v0 = numpy.random.rand(5, 4, 3) >>> v1 = unit_vector(v0, axis=-1) >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) >>> numpy.allclose(v1, v2) True >>> v1 = unit_vector(v0, axis=1) >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) >>> numpy.allclose(v1, v2) True >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64) >>> unit_vector(v0, axis=1, out=v1) >>> numpy.allclose(v1, v2) True >>> list(unit_vector([])) [] >>> list(unit_vector([1.0])) [1.0] """ if out is None: data = numpy.array(data, dtype=numpy.float64, copy=True) if data.ndim == 1: data /= math.sqrt(numpy.dot(data, data)) return data else: if out is not data: out[:] = numpy.array(data, copy=False) data = out length = numpy.atleast_1d(numpy.sum(data*data, axis)) numpy.sqrt(length, length) if axis is not None: length = numpy.expand_dims(length, axis) data /= length if out is None: return data
[ "def", "unit_vector", "(", "data", ",", "axis", "=", "None", ",", "out", "=", "None", ")", ":", "if", "out", "is", "None", ":", "data", "=", "numpy", ".", "array", "(", "data", ",", "dtype", "=", "numpy", ".", "float64", ",", "copy", "=", "True", ")", "if", "data", ".", "ndim", "==", "1", ":", "data", "/=", "math", ".", "sqrt", "(", "numpy", ".", "dot", "(", "data", ",", "data", ")", ")", "return", "data", "else", ":", "if", "out", "is", "not", "data", ":", "out", "[", ":", "]", "=", "numpy", ".", "array", "(", "data", ",", "copy", "=", "False", ")", "data", "=", "out", "length", "=", "numpy", ".", "atleast_1d", "(", "numpy", ".", "sum", "(", "data", "*", "data", ",", "axis", ")", ")", "numpy", ".", "sqrt", "(", "length", ",", "length", ")", "if", "axis", "is", "not", "None", ":", "length", "=", "numpy", ".", "expand_dims", "(", "length", ",", "axis", ")", "data", "/=", "length", "if", "out", "is", "None", ":", "return", "data" ]
Return ndarray normalized by length, i.e. Euclidean norm, along axis. >>> v0 = numpy.random.random(3) >>> v1 = unit_vector(v0) >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) True >>> v0 = numpy.random.rand(5, 4, 3) >>> v1 = unit_vector(v0, axis=-1) >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) >>> numpy.allclose(v1, v2) True >>> v1 = unit_vector(v0, axis=1) >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) >>> numpy.allclose(v1, v2) True >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64) >>> unit_vector(v0, axis=1, out=v1) >>> numpy.allclose(v1, v2) True >>> list(unit_vector([])) [] >>> list(unit_vector([1.0])) [1.0]
[ "Return", "ndarray", "normalized", "by", "length", "i", ".", "e", ".", "eucledian", "norm", "along", "axis", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/transformations.py#L1574-L1615
train
BerkeleyAutomation/autolab_core
autolab_core/json_serialization.py
json_numpy_obj_hook
def json_numpy_obj_hook(dct): """Decodes a previously encoded numpy ndarray with proper shape and dtype. Parameters ---------- dct : :obj:`dict` The encoded dictionary. Returns ------- :obj:`numpy.ndarray` The ndarray that `dct` was encoding. """ if isinstance(dct, dict) and '__ndarray__' in dct: data = np.asarray(dct['__ndarray__'], dtype=dct['dtype']) return data.reshape(dct['shape']) return dct
python
def json_numpy_obj_hook(dct): """Decodes a previously encoded numpy ndarray with proper shape and dtype. Parameters ---------- dct : :obj:`dict` The encoded dictionary. Returns ------- :obj:`numpy.ndarray` The ndarray that `dct` was encoding. """ if isinstance(dct, dict) and '__ndarray__' in dct: data = np.asarray(dct['__ndarray__'], dtype=dct['dtype']) return data.reshape(dct['shape']) return dct
[ "def", "json_numpy_obj_hook", "(", "dct", ")", ":", "if", "isinstance", "(", "dct", ",", "dict", ")", "and", "'__ndarray__'", "in", "dct", ":", "data", "=", "np", ".", "asarray", "(", "dct", "[", "'__ndarray__'", "]", ",", "dtype", "=", "dct", "[", "'dtype'", "]", ")", "return", "data", ".", "reshape", "(", "dct", "[", "'shape'", "]", ")", "return", "dct" ]
Decodes a previously encoded numpy ndarray with proper shape and dtype. Parameters ---------- dct : :obj:`dict` The encoded dictionary. Returns ------- :obj:`numpy.ndarray` The ndarray that `dct` was encoding.
[ "Decodes", "a", "previously", "encoded", "numpy", "ndarray", "with", "proper", "shape", "and", "dtype", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/json_serialization.py#L45-L61
train
BerkeleyAutomation/autolab_core
autolab_core/json_serialization.py
dump
def dump(*args, **kwargs): """Dump a numpy.ndarray to file stream. This works exactly like the usual `json.dump()` function, but it uses our custom serializer. """ kwargs.update(dict(cls=NumpyEncoder, sort_keys=True, indent=4, separators=(',', ': '))) return _json.dump(*args, **kwargs)
python
def dump(*args, **kwargs): """Dump a numpy.ndarray to file stream. This works exactly like the usual `json.dump()` function, but it uses our custom serializer. """ kwargs.update(dict(cls=NumpyEncoder, sort_keys=True, indent=4, separators=(',', ': '))) return _json.dump(*args, **kwargs)
[ "def", "dump", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "dict", "(", "cls", "=", "NumpyEncoder", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", ")", "return", "_json", ".", "dump", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Dump a numpy.ndarray to file stream. This works exactly like the usual `json.dump()` function, but it uses our custom serializer.
[ "Dump", "a", "numpy", ".", "ndarray", "to", "file", "stream", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/json_serialization.py#L63-L73
train
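A round-trip sketch with the module-level dump and load wrappers. The import path follows the file path shown above and the output path is hypothetical:

import numpy as np
from autolab_core import json_serialization

arr = np.arange(6, dtype=np.float32).reshape(2, 3)

with open('/tmp/arr.json', 'w') as f:      # hypothetical path
    json_serialization.dump({'weights': arr}, f)

with open('/tmp/arr.json', 'r') as f:
    data = json_serialization.load(f)

assert np.allclose(data['weights'], arr)
assert data['weights'].dtype == np.float32  # dtype is restored by json_numpy_obj_hook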
BerkeleyAutomation/autolab_core
autolab_core/json_serialization.py
load
def load(*args, **kwargs): """Load a numpy.ndarray from a file stream. This works exactly like the usual `json.load()` function, but it uses our custom deserializer. """ kwargs.update(dict(object_hook=json_numpy_obj_hook)) return _json.load(*args, **kwargs)
python
def load(*args, **kwargs): """Load a numpy.ndarray from a file stream. This works exactly like the usual `json.load()` function, but it uses our custom deserializer. """ kwargs.update(dict(object_hook=json_numpy_obj_hook)) return _json.load(*args, **kwargs)
[ "def", "load", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "dict", "(", "object_hook", "=", "json_numpy_obj_hook", ")", ")", "return", "_json", ".", "load", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Load a numpy.ndarray from a file stream. This works exactly like the usual `json.load()` function, but it uses our custom deserializer.
[ "Load", "an", "numpy", ".", "ndarray", "from", "a", "file", "stream", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/json_serialization.py#L75-L82
train
BerkeleyAutomation/autolab_core
autolab_core/json_serialization.py
NumpyEncoder.default
def default(self, obj): """Converts an ndarray into a dictionary for efficient serialization. The dict has three keys: - dtype : The datatype of the array as a string. - shape : The shape of the array as a tuple. - __ndarray__ : The data of the array as a list. Parameters ---------- obj : :obj:`numpy.ndarray` The ndarray to encode. Returns ------- :obj:`dict` The dictionary serialization of obj. Raises ------ TypeError If obj isn't an ndarray. """ if isinstance(obj, np.ndarray): return dict(__ndarray__=obj.tolist(), dtype=str(obj.dtype), shape=obj.shape) # Let the base class default method raise the TypeError return _json.JSONEncoder.default(self, obj)
python
def default(self, obj): """Converts an ndarray into a dictionary for efficient serialization. The dict has three keys: - dtype : The datatype of the array as a string. - shape : The shape of the array as a tuple. - __ndarray__ : The data of the array as a list. Parameters ---------- obj : :obj:`numpy.ndarray` The ndarray to encode. Returns ------- :obj:`dict` The dictionary serialization of obj. Raises ------ TypeError If obj isn't an ndarray. """ if isinstance(obj, np.ndarray): return dict(__ndarray__=obj.tolist(), dtype=str(obj.dtype), shape=obj.shape) # Let the base class default method raise the TypeError return _json.JSONEncoder.default(self, obj)
[ "def", "default", "(", "self", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "np", ".", "ndarray", ")", ":", "return", "dict", "(", "__ndarray__", "=", "obj", ".", "tolist", "(", ")", ",", "dtype", "=", "str", "(", "obj", ".", "dtype", ")", ",", "shape", "=", "obj", ".", "shape", ")", "# Let the base class default method raise the TypeError", "return", "_json", ".", "JSONEncoder", "(", "self", ",", "obj", ")" ]
Converts an ndarray into a dictionary for efficient serialization. The dict has three keys: - dtype : The datatype of the array as a string. - shape : The shape of the array as a tuple. - __ndarray__ : The data of the array as a list. Parameters ---------- obj : :obj:`numpy.ndarray` The ndarray to encode. Returns ------- :obj:`dict` The dictionary serialization of obj. Raises ------ TypeError If obj isn't an ndarray.
[ "Converts", "an", "ndarray", "into", "a", "dictionary", "for", "efficient", "serialization", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/json_serialization.py#L15-L43
train
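The encoder and the hook above can also be passed straight to the standard json module; a small in-memory sketch, assuming the import path implied by the file path above:

import json
import numpy as np
from autolab_core.json_serialization import NumpyEncoder, json_numpy_obj_hook

arr = np.eye(2)
text = json.dumps(arr, cls=NumpyEncoder)  # a JSON object with __ndarray__, dtype, and shape keys
restored = json.loads(text, object_hook=json_numpy_obj_hook)
assert np.allclose(restored, arr)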
BerkeleyAutomation/autolab_core
autolab_core/random_variables.py
RandomVariable._preallocate_samples
def _preallocate_samples(self): """Preallocate samples for faster adaptive sampling. """ self.prealloc_samples_ = [] for i in range(self.num_prealloc_samples_): self.prealloc_samples_.append(self.sample())
python
def _preallocate_samples(self): """Preallocate samples for faster adaptive sampling. """ self.prealloc_samples_ = [] for i in range(self.num_prealloc_samples_): self.prealloc_samples_.append(self.sample())
[ "def", "_preallocate_samples", "(", "self", ")", ":", "self", ".", "prealloc_samples_", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "num_prealloc_samples_", ")", ":", "self", ".", "prealloc_samples_", ".", "append", "(", "self", ".", "sample", "(", ")", ")" ]
Preallocate samples for faster adaptive sampling.
[ "Preallocate", "samples", "for", "faster", "adaptive", "sampling", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/random_variables.py#L30-L35
train
BerkeleyAutomation/autolab_core
autolab_core/random_variables.py
RandomVariable.rvs
def rvs(self, size=1, iteration=1): """Sample the random variable, using the preallocated samples if possible. Parameters ---------- size : int The number of samples to generate. iteration : int The location in the preallocated sample array to start sampling from. Returns ------- :obj:`numpy.ndarray` of float or int The samples of the random variable. If `size == 1`, then the returned value will not be wrapped in an array. """ if self.num_prealloc_samples_ > 0: samples = [] for i in range(size): samples.append(self.prealloc_samples_[(iteration + i) % self.num_prealloc_samples_]) if size == 1: return samples[0] return samples # generate a new sample return self.sample(size=size)
python
def rvs(self, size=1, iteration=1): """Sample the random variable, using the preallocated samples if possible. Parameters ---------- size : int The number of samples to generate. iteration : int The location in the preallocated sample array to start sampling from. Returns ------- :obj:`numpy.ndarray` of float or int The samples of the random variable. If `size == 1`, then the returned value will not be wrapped in an array. """ if self.num_prealloc_samples_ > 0: samples = [] for i in range(size): samples.append(self.prealloc_samples_[(iteration + i) % self.num_prealloc_samples_]) if size == 1: return samples[0] return samples # generate a new sample return self.sample(size=size)
[ "def", "rvs", "(", "self", ",", "size", "=", "1", ",", "iteration", "=", "1", ")", ":", "if", "self", ".", "num_prealloc_samples_", ">", "0", ":", "samples", "=", "[", "]", "for", "i", "in", "range", "(", "size", ")", ":", "samples", ".", "append", "(", "self", ".", "prealloc_samples_", "[", "(", "iteration", "+", "i", ")", "%", "self", ".", "num_prealloc_samples_", "]", ")", "if", "size", "==", "1", ":", "return", "samples", "[", "0", "]", "return", "samples", "# generate a new sample", "return", "self", ".", "sample", "(", "size", "=", "size", ")" ]
Sample the random variable, using the preallocated samples if possible. Parameters ---------- size : int The number of samples to generate. iteration : int The location in the preallocated sample array to start sampling from. Returns ------- :obj:`numpy.ndarray` of float or int The samples of the random variable. If `size == 1`, then the returned value will not be wrapped in an array.
[ "Sample", "the", "random", "variable", "using", "the", "preallocated", "samples", "if", "possible", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/random_variables.py#L54-L81
train
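rvs only relies on a subclass implementing sample. A hypothetical subclass sketch (GaussianRV is not part of the source), assuming the RandomVariable constructor accepts the number of preallocated samples, as _preallocate_samples above suggests:

import numpy as np
from autolab_core.random_variables import RandomVariable

class GaussianRV(RandomVariable):
    """Hypothetical scalar Gaussian random variable."""
    def __init__(self, mu, sigma, num_prealloc_samples=0):
        # set parameters before the base init, which may preallocate via self.sample()
        self.mu, self.sigma = mu, sigma
        super(GaussianRV, self).__init__(num_prealloc_samples)  # assumed constructor signature

    def sample(self, size=1):
        return np.random.normal(self.mu, self.sigma, size=size)

rv = GaussianRV(0.0, 1.0, num_prealloc_samples=100)
x = rv.rvs(size=1, iteration=10)    # one sample drawn from the preallocated pool
xs = rv.rvs(size=5, iteration=10)   # a list of 5 preallocated samples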
BerkeleyAutomation/autolab_core
autolab_core/random_variables.py
IsotropicGaussianRigidTransformRandomVariable.sample
def sample(self, size=1): """ Sample rigid transform random variables. Parameters ---------- size : int number of sample to take Returns ------- :obj:`list` of :obj:`RigidTransform` sampled rigid transformations """ samples = [] for i in range(size): # sample random pose xi = self._r_xi_rv.rvs(size=1) S_xi = skew(xi) R_sample = scipy.linalg.expm(S_xi) t_sample = self._t_rv.rvs(size=1) samples.append(RigidTransform(rotation=R_sample, translation=t_sample, from_frame=self._from_frame, to_frame=self._to_frame)) # not a list if only 1 sample if size == 1 and len(samples) > 0: return samples[0] return samples
python
def sample(self, size=1): """ Sample rigid transform random variables. Parameters ---------- size : int number of sample to take Returns ------- :obj:`list` of :obj:`RigidTransform` sampled rigid transformations """ samples = [] for i in range(size): # sample random pose xi = self._r_xi_rv.rvs(size=1) S_xi = skew(xi) R_sample = scipy.linalg.expm(S_xi) t_sample = self._t_rv.rvs(size=1) samples.append(RigidTransform(rotation=R_sample, translation=t_sample, from_frame=self._from_frame, to_frame=self._to_frame)) # not a list if only 1 sample if size == 1 and len(samples) > 0: return samples[0] return samples
[ "def", "sample", "(", "self", ",", "size", "=", "1", ")", ":", "samples", "=", "[", "]", "for", "i", "in", "range", "(", "size", ")", ":", "# sample random pose", "xi", "=", "self", ".", "_r_xi_rv", ".", "rvs", "(", "size", "=", "1", ")", "S_xi", "=", "skew", "(", "xi", ")", "R_sample", "=", "scipy", ".", "linalg", ".", "expm", "(", "S_xi", ")", "t_sample", "=", "self", ".", "_t_rv", ".", "rvs", "(", "size", "=", "1", ")", "samples", ".", "append", "(", "RigidTransform", "(", "rotation", "=", "R_sample", ",", "translation", "=", "t_sample", ",", "from_frame", "=", "self", ".", "_from_frame", ",", "to_frame", "=", "self", ".", "_to_frame", ")", ")", "# not a list if only 1 sample", "if", "size", "==", "1", "and", "len", "(", "samples", ")", ">", "0", ":", "return", "samples", "[", "0", "]", "return", "samples" ]
Sample rigid transform random variables. Parameters ---------- size : int number of samples to take Returns ------- :obj:`list` of :obj:`RigidTransform` sampled rigid transformations
[ "Sample", "rigid", "transform", "random", "variables", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/random_variables.py#L221-L249
train
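A hedged sketch of drawing perturbed poses. The constructor arguments (translation/rotation sigmas and frame names) are assumptions inferred from the attributes used in sample above, not a confirmed signature:

from autolab_core.random_variables import IsotropicGaussianRigidTransformRandomVariable

# assumed constructor: isotropic sigmas for translation and rotation plus the transform frames
rv = IsotropicGaussianRigidTransformRandomVariable(sigma_trans=0.01, sigma_rot=0.05,
                                                   from_frame='obj', to_frame='world')
T = rv.sample()          # a single RigidTransform
Ts = rv.sample(size=10)  # a list of 10 RigidTransform samples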
BerkeleyAutomation/autolab_core
autolab_core/data_stream_recorder.py
DataStreamRecorder._flush
def _flush(self): """ Returns a list of all current data """ if self._recording: raise Exception("Cannot flush data queue while recording!") if self._saving_cache: logging.warn("Flush when using cache means unsaved data will be lost and not returned!") self._cmds_q.put(("reset_data_segment",)) else: data = self._extract_q(0) return data
python
def _flush(self): """ Returns a list of all current data """ if self._recording: raise Exception("Cannot flush data queue while recording!") if self._saving_cache: logging.warn("Flush when using cache means unsaved data will be lost and not returned!") self._cmds_q.put(("reset_data_segment",)) else: data = self._extract_q(0) return data
[ "def", "_flush", "(", "self", ")", ":", "if", "self", ".", "_recording", ":", "raise", "Exception", "(", "\"Cannot flush data queue while recording!\"", ")", "if", "self", ".", "_saving_cache", ":", "logging", ".", "warn", "(", "\"Flush when using cache means unsaved data will be lost and not returned!\"", ")", "self", ".", "_cmds_q", ".", "put", "(", "(", "\"reset_data_segment\"", ",", ")", ")", "else", ":", "data", "=", "self", ".", "_extract_q", "(", "0", ")", "return", "data" ]
Returns a list of all current data
[ "Returns", "a", "list", "of", "all", "current", "data" ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/data_stream_recorder.py#L193-L202
train
BerkeleyAutomation/autolab_core
autolab_core/data_stream_recorder.py
DataStreamRecorder._stop
def _stop(self): """ Stops recording. Returns all recorded data and their timestamps. Destroys recorder process.""" self._pause() self._cmds_q.put(("stop",)) try: self._recorder.terminate() except Exception: pass self._recording = False
python
def _stop(self): """ Stops recording. Returns all recorded data and their timestamps. Destroys recorder process.""" self._pause() self._cmds_q.put(("stop",)) try: self._recorder.terminate() except Exception: pass self._recording = False
[ "def", "_stop", "(", "self", ")", ":", "self", ".", "_pause", "(", ")", "self", ".", "_cmds_q", ".", "put", "(", "(", "\"stop\"", ",", ")", ")", "try", ":", "self", ".", "_recorder", ".", "terminate", "(", ")", "except", "Exception", ":", "pass", "self", ".", "_recording", "=", "False" ]
Stops recording. Returns all recorded data and their timestamps. Destroys recorder process.
[ "Stops", "recording", ".", "Returns", "all", "recorded", "data", "and", "their", "timestamps", ".", "Destroys", "recorder", "process", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/data_stream_recorder.py#L209-L217
train
BerkeleyAutomation/autolab_core
autolab_core/completer.py
Completer._listdir
def _listdir(self, root): "List directory 'root' appending the path separator to subdirs." res = [] for name in os.listdir(root): path = os.path.join(root, name) if os.path.isdir(path): name += os.sep res.append(name) return res
python
def _listdir(self, root): "List directory 'root' appending the path separator to subdirs." res = [] for name in os.listdir(root): path = os.path.join(root, name) if os.path.isdir(path): name += os.sep res.append(name) return res
[ "def", "_listdir", "(", "self", ",", "root", ")", ":", "res", "=", "[", "]", "for", "name", "in", "os", ".", "listdir", "(", "root", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "name", ")", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "name", "+=", "os", ".", "sep", "res", ".", "append", "(", "name", ")", "return", "res" ]
List directory 'root' appending the path separator to subdirs.
[ "List", "directory", "root", "appending", "the", "path", "separator", "to", "subdirs", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/completer.py#L24-L32
train
BerkeleyAutomation/autolab_core
autolab_core/completer.py
Completer.complete_extra
def complete_extra(self, args): "Completions for the 'extra' command." # treat the last arg as a path and complete it if len(args) == 0: return self._listdir('./') return self._complete_path(args[-1])
python
def complete_extra(self, args): "Completions for the 'extra' command." # treat the last arg as a path and complete it if len(args) == 0: return self._listdir('./') return self._complete_path(args[-1])
[ "def", "complete_extra", "(", "self", ",", "args", ")", ":", "# treat the last arg as a path and complete it", "if", "len", "(", "args", ")", "==", "0", ":", "return", "self", ".", "_listdir", "(", "'./'", ")", "return", "self", ".", "_complete_path", "(", "args", "[", "-", "1", "]", ")" ]
Completions for the 'extra' command.
[ "Completions", "for", "the", "extra", "command", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/completer.py#L51-L56
train
BerkeleyAutomation/autolab_core
autolab_core/completer.py
Completer.complete
def complete(self, text, state): "Generic readline completion entry point." # dexnet entity tab completion results = [w for w in self.words if w.startswith(text)] + [None] if results != [None]: return results[state] buffer = readline.get_line_buffer() line = readline.get_line_buffer().split() # dexnet entity tab completion results = [w for w in self.words if w.startswith(text)] + [None] if results != [None]: return results[state] # account for last argument ending in a space if RE_SPACE.match(buffer): line.append('') return (self.complete_extra(line) + [None])[state]
python
def complete(self, text, state): "Generic readline completion entry point." # dexnet entity tab completion results = [w for w in self.words if w.startswith(text)] + [None] if results != [None]: return results[state] buffer = readline.get_line_buffer() line = readline.get_line_buffer().split() # dexnet entity tab completion results = [w for w in self.words if w.startswith(text)] + [None] if results != [None]: return results[state] # account for last argument ending in a space if RE_SPACE.match(buffer): line.append('') return (self.complete_extra(line) + [None])[state]
[ "def", "complete", "(", "self", ",", "text", ",", "state", ")", ":", "# dexnet entity tab completion", "results", "=", "[", "w", "for", "w", "in", "self", ".", "words", "if", "w", ".", "startswith", "(", "text", ")", "]", "+", "[", "None", "]", "if", "results", "!=", "[", "None", "]", ":", "return", "results", "[", "state", "]", "buffer", "=", "readline", ".", "get_line_buffer", "(", ")", "line", "=", "readline", ".", "get_line_buffer", "(", ")", ".", "split", "(", ")", "# dexnet entity tab completion", "results", "=", "[", "w", "for", "w", "in", "self", ".", "words", "if", "w", ".", "startswith", "(", "text", ")", "]", "+", "[", "None", "]", "if", "results", "!=", "[", "None", "]", ":", "return", "results", "[", "state", "]", "# account for last argument ending in a space", "if", "RE_SPACE", ".", "match", "(", "buffer", ")", ":", "line", ".", "append", "(", "''", ")", "return", "(", "self", ".", "complete_extra", "(", "line", ")", "+", "[", "None", "]", ")", "[", "state", "]" ]
Generic readline completion entry point.
[ "Generic", "readline", "completion", "entry", "point", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/completer.py#L58-L78
train
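A sketch of wiring the completer into readline. The word list is illustrative, and the Completer constructor is assumed to take it (the words attribute is what complete reads above):

import readline
from autolab_core.completer import Completer

completer = Completer(['open', 'close', 'sample', 'quit'])  # assumed constructor taking the word list
readline.set_completer(completer.complete)
readline.parse_and_bind('tab: complete')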
BerkeleyAutomation/autolab_core
autolab_core/data_stream_syncer.py
DataStreamSyncer.stop
def stop(self): """ Stops syncer operations. Destroys syncer process. """ self._cmds_q.put(("stop",)) for recorder in self._data_stream_recorders: recorder._stop() try: self._syncer.terminate() except Exception: pass
python
def stop(self): """ Stops syncer operations. Destroys syncer process. """ self._cmds_q.put(("stop",)) for recorder in self._data_stream_recorders: recorder._stop() try: self._syncer.terminate() except Exception: pass
[ "def", "stop", "(", "self", ")", ":", "self", ".", "_cmds_q", ".", "put", "(", "(", "\"stop\"", ",", ")", ")", "for", "recorder", "in", "self", ".", "_data_stream_recorders", ":", "recorder", ".", "_stop", "(", ")", "try", ":", "self", ".", "_syncer", ".", "terminate", "(", ")", "except", "Exception", ":", "pass" ]
Stops syncer operations. Destroys syncer process.
[ "Stops", "syncer", "operations", ".", "Destroys", "syncer", "process", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/data_stream_syncer.py#L123-L131
train
BerkeleyAutomation/autolab_core
autolab_core/logger.py
configure_root
def configure_root(): """Configure the root logger.""" root_logger = logging.getLogger() # clear any existing handles to streams because we don't want duplicate logs # NOTE: we assume that any stream handles we find are to ROOT_LOG_STREAM, which is usually the case(because it is stdout). This is fine because we will be re-creating that handle. Otherwise we might be deleting a handle that won't be re-created, which could result in dropped logs. for hdlr in root_logger.handlers: if isinstance(hdlr, logging.StreamHandler): root_logger.removeHandler(hdlr) # configure the root logger root_logger.setLevel(ROOT_LOG_LEVEL) hdlr = logging.StreamHandler(ROOT_LOG_STREAM) formatter = colorlog.ColoredFormatter( '%(purple)s%(name)-10s %(log_color)s%(levelname)-8s%(reset)s %(white)s%(message)s', reset=True, log_colors={ 'DEBUG': 'cyan', 'INFO': 'green', 'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'red,bg_white', } ) hdlr.setFormatter(formatter) root_logger.addHandler(hdlr)
python
def configure_root(): """Configure the root logger.""" root_logger = logging.getLogger() # clear any existing handles to streams because we don't want duplicate logs # NOTE: we assume that any stream handles we find are to ROOT_LOG_STREAM, which is usually the case(because it is stdout). This is fine because we will be re-creating that handle. Otherwise we might be deleting a handle that won't be re-created, which could result in dropped logs. for hdlr in root_logger.handlers: if isinstance(hdlr, logging.StreamHandler): root_logger.removeHandler(hdlr) # configure the root logger root_logger.setLevel(ROOT_LOG_LEVEL) hdlr = logging.StreamHandler(ROOT_LOG_STREAM) formatter = colorlog.ColoredFormatter( '%(purple)s%(name)-10s %(log_color)s%(levelname)-8s%(reset)s %(white)s%(message)s', reset=True, log_colors={ 'DEBUG': 'cyan', 'INFO': 'green', 'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'red,bg_white', } ) hdlr.setFormatter(formatter) root_logger.addHandler(hdlr)
[ "def", "configure_root", "(", ")", ":", "root_logger", "=", "logging", ".", "getLogger", "(", ")", "# clear any existing handles to streams because we don't want duplicate logs", "# NOTE: we assume that any stream handles we find are to ROOT_LOG_STREAM, which is usually the case(because it is stdout). This is fine because we will be re-creating that handle. Otherwise we might be deleting a handle that won't be re-created, which could result in dropped logs.", "for", "hdlr", "in", "root_logger", ".", "handlers", ":", "if", "isinstance", "(", "hdlr", ",", "logging", ".", "StreamHandler", ")", ":", "root_logger", ".", "removeHandler", "(", "hdlr", ")", "# configure the root logger", "root_logger", ".", "setLevel", "(", "ROOT_LOG_LEVEL", ")", "hdlr", "=", "logging", ".", "StreamHandler", "(", "ROOT_LOG_STREAM", ")", "formatter", "=", "colorlog", ".", "ColoredFormatter", "(", "'%(purple)s%(name)-10s %(log_color)s%(levelname)-8s%(reset)s %(white)s%(message)s'", ",", "reset", "=", "True", ",", "log_colors", "=", "{", "'DEBUG'", ":", "'cyan'", ",", "'INFO'", ":", "'green'", ",", "'WARNING'", ":", "'yellow'", ",", "'ERROR'", ":", "'red'", ",", "'CRITICAL'", ":", "'red,bg_white'", ",", "}", ")", "hdlr", ".", "setFormatter", "(", "formatter", ")", "root_logger", ".", "addHandler", "(", "hdlr", ")" ]
Configure the root logger.
[ "Configure", "the", "root", "logger", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/logger.py#L14-L39
train
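A stripped-down sketch of the same root-logger setup using only the standard library (no colorlog); ROOT_LOG_LEVEL and ROOT_LOG_STREAM below are assumptions standing in for the module-level constants the function references.

import logging
import sys

ROOT_LOG_LEVEL = logging.INFO      # assumption: actual constant lives in logger.py
ROOT_LOG_STREAM = sys.stdout       # assumption: stdout, as the NOTE in the code suggests

root = logging.getLogger()
# drop existing stream handlers so logs are not duplicated
for hdlr in list(root.handlers):
    if isinstance(hdlr, logging.StreamHandler):
        root.removeHandler(hdlr)

hdlr = logging.StreamHandler(ROOT_LOG_STREAM)
hdlr.setFormatter(logging.Formatter('%(name)-10s %(levelname)-8s %(message)s'))
root.setLevel(ROOT_LOG_LEVEL)
root.addHandler(hdlr)
root.info('root logger configured')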
BerkeleyAutomation/autolab_core
autolab_core/logger.py
add_root_log_file
def add_root_log_file(log_file):
    """
    Add a log file to the root logger.

    Parameters
    ----------
    log_file :obj:`str`
        The path to the log file.
    """
    root_logger = logging.getLogger()

    # add a file handle to the root logger
    hdlr = logging.FileHandler(log_file)
    formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M:%S')
    hdlr.setFormatter(formatter)
    root_logger.addHandler(hdlr)
    root_logger.info('Root logger now logging to {}'.format(log_file))
python
def add_root_log_file(log_file):
    """
    Add a log file to the root logger.

    Parameters
    ----------
    log_file :obj:`str`
        The path to the log file.
    """
    root_logger = logging.getLogger()

    # add a file handle to the root logger
    hdlr = logging.FileHandler(log_file)
    formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M:%S')
    hdlr.setFormatter(formatter)
    root_logger.addHandler(hdlr)
    root_logger.info('Root logger now logging to {}'.format(log_file))
[ "def", "add_root_log_file", "(", "log_file", ")", ":", "root_logger", "=", "logging", ".", "getLogger", "(", ")", "# add a file handle to the root logger", "hdlr", "=", "logging", ".", "FileHandler", "(", "log_file", ")", "formatter", "=", "logging", ".", "Formatter", "(", "'%(asctime)s %(name)-10s %(levelname)-8s %(message)s'", ",", "datefmt", "=", "'%m-%d %H:%M:%S'", ")", "hdlr", ".", "setFormatter", "(", "formatter", ")", "root_logger", ".", "addHandler", "(", "hdlr", ")", "root_logger", ".", "info", "(", "'Root logger now logging to {}'", ".", "format", "(", "log_file", ")", ")" ]
Add a log file to the root logger.

Parameters
----------
log_file :obj:`str`
    The path to the log file.
[ "Add", "a", "log", "file", "to", "the", "root", "logger", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/logger.py#L41-L57
train
BerkeleyAutomation/autolab_core
autolab_core/logger.py
Logger.add_log_file
def add_log_file(logger, log_file, global_log_file=False):
    """
    Add a log file to this logger. If global_log_file is true,
    log_file will be handed the root logger, otherwise it will only
    be used by this particular logger.

    Parameters
    ----------
    logger :obj:`logging.Logger`
        The logger.
    log_file :obj:`str`
        The path to the log file to log to.
    global_log_file :obj:`bool`
        Whether or not to use the given log_file for this particular logger
        or for the root logger.
    """
    if global_log_file:
        add_root_log_file(log_file)
    else:
        hdlr = logging.FileHandler(log_file)
        formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M:%S')
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)
python
def add_log_file(logger, log_file, global_log_file=False):
    """
    Add a log file to this logger. If global_log_file is true,
    log_file will be handed the root logger, otherwise it will only
    be used by this particular logger.

    Parameters
    ----------
    logger :obj:`logging.Logger`
        The logger.
    log_file :obj:`str`
        The path to the log file to log to.
    global_log_file :obj:`bool`
        Whether or not to use the given log_file for this particular logger
        or for the root logger.
    """
    if global_log_file:
        add_root_log_file(log_file)
    else:
        hdlr = logging.FileHandler(log_file)
        formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M:%S')
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)
[ "def", "add_log_file", "(", "logger", ",", "log_file", ",", "global_log_file", "=", "False", ")", ":", "if", "global_log_file", ":", "add_root_log_file", "(", "log_file", ")", "else", ":", "hdlr", "=", "logging", ".", "FileHandler", "(", "log_file", ")", "formatter", "=", "logging", ".", "Formatter", "(", "'%(asctime)s %(name)-10s %(levelname)-8s %(message)s'", ",", "datefmt", "=", "'%m-%d %H:%M:%S'", ")", "hdlr", ".", "setFormatter", "(", "formatter", ")", "logger", ".", "addHandler", "(", "hdlr", ")" ]
Add a log file to this logger. If global_log_file is true,
log_file will be handed the root logger, otherwise it will only
be used by this particular logger.

Parameters
----------
logger :obj:`logging.Logger`
    The logger.
log_file :obj:`str`
    The path to the log file to log to.
global_log_file :obj:`bool`
    Whether or not to use the given log_file for this particular logger
    or for the root logger.
[ "Add", "a", "log", "file", "to", "this", "logger", ".", "If", "global_log_file", "is", "true", "log_file", "will", "be", "handed", "the", "root", "logger", "otherwise", "it", "will", "only", "be", "used", "by", "this", "particular", "logger", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/logger.py#L128-L148
train
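A small hedged example of the non-global branch, using only the standard library; the logger name and file name are made up.

import logging

logger = logging.getLogger('experiment')   # hypothetical logger name
logger.setLevel(logging.INFO)

# equivalent of Logger.add_log_file(logger, 'run.log', global_log_file=False)
hdlr = logging.FileHandler('run.log')
hdlr.setFormatter(logging.Formatter(
    '%(asctime)s %(name)-10s %(levelname)-8s %(message)s',
    datefmt='%m-%d %H:%M:%S'))
logger.addHandler(hdlr)
logger.info('logging to run.log only for this logger')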
googlefonts/fontbakery
Lib/fontbakery/checkrunner.py
get_module_profile
def get_module_profile(module, name=None):
    """
    Get or create a profile from a module and return it.

    If the name `module.profile` is present the value of that is returned.
    Otherwise, if the name `module.profile_factory` is present, a new profile
    is created using `module.profile_factory` and then `profile.auto_register`
    is called with the module namespace.
    If neither name is defined, the module is not considered a profile-module
    and None is returned.

    TODO: describe the `name` argument and better define the signature of `profile_factory`.

    The `module` argument is expected to behave like a python module.
    The optional `name` argument is used when `profile_factory` is called to
    give a name to the default section of the new profile. If name is not
    present `module.__name__` is the fallback.

    `profile_factory` is called like this:
        `profile = module.profile_factory(default_section=default_section)`
    """
    try:
        # if profile is defined we just use it
        return module.profile
    except AttributeError:
        # > 'module' object has no attribute 'profile'
        # try to create one on the fly.
        # e.g. module.__name__ == "fontbakery.profiles.cmap"
        if 'profile_factory' not in module.__dict__:
            return None
        default_section = Section(name or module.__name__)
        profile = module.profile_factory(default_section=default_section)
        profile.auto_register(module.__dict__)
        return profile
python
def get_module_profile(module, name=None):
    """
    Get or create a profile from a module and return it.

    If the name `module.profile` is present the value of that is returned.
    Otherwise, if the name `module.profile_factory` is present, a new profile
    is created using `module.profile_factory` and then `profile.auto_register`
    is called with the module namespace.
    If neither name is defined, the module is not considered a profile-module
    and None is returned.

    TODO: describe the `name` argument and better define the signature of `profile_factory`.

    The `module` argument is expected to behave like a python module.
    The optional `name` argument is used when `profile_factory` is called to
    give a name to the default section of the new profile. If name is not
    present `module.__name__` is the fallback.

    `profile_factory` is called like this:
        `profile = module.profile_factory(default_section=default_section)`
    """
    try:
        # if profile is defined we just use it
        return module.profile
    except AttributeError:
        # > 'module' object has no attribute 'profile'
        # try to create one on the fly.
        # e.g. module.__name__ == "fontbakery.profiles.cmap"
        if 'profile_factory' not in module.__dict__:
            return None
        default_section = Section(name or module.__name__)
        profile = module.profile_factory(default_section=default_section)
        profile.auto_register(module.__dict__)
        return profile
[ "def", "get_module_profile", "(", "module", ",", "name", "=", "None", ")", ":", "try", ":", "# if profile is defined we just use it", "return", "module", ".", "profile", "except", "AttributeError", ":", "# > 'module' object has no attribute 'profile'", "# try to create one on the fly.", "# e.g. module.__name__ == \"fontbakery.profiles.cmap\"", "if", "'profile_factory'", "not", "in", "module", ".", "__dict__", ":", "return", "None", "default_section", "=", "Section", "(", "name", "or", "module", ".", "__name__", ")", "profile", "=", "module", ".", "profile_factory", "(", "default_section", "=", "default_section", ")", "profile", ".", "auto_register", "(", "module", ".", "__dict__", ")", "return", "profile" ]
Get or create a profile from a module and return it.

If the name `module.profile` is present the value of that is returned.
Otherwise, if the name `module.profile_factory` is present, a new profile
is created using `module.profile_factory` and then `profile.auto_register`
is called with the module namespace.
If neither name is defined, the module is not considered a profile-module
and None is returned.

TODO: describe the `name` argument and better define the signature of `profile_factory`.

The `module` argument is expected to behave like a python module.
The optional `name` argument is used when `profile_factory` is called to
give a name to the default section of the new profile. If name is not
present `module.__name__` is the fallback.

`profile_factory` is called like this:
    `profile = module.profile_factory(default_section=default_section)`
[ "Get", "or", "create", "a", "profile", "from", "a", "module", "and", "return", "it", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/checkrunner.py#L1626-L1659
train
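A sketch of the discovery rule exercised against synthetic module objects; it relies only on the behaviour documented in the function above and assumes fontbakery is importable so that get_module_profile can be imported from fontbakery.checkrunner.

import types
from fontbakery.checkrunner import get_module_profile

plain_module = types.ModuleType('not_a_profile')
# neither `profile` nor `profile_factory` is defined -> None is returned
assert get_module_profile(plain_module) is None

# a module that already carries a `profile` attribute is returned as-is
profile_module = types.ModuleType('has_profile')
profile_module.profile = object()          # stand-in for a real Profile
assert get_module_profile(profile_module) is profile_module.profile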
googlefonts/fontbakery
Lib/fontbakery/checkrunner.py
CheckRunner.iterargs
def iterargs(self): """ uses the singular name as key """ iterargs = OrderedDict() for name in self._iterargs: plural = self._profile.iterargs[name] iterargs[name] = tuple(self._values[plural]) return iterargs
python
def iterargs(self): """ uses the singular name as key """ iterargs = OrderedDict() for name in self._iterargs: plural = self._profile.iterargs[name] iterargs[name] = tuple(self._values[plural]) return iterargs
[ "def", "iterargs", "(", "self", ")", ":", "iterargs", "=", "OrderedDict", "(", ")", "for", "name", "in", "self", ".", "_iterargs", ":", "plural", "=", "self", ".", "_profile", ".", "iterargs", "[", "name", "]", "iterargs", "[", "name", "]", "=", "tuple", "(", "self", ".", "_values", "[", "plural", "]", ")", "return", "iterargs" ]
uses the singular name as key
[ "uses", "the", "singular", "name", "as", "key" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/checkrunner.py#L270-L276
train
googlefonts/fontbakery
Lib/fontbakery/checkrunner.py
CheckRunner._exec_check
def _exec_check(self, check: FontbakeryCallable, args: Dict[str, Any]):
    """ Yields check sub results.

    Each check result is a tuple of: (<Status>, mixed message)

    `status`: must be an instance of Status.
        If one of the `status` entries in one of the results
        is FAIL, the whole check is considered failed.
        WARN is most likely a PASS in a non strict mode and a
        FAIL in a strict mode.

    `message`:
        * If it is an `Exception` type we expect `status`
          not to be PASS
        * If it is a `string` it's a description of what passed
          or failed.
        * we'll think of an AdvancedMessageType as well, so that
          we can connect the check result with more in depth
          knowledge from the check definition.
    """
    try:
        # A check can be either a normal function that returns one Status or a
        # generator that yields one or more. The latter will return a generator
        # object that we can detect with types.GeneratorType.
        result = check(**args)  # Might raise.
        if isinstance(result, types.GeneratorType):
            # Iterate over sub-results one-by-one, list(result) would abort on
            # encountering the first exception.
            for sub_result in result:  # Might raise.
                yield self._check_result(sub_result)
            return  # Do not fall through to rest of method.
    except Exception as e:
        error = FailedCheckError(e)
        result = (ERROR, error)

    yield self._check_result(result)
python
def _exec_check(self, check: FontbakeryCallable, args: Dict[str, Any]):
    """ Yields check sub results.

    Each check result is a tuple of: (<Status>, mixed message)

    `status`: must be an instance of Status.
        If one of the `status` entries in one of the results
        is FAIL, the whole check is considered failed.
        WARN is most likely a PASS in a non strict mode and a
        FAIL in a strict mode.

    `message`:
        * If it is an `Exception` type we expect `status`
          not to be PASS
        * If it is a `string` it's a description of what passed
          or failed.
        * we'll think of an AdvancedMessageType as well, so that
          we can connect the check result with more in depth
          knowledge from the check definition.
    """
    try:
        # A check can be either a normal function that returns one Status or a
        # generator that yields one or more. The latter will return a generator
        # object that we can detect with types.GeneratorType.
        result = check(**args)  # Might raise.
        if isinstance(result, types.GeneratorType):
            # Iterate over sub-results one-by-one, list(result) would abort on
            # encountering the first exception.
            for sub_result in result:  # Might raise.
                yield self._check_result(sub_result)
            return  # Do not fall through to rest of method.
    except Exception as e:
        error = FailedCheckError(e)
        result = (ERROR, error)

    yield self._check_result(result)
[ "def", "_exec_check", "(", "self", ",", "check", ":", "FontbakeryCallable", ",", "args", ":", "Dict", "[", "str", ",", "Any", "]", ")", ":", "try", ":", "# A check can be either a normal function that returns one Status or a", "# generator that yields one or more. The latter will return a generator", "# object that we can detect with types.GeneratorType.", "result", "=", "check", "(", "*", "*", "args", ")", "# Might raise.", "if", "isinstance", "(", "result", ",", "types", ".", "GeneratorType", ")", ":", "# Iterate over sub-results one-by-one, list(result) would abort on", "# encountering the first exception.", "for", "sub_result", "in", "result", ":", "# Might raise.", "yield", "self", ".", "_check_result", "(", "sub_result", ")", "return", "# Do not fall through to rest of method.", "except", "Exception", "as", "e", ":", "error", "=", "FailedCheckError", "(", "e", ")", "result", "=", "(", "ERROR", ",", "error", ")", "yield", "self", ".", "_check_result", "(", "result", ")" ]
Yields check sub results.

Each check result is a tuple of: (<Status>, mixed message)

`status`: must be an instance of Status.
    If one of the `status` entries in one of the results
    is FAIL, the whole check is considered failed.
    WARN is most likely a PASS in a non strict mode and a
    FAIL in a strict mode.

`message`:
    * If it is an `Exception` type we expect `status`
      not to be PASS
    * If it is a `string` it's a description of what passed
      or failed.
    * we'll think of an AdvancedMessageType as well, so that
      we can connect the check result with more in depth
      knowledge from the check definition.
[ "Yields", "check", "sub", "results", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/checkrunner.py#L318-L352
train
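The runner accepts both a plain check that returns a single (status, message) tuple and a generator check that yields several. A dependency-free sketch of that dispatch, with statuses stubbed as strings rather than fontbakery Status objects:

import types

PASS, FAIL = 'PASS', 'FAIL'          # stand-ins for fontbakery Status objects

def plain_check():
    return (PASS, 'single result')

def generator_check():
    yield FAIL, 'first sub-result'
    yield PASS, 'second sub-result'

def run(check):
    result = check()
    if isinstance(result, types.GeneratorType):
        for sub_result in result:    # consume sub-results one by one
            print(sub_result)
    else:
        print(result)

run(plain_check)
run(generator_check)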
googlefonts/fontbakery
Lib/fontbakery/checkrunner.py
CheckRunner.check_order
def check_order(self, order):
    """
    order must be a subset of self.order
    """
    own_order = self.order
    for item in order:
        if item not in own_order:
            raise ValueError(f'Order item {item} not found.')
    return order
python
def check_order(self, order):
    """
    order must be a subset of self.order
    """
    own_order = self.order
    for item in order:
        if item not in own_order:
            raise ValueError(f'Order item {item} not found.')
    return order
[ "def", "check_order", "(", "self", ",", "order", ")", ":", "own_order", "=", "self", ".", "order", "for", "item", "in", "order", ":", "if", "item", "not", "in", "own_order", ":", "raise", "ValueError", "(", "f'Order item {item} not found.'", ")", "return", "order" ]
order must be a subset of self.order
[ "order", "must", "be", "a", "subset", "of", "self", ".", "order" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/checkrunner.py#L622-L630
train
googlefonts/fontbakery
Lib/fontbakery/checkrunner.py
Section.add_check
def add_check(self, check):
    """
    Please use rather `register_check` as a decorator.
    """
    if self._add_check_callback is not None:
        if not self._add_check_callback(self, check):
            # rejected, skip!
            return False
    self._checkid2index[check.id] = len(self._checks)
    self._checks.append(check)
    return True
python
def add_check(self, check):
    """
    Please use rather `register_check` as a decorator.
    """
    if self._add_check_callback is not None:
        if not self._add_check_callback(self, check):
            # rejected, skip!
            return False
    self._checkid2index[check.id] = len(self._checks)
    self._checks.append(check)
    return True
[ "def", "add_check", "(", "self", ",", "check", ")", ":", "if", "self", ".", "_add_check_callback", "is", "not", "None", ":", "if", "not", "self", ".", "_add_check_callback", "(", "self", ",", "check", ")", ":", "# rejected, skip!", "return", "False", "self", ".", "_checkid2index", "[", "check", ".", "id", "]", "=", "len", "(", "self", ".", "_checks", ")", "self", ".", "_checks", ".", "append", "(", "check", ")", "return", "True" ]
Please use rather `register_check` as a decorator.
[ "Please", "use", "rather", "register_check", "as", "a", "decorator", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/checkrunner.py#L732-L743
train
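A self-contained sketch of the callback contract: the callback may veto a check, in which case add_check returns False. The classes here are illustrative stand-ins, not the real Section or FontBakeryCheck.

class FakeCheck:
    def __init__(self, check_id):
        self.id = check_id

class MiniSection:
    def __init__(self, add_check_callback=None):
        self._checks = []
        self._checkid2index = {}
        self._add_check_callback = add_check_callback

    def add_check(self, check):
        if self._add_check_callback is not None:
            if not self._add_check_callback(self, check):
                return False                    # rejected by the callback
        self._checkid2index[check.id] = len(self._checks)
        self._checks.append(check)
        return True

vendor_only = lambda section, check: check.id.startswith('com.adobe.fonts/')
section = MiniSection(add_check_callback=vendor_only)
print(section.add_check(FakeCheck('com.adobe.fonts/check/example')))   # True
print(section.add_check(FakeCheck('com.google.fonts/check/example')))  # False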
googlefonts/fontbakery
Lib/fontbakery/checkrunner.py
Section.merge_section
def merge_section(self, section, filter_func=None):
    """ Add section.checks to self, if not skipped by self._add_check_callback.
    order, description, etc. are not updated.
    """
    for check in section.checks:
        if filter_func and not filter_func(check):
            continue
        self.add_check(check)
python
def merge_section(self, section, filter_func=None):
    """ Add section.checks to self, if not skipped by self._add_check_callback.
    order, description, etc. are not updated.
    """
    for check in section.checks:
        if filter_func and not filter_func(check):
            continue
        self.add_check(check)
[ "def", "merge_section", "(", "self", ",", "section", ",", "filter_func", "=", "None", ")", ":", "for", "check", "in", "section", ".", "checks", ":", "if", "filter_func", "and", "not", "filter_func", "(", "check", ")", ":", "continue", "self", ".", "add_check", "(", "check", ")" ]
Add section.checks to self, if not skipped by self._add_check_callback. order, description, etc. are not updated.
[ "Add", "section", ".", "checks", "to", "self", "if", "not", "skipped", "by", "self", ".", "_add_check_callback", ".", "order", "description", "etc", ".", "are", "not", "updated", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/checkrunner.py#L745-L753
train
googlefonts/fontbakery
Lib/fontbakery/checkrunner.py
Profile.validate_values
def validate_values(self, values):
    """ Validate values if they are registered as expected_values and present.

    * If they are not registered they shouldn't be used anywhere at all
      because profile can self check (profile.check_dependencies) for
      missing/undefined dependencies.

    * If they are not present in values but registered as expected_values
      either the expected value has a default value OR a request for that
      name will raise a KeyError on runtime. We don't know if all expected
      values are actually needed/used, thus this fails late.
    """
    format_message = '{}: {} (value: {})'.format
    messages = []
    for name, value in values.items():
        if name not in self.expected_values:
            continue
        valid, message = self.expected_values[name].validate(value)
        if valid:
            continue
        messages.append(format_message(name, message, value))
    if len(messages):
        return False, '\n'.join(messages)
    return True, None
python
def validate_values(self, values):
    """ Validate values if they are registered as expected_values and present.

    * If they are not registered they shouldn't be used anywhere at all
      because profile can self check (profile.check_dependencies) for
      missing/undefined dependencies.

    * If they are not present in values but registered as expected_values
      either the expected value has a default value OR a request for that
      name will raise a KeyError on runtime. We don't know if all expected
      values are actually needed/used, thus this fails late.
    """
    format_message = '{}: {} (value: {})'.format
    messages = []
    for name, value in values.items():
        if name not in self.expected_values:
            continue
        valid, message = self.expected_values[name].validate(value)
        if valid:
            continue
        messages.append(format_message(name, message, value))
    if len(messages):
        return False, '\n'.join(messages)
    return True, None
[ "def", "validate_values", "(", "self", ",", "values", ")", ":", "format_message", "=", "'{}: {} (value: {})'", ".", "format", "messages", "=", "[", "]", "for", "name", ",", "value", "in", "values", ".", "items", "(", ")", ":", "if", "name", "not", "in", "self", ".", "expected_values", ":", "continue", "valid", ",", "message", "=", "self", ".", "expected_values", "[", "name", "]", ".", "validate", "(", "value", ")", "if", "valid", ":", "continue", "messages", ".", "append", "(", "format_message", "(", "name", ",", "message", ",", "value", ")", ")", "if", "len", "(", "messages", ")", ":", "return", "False", ",", "'\\n'", ".", "join", "(", "messages", ")", "return", "True", ",", "None" ]
Validate values if they are registered as expected_values and present.

* If they are not registered they shouldn't be used anywhere at all
  because profile can self check (profile.check_dependencies) for
  missing/undefined dependencies.

* If they are not present in values but registered as expected_values
  either the expected value has a default value OR a request for that
  name will raise a KeyError on runtime. We don't know if all expected
  values are actually needed/used, thus this fails late.
[ "Validate", "values", "if", "they", "are", "registered", "as", "expected_values", "and", "present", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/checkrunner.py#L993-L1017
train
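The same validation loop with a stand-in expected value; the real FontBakeryExpectedValue also exposes a validate(value) call returning a (valid, message) pair, per the code above. Names and values below are illustrative.

class ExpectedInt:
    """Stand-in whose validate() mirrors the interface used above."""
    def validate(self, value):
        if isinstance(value, int):
            return True, None
        return False, 'expected an integer'

expected_values = {'interpolation_steps': ExpectedInt()}   # hypothetical name
values = {'interpolation_steps': 'three', 'unregistered_key': 42}

messages = []
for name, value in values.items():
    if name not in expected_values:
        continue                        # unregistered names are ignored here
    valid, message = expected_values[name].validate(value)
    if not valid:
        messages.append('{}: {} (value: {})'.format(name, message, value))

print((False, '\n'.join(messages)) if messages else (True, None))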
googlefonts/fontbakery
Lib/fontbakery/checkrunner.py
Profile._get_aggregate_args
def _get_aggregate_args(self, item, key):
    """
    Get all arguments or mandatory arguments of the item.

    Item is a check or a condition, which means it can be dependent on
    more conditions, this climbs down all the way.
    """
    if not key in ('args', 'mandatoryArgs'):
        raise TypeError('key must be "args" or "mandatoryArgs", got {}').format(key)
    dependencies = list(getattr(item, key))
    if hasattr(item, 'conditions'):
        dependencies += [name for negated, name in map(is_negated, item.conditions)]
    args = set()
    while dependencies:
        name = dependencies.pop()
        if name in args:
            continue
        args.add(name)
        # if this is a condition, expand its dependencies
        c = self.conditions.get(name, None)
        if c is None:
            continue
        dependencies += [dependency for dependency in getattr(c, key)
                         if dependency not in args]
    return args
python
def _get_aggregate_args(self, item, key):
    """
    Get all arguments or mandatory arguments of the item.

    Item is a check or a condition, which means it can be dependent on
    more conditions, this climbs down all the way.
    """
    if not key in ('args', 'mandatoryArgs'):
        raise TypeError('key must be "args" or "mandatoryArgs", got {}').format(key)
    dependencies = list(getattr(item, key))
    if hasattr(item, 'conditions'):
        dependencies += [name for negated, name in map(is_negated, item.conditions)]
    args = set()
    while dependencies:
        name = dependencies.pop()
        if name in args:
            continue
        args.add(name)
        # if this is a condition, expand its dependencies
        c = self.conditions.get(name, None)
        if c is None:
            continue
        dependencies += [dependency for dependency in getattr(c, key)
                         if dependency not in args]
    return args
[ "def", "_get_aggregate_args", "(", "self", ",", "item", ",", "key", ")", ":", "if", "not", "key", "in", "(", "'args'", ",", "'mandatoryArgs'", ")", ":", "raise", "TypeError", "(", "'key must be \"args\" or \"mandatoryArgs\", got {}'", ")", ".", "format", "(", "key", ")", "dependencies", "=", "list", "(", "getattr", "(", "item", ",", "key", ")", ")", "if", "hasattr", "(", "item", ",", "'conditions'", ")", ":", "dependencies", "+=", "[", "name", "for", "negated", ",", "name", "in", "map", "(", "is_negated", ",", "item", ".", "conditions", ")", "]", "args", "=", "set", "(", ")", "while", "dependencies", ":", "name", "=", "dependencies", ".", "pop", "(", ")", "if", "name", "in", "args", ":", "continue", "args", ".", "add", "(", "name", ")", "# if this is a condition, expand its dependencies", "c", "=", "self", ".", "conditions", ".", "get", "(", "name", ",", "None", ")", "if", "c", "is", "None", ":", "continue", "dependencies", "+=", "[", "dependency", "for", "dependency", "in", "getattr", "(", "c", ",", "key", ")", "if", "dependency", "not", "in", "args", "]", "return", "args" ]
Get all arguments or mandatory arguments of the item.

Item is a check or a condition, which means it can be dependent on
more conditions, this climbs down all the way.
[ "Get", "all", "arguments", "or", "mandatory", "arguments", "of", "the", "item", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/checkrunner.py#L1055-L1079
train
googlefonts/fontbakery
Lib/fontbakery/checkrunner.py
Profile.get_iterargs
def get_iterargs(self, item):
    """ Returns a tuple of all iterags for item, sorted by name."""
    # iterargs should always be mandatory, unless there's a good reason
    # not to, which I can't think of right now.
    args = self._get_aggregate_args(item, 'mandatoryArgs')
    return tuple(sorted([arg for arg in args if arg in self.iterargs]))
python
def get_iterargs(self, item):
    """ Returns a tuple of all iterags for item, sorted by name."""
    # iterargs should always be mandatory, unless there's a good reason
    # not to, which I can't think of right now.
    args = self._get_aggregate_args(item, 'mandatoryArgs')
    return tuple(sorted([arg for arg in args if arg in self.iterargs]))
[ "def", "get_iterargs", "(", "self", ",", "item", ")", ":", "# iterargs should always be mandatory, unless there's a good reason", "# not to, which I can't think of right now.", "args", "=", "self", ".", "_get_aggregate_args", "(", "item", ",", "'mandatoryArgs'", ")", "return", "tuple", "(", "sorted", "(", "[", "arg", "for", "arg", "in", "args", "if", "arg", "in", "self", ".", "iterargs", "]", ")", ")" ]
Returns a tuple of all iterags for item, sorted by name.
[ "Returns", "a", "tuple", "of", "all", "iterags", "for", "item", "sorted", "by", "name", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/checkrunner.py#L1081-L1087
train
googlefonts/fontbakery
Lib/fontbakery/checkrunner.py
Profile.auto_register
def auto_register(self, symbol_table, filter_func=None, profile_imports=None):
    """ Register items from `symbol_table` in the profile.

    Get all items from `symbol_table` dict and from `symbol_table.profile_imports`
    if it is present. If they an item is an instance of FontBakeryCheck,
    FontBakeryCondition or FontBakeryExpectedValue and register it in the
    default section.
    If an item is a python module, try to get a profile using
    `get_module_profile(item)` and then using `merge_profile`;
    If the profile_imports kwarg is given, it is used instead of the one taken
    from the module namespace.

    To register the current module use explicitly:
        `profile.auto_register(globals())`
    OR maybe: `profile.auto_register(sys.modules[__name__].__dict__)`
    To register an imported module explicitly:
        `profile.auto_register(module.__dict__)`

    if filter_func is defined it is called like:
        filter_func(type, name_or_id, item)
    where
        type: one of "check", "module", "condition", "expected_value",
              "iterarg", "derived_iterable", "alias"
        name_or_id: the name at which the item will be registered.
            if type == 'check': the check.id
            if type == 'module': the module name (module.__name__)
        item: the item to be registered
    if filter_func returns a falsy value for an item, the item will not
    be registered.
    """
    if profile_imports:
        symbol_table = symbol_table.copy()  # Avoid messing with original table
        symbol_table['profile_imports'] = profile_imports

    all_items = list(symbol_table.values()) + self._load_profile_imports(symbol_table)
    namespace_types = (FontBakeryCondition, FontBakeryExpectedValue)
    namespace_items = []

    for item in all_items:
        if isinstance(item, namespace_types):
            # register these after all modules have been registered. That way,
            # "local" items can optionally force override items registered
            # previously by modules.
            namespace_items.append(item)
        elif isinstance(item, FontBakeryCheck):
            if filter_func and not filter_func('check', item.id, item):
                continue
            self.register_check(item)
        elif isinstance(item, types.ModuleType):
            if filter_func and not filter_func('module', item.__name__, item):
                continue
            profile = get_module_profile(item)
            if profile:
                self.merge_profile(profile, filter_func=filter_func)

    for item in namespace_items:
        if isinstance(item, FontBakeryCondition):
            if filter_func and not filter_func('condition', item.name, item):
                continue
            self.register_condition(item)
        elif isinstance(item, FontBakeryExpectedValue):
            if filter_func and not filter_func('expected_value', item.name, item):
                continue
            self.register_expected_value(item)
python
def auto_register(self, symbol_table, filter_func=None, profile_imports=None):
    """ Register items from `symbol_table` in the profile.

    Get all items from `symbol_table` dict and from `symbol_table.profile_imports`
    if it is present. If they an item is an instance of FontBakeryCheck,
    FontBakeryCondition or FontBakeryExpectedValue and register it in the
    default section.
    If an item is a python module, try to get a profile using
    `get_module_profile(item)` and then using `merge_profile`;
    If the profile_imports kwarg is given, it is used instead of the one taken
    from the module namespace.

    To register the current module use explicitly:
        `profile.auto_register(globals())`
    OR maybe: `profile.auto_register(sys.modules[__name__].__dict__)`
    To register an imported module explicitly:
        `profile.auto_register(module.__dict__)`

    if filter_func is defined it is called like:
        filter_func(type, name_or_id, item)
    where
        type: one of "check", "module", "condition", "expected_value",
              "iterarg", "derived_iterable", "alias"
        name_or_id: the name at which the item will be registered.
            if type == 'check': the check.id
            if type == 'module': the module name (module.__name__)
        item: the item to be registered
    if filter_func returns a falsy value for an item, the item will not
    be registered.
    """
    if profile_imports:
        symbol_table = symbol_table.copy()  # Avoid messing with original table
        symbol_table['profile_imports'] = profile_imports

    all_items = list(symbol_table.values()) + self._load_profile_imports(symbol_table)
    namespace_types = (FontBakeryCondition, FontBakeryExpectedValue)
    namespace_items = []

    for item in all_items:
        if isinstance(item, namespace_types):
            # register these after all modules have been registered. That way,
            # "local" items can optionally force override items registered
            # previously by modules.
            namespace_items.append(item)
        elif isinstance(item, FontBakeryCheck):
            if filter_func and not filter_func('check', item.id, item):
                continue
            self.register_check(item)
        elif isinstance(item, types.ModuleType):
            if filter_func and not filter_func('module', item.__name__, item):
                continue
            profile = get_module_profile(item)
            if profile:
                self.merge_profile(profile, filter_func=filter_func)

    for item in namespace_items:
        if isinstance(item, FontBakeryCondition):
            if filter_func and not filter_func('condition', item.name, item):
                continue
            self.register_condition(item)
        elif isinstance(item, FontBakeryExpectedValue):
            if filter_func and not filter_func('expected_value', item.name, item):
                continue
            self.register_expected_value(item)
[ "def", "auto_register", "(", "self", ",", "symbol_table", ",", "filter_func", "=", "None", ",", "profile_imports", "=", "None", ")", ":", "if", "profile_imports", ":", "symbol_table", "=", "symbol_table", ".", "copy", "(", ")", "# Avoid messing with original table", "symbol_table", "[", "'profile_imports'", "]", "=", "profile_imports", "all_items", "=", "list", "(", "symbol_table", ".", "values", "(", ")", ")", "+", "self", ".", "_load_profile_imports", "(", "symbol_table", ")", "namespace_types", "=", "(", "FontBakeryCondition", ",", "FontBakeryExpectedValue", ")", "namespace_items", "=", "[", "]", "for", "item", "in", "all_items", ":", "if", "isinstance", "(", "item", ",", "namespace_types", ")", ":", "# register these after all modules have been registered. That way,", "# \"local\" items can optionally force override items registered", "# previously by modules.", "namespace_items", ".", "append", "(", "item", ")", "elif", "isinstance", "(", "item", ",", "FontBakeryCheck", ")", ":", "if", "filter_func", "and", "not", "filter_func", "(", "'check'", ",", "item", ".", "id", ",", "item", ")", ":", "continue", "self", ".", "register_check", "(", "item", ")", "elif", "isinstance", "(", "item", ",", "types", ".", "ModuleType", ")", ":", "if", "filter_func", "and", "not", "filter_func", "(", "'module'", ",", "item", ".", "__name__", ",", "item", ")", ":", "continue", "profile", "=", "get_module_profile", "(", "item", ")", "if", "profile", ":", "self", ".", "merge_profile", "(", "profile", ",", "filter_func", "=", "filter_func", ")", "for", "item", "in", "namespace_items", ":", "if", "isinstance", "(", "item", ",", "FontBakeryCondition", ")", ":", "if", "filter_func", "and", "not", "filter_func", "(", "'condition'", ",", "item", ".", "name", ",", "item", ")", ":", "continue", "self", ".", "register_condition", "(", "item", ")", "elif", "isinstance", "(", "item", ",", "FontBakeryExpectedValue", ")", ":", "if", "filter_func", "and", "not", "filter_func", "(", "'expected_value'", ",", "item", ".", "name", ",", "item", ")", ":", "continue", "self", ".", "register_expected_value", "(", "item", ")" ]
Register items from `symbol_table` in the profile.

Get all items from `symbol_table` dict and from `symbol_table.profile_imports`
if it is present. If they an item is an instance of FontBakeryCheck,
FontBakeryCondition or FontBakeryExpectedValue and register it in the
default section.
If an item is a python module, try to get a profile using
`get_module_profile(item)` and then using `merge_profile`;
If the profile_imports kwarg is given, it is used instead of the one taken
from the module namespace.

To register the current module use explicitly:
    `profile.auto_register(globals())`
OR maybe: `profile.auto_register(sys.modules[__name__].__dict__)`
To register an imported module explicitly:
    `profile.auto_register(module.__dict__)`

if filter_func is defined it is called like:
    filter_func(type, name_or_id, item)
where
    type: one of "check", "module", "condition", "expected_value",
          "iterarg", "derived_iterable", "alias"
    name_or_id: the name at which the item will be registered.
        if type == 'check': the check.id
        if type == 'module': the module name (module.__name__)
    item: the item to be registered
if filter_func returns a falsy value for an item, the item will not
be registered.
[ "Register", "items", "from", "symbol_table", "in", "the", "profile", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/checkrunner.py#L1417-L1481
train
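A hedged sketch of the registration idiom the docstring describes, shown as the tail of a hypothetical profile module. Only the auto_register call shape is taken from the docstring; profile_factory is assumed to come from whatever concrete profile type the module uses, so those lines are left as comments.

def keep_only_example_checks(item_type, name_or_id, item):
    """filter_func: register everything except checks outside com.example/."""
    if item_type == 'check':
        return name_or_id.startswith('com.example/')
    return True

# At the bottom of a profile module (names are illustrative):
#   profile = profile_factory(default_section=Section("Example checks"))
#   profile.auto_register(globals(), filter_func=keep_only_example_checks)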
googlefonts/fontbakery
Lib/fontbakery/checkrunner.py
Profile.merge_profile
def merge_profile(self, profile, filter_func=None):
    """Copy all namespace items from profile to self.

    Namespace items are: 'iterargs', 'derived_iterables', 'aliases',
                         'conditions', 'expected_values'

    Don't change any contents of profile ever!
    That means sections are cloned not used directly

    filter_func: see description in auto_register
    """
    # 'iterargs', 'derived_iterables', 'aliases', 'conditions', 'expected_values'
    for ns_type in self._valid_namespace_types:
        # this will raise a NamespaceError if an item of profile.{ns_type}
        # is already registered.
        ns_dict = getattr(profile, ns_type)
        if filter_func:
            ns_type_singular = self._valid_namespace_types[ns_type]
            ns_dict = {name:item for name,item in ns_dict.items()
                       if filter_func(ns_type_singular, name, item)}
        self._add_dict_to_namespace(ns_type, ns_dict)

    check_filter_func = None if not filter_func else \
        lambda check: filter_func('check', check.id, check)

    for section in profile.sections:
        my_section = self._sections.get(str(section), None)
        if not len(section.checks):
            continue
        if my_section is None:
            # create a new section: don't change other module/profile contents
            my_section = section.clone(check_filter_func)
            self.add_section(my_section)
        else:
            # order, description are not updated
            my_section.merge_section(section, check_filter_func)
python
def merge_profile(self, profile, filter_func=None):
    """Copy all namespace items from profile to self.

    Namespace items are: 'iterargs', 'derived_iterables', 'aliases',
                         'conditions', 'expected_values'

    Don't change any contents of profile ever!
    That means sections are cloned not used directly

    filter_func: see description in auto_register
    """
    # 'iterargs', 'derived_iterables', 'aliases', 'conditions', 'expected_values'
    for ns_type in self._valid_namespace_types:
        # this will raise a NamespaceError if an item of profile.{ns_type}
        # is already registered.
        ns_dict = getattr(profile, ns_type)
        if filter_func:
            ns_type_singular = self._valid_namespace_types[ns_type]
            ns_dict = {name:item for name,item in ns_dict.items()
                       if filter_func(ns_type_singular, name, item)}
        self._add_dict_to_namespace(ns_type, ns_dict)

    check_filter_func = None if not filter_func else \
        lambda check: filter_func('check', check.id, check)

    for section in profile.sections:
        my_section = self._sections.get(str(section), None)
        if not len(section.checks):
            continue
        if my_section is None:
            # create a new section: don't change other module/profile contents
            my_section = section.clone(check_filter_func)
            self.add_section(my_section)
        else:
            # order, description are not updated
            my_section.merge_section(section, check_filter_func)
[ "def", "merge_profile", "(", "self", ",", "profile", ",", "filter_func", "=", "None", ")", ":", "# 'iterargs', 'derived_iterables', 'aliases', 'conditions', 'expected_values'", "for", "ns_type", "in", "self", ".", "_valid_namespace_types", ":", "# this will raise a NamespaceError if an item of profile.{ns_type}", "# is already registered.", "ns_dict", "=", "getattr", "(", "profile", ",", "ns_type", ")", "if", "filter_func", ":", "ns_type_singular", "=", "self", ".", "_valid_namespace_types", "[", "ns_type", "]", "ns_dict", "=", "{", "name", ":", "item", "for", "name", ",", "item", "in", "ns_dict", ".", "items", "(", ")", "if", "filter_func", "(", "ns_type_singular", ",", "name", ",", "item", ")", "}", "self", ".", "_add_dict_to_namespace", "(", "ns_type", ",", "ns_dict", ")", "check_filter_func", "=", "None", "if", "not", "filter_func", "else", "lambda", "check", ":", "filter_func", "(", "'check'", ",", "check", ".", "id", ",", "check", ")", "for", "section", "in", "profile", ".", "sections", ":", "my_section", "=", "self", ".", "_sections", ".", "get", "(", "str", "(", "section", ")", ",", "None", ")", "if", "not", "len", "(", "section", ".", "checks", ")", ":", "continue", "if", "my_section", "is", "None", ":", "# create a new section: don't change other module/profile contents", "my_section", "=", "section", ".", "clone", "(", "check_filter_func", ")", "self", ".", "add_section", "(", "my_section", ")", "else", ":", "# order, description are not updated", "my_section", ".", "merge_section", "(", "section", ",", "check_filter_func", ")" ]
Copy all namespace items from profile to self.

Namespace items are: 'iterargs', 'derived_iterables', 'aliases',
                     'conditions', 'expected_values'

Don't change any contents of profile ever!
That means sections are cloned not used directly

filter_func: see description in auto_register
[ "Copy", "all", "namespace", "items", "from", "profile", "to", "self", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/checkrunner.py#L1483-L1517
train
googlefonts/fontbakery
Lib/fontbakery/checkrunner.py
Profile.serialize_identity
def serialize_identity(self, identity):
    """ Return a json string that can also be used as a key.

    The JSON is explicitly unambiguous in the item order
    entries (dictionaries are not ordered usually)
    Otherwise it is valid JSON
    """
    section, check, iterargs = identity
    values = map(
        # separators are without space, which is the default in JavaScript;
        # just in case we need to make these keys in JS.
        partial(json.dumps, separators=(',', ':'))
        # iterargs are sorted, because it doesn't matter for the result
        # but it gives more predictable keys.
        # Though, arguably, the order generated by the profile is also good
        # and conveys insights on how the order came to be (clustering of
        # iterargs). `sorted(iterargs)` however is more robust over time,
        # the keys will be the same, even if the sorting order changes.
        , [str(section), check.id, sorted(iterargs)]
    )
    return '{{"section":{},"check":{},"iterargs":{}}}'.format(*values)
python
def serialize_identity(self, identity):
    """ Return a json string that can also be used as a key.

    The JSON is explicitly unambiguous in the item order
    entries (dictionaries are not ordered usually)
    Otherwise it is valid JSON
    """
    section, check, iterargs = identity
    values = map(
        # separators are without space, which is the default in JavaScript;
        # just in case we need to make these keys in JS.
        partial(json.dumps, separators=(',', ':'))
        # iterargs are sorted, because it doesn't matter for the result
        # but it gives more predictable keys.
        # Though, arguably, the order generated by the profile is also good
        # and conveys insights on how the order came to be (clustering of
        # iterargs). `sorted(iterargs)` however is more robust over time,
        # the keys will be the same, even if the sorting order changes.
        , [str(section), check.id, sorted(iterargs)]
    )
    return '{{"section":{},"check":{},"iterargs":{}}}'.format(*values)
[ "def", "serialize_identity", "(", "self", ",", "identity", ")", ":", "section", ",", "check", ",", "iterargs", "=", "identity", "values", "=", "map", "(", "# separators are without space, which is the default in JavaScript;", "# just in case we need to make these keys in JS.", "partial", "(", "json", ".", "dumps", ",", "separators", "=", "(", "','", ",", "':'", ")", ")", "# iterargs are sorted, because it doesn't matter for the result", "# but it gives more predictable keys.", "# Though, arguably, the order generated by the profile is also good", "# and conveys insights on how the order came to be (clustering of", "# iterargs). `sorted(iterargs)` however is more robust over time,", "# the keys will be the same, even if the sorting order changes.", ",", "[", "str", "(", "section", ")", ",", "check", ".", "id", ",", "sorted", "(", "iterargs", ")", "]", ")", "return", "'{{\"section\":{},\"check\":{},\"iterargs\":{}}}'", ".", "format", "(", "*", "values", ")" ]
Return a json string that can also be used as a key.

The JSON is explicitly unambiguous in the item order
entries (dictionaries are not ordered usually)
Otherwise it is valid JSON
[ "Return", "a", "json", "string", "that", "can", "also", "be", "used", "as", "a", "key", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/checkrunner.py#L1550-L1570
train
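The key format can be reproduced by hand for an illustrative identity; the section name, check id and iterargs below are made up.

import json
from functools import partial

dumps = partial(json.dumps, separators=(',', ':'))
section, check_id, iterargs = 'Default', 'com.example/check/upm', (('font', 0),)
key = '{{"section":{},"check":{},"iterargs":{}}}'.format(
    dumps(str(section)), dumps(check_id), dumps(sorted(iterargs)))
print(key)
# {"section":"Default","check":"com.example/check/upm","iterargs":[["font",0]]}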
googlefonts/fontbakery
Lib/fontbakery/commands/check_profile.py
get_profile
def get_profile(): """ Prefetch the profile module, to fill some holes in the help text.""" argument_parser = ThrowingArgumentParser(add_help=False) argument_parser.add_argument('profile') try: args, _ = argument_parser.parse_known_args() except ArgumentParserError: # silently fails, the main parser will show usage string. return Profile() imported = get_module(args.profile) profile = get_module_profile(imported) if not profile: raise Exception(f"Can't get a profile from {imported}.") return profile
python
def get_profile(): """ Prefetch the profile module, to fill some holes in the help text.""" argument_parser = ThrowingArgumentParser(add_help=False) argument_parser.add_argument('profile') try: args, _ = argument_parser.parse_known_args() except ArgumentParserError: # silently fails, the main parser will show usage string. return Profile() imported = get_module(args.profile) profile = get_module_profile(imported) if not profile: raise Exception(f"Can't get a profile from {imported}.") return profile
[ "def", "get_profile", "(", ")", ":", "argument_parser", "=", "ThrowingArgumentParser", "(", "add_help", "=", "False", ")", "argument_parser", ".", "add_argument", "(", "'profile'", ")", "try", ":", "args", ",", "_", "=", "argument_parser", ".", "parse_known_args", "(", ")", "except", "ArgumentParserError", ":", "# silently fails, the main parser will show usage string.", "return", "Profile", "(", ")", "imported", "=", "get_module", "(", "args", ".", "profile", ")", "profile", "=", "get_module_profile", "(", "imported", ")", "if", "not", "profile", ":", "raise", "Exception", "(", "f\"Can't get a profile from {imported}.\"", ")", "return", "profile" ]
Prefetch the profile module, to fill some holes in the help text.
[ "Prefetch", "the", "profile", "module", "to", "fill", "some", "holes", "in", "the", "help", "text", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/commands/check_profile.py#L193-L206
train
googlefonts/fontbakery
Lib/fontbakery/commands/generate_glyphdata.py
collate_fonts_data
def collate_fonts_data(fonts_data):
    """Collate individual fonts data into a single glyph data list."""
    glyphs = {}

    for family in fonts_data:
        for glyph in family:
            if glyph['unicode'] not in glyphs:
                glyphs[glyph['unicode']] = glyph
            else:
                c = glyphs[glyph['unicode']]['contours']
                glyphs[glyph['unicode']]['contours'] = c | glyph['contours']
    return glyphs.values()
python
def collate_fonts_data(fonts_data):
    """Collate individual fonts data into a single glyph data list."""
    glyphs = {}

    for family in fonts_data:
        for glyph in family:
            if glyph['unicode'] not in glyphs:
                glyphs[glyph['unicode']] = glyph
            else:
                c = glyphs[glyph['unicode']]['contours']
                glyphs[glyph['unicode']]['contours'] = c | glyph['contours']
    return glyphs.values()
[ "def", "collate_fonts_data", "(", "fonts_data", ")", ":", "glyphs", "=", "{", "}", "for", "family", "in", "fonts_data", ":", "for", "glyph", "in", "family", ":", "if", "glyph", "[", "'unicode'", "]", "not", "in", "glyphs", ":", "glyphs", "[", "glyph", "[", "'unicode'", "]", "]", "=", "glyph", "else", ":", "c", "=", "glyphs", "[", "glyph", "[", "'unicode'", "]", "]", "[", "'contours'", "]", "glyphs", "[", "glyph", "[", "'unicode'", "]", "]", "[", "'contours'", "]", "=", "c", "|", "glyph", "[", "'contours'", "]", "return", "glyphs", ".", "values", "(", ")" ]
Collate individual fonts data into a single glyph data list.
[ "Collate", "individual", "fonts", "data", "into", "a", "single", "glyph", "data", "list", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/commands/generate_glyphdata.py#L35-L46
train
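The per-glyph 'contours' values behave as sets here (the union operator `|` is applied), so collation merges the contour counts seen across families. A tiny illustration with made-up data:

fonts_data = [
    [{'unicode': 0x41, 'contours': {2}}],          # 'A' in one family
    [{'unicode': 0x41, 'contours': {2, 3}},        # 'A' in another family
     {'unicode': 0x42, 'contours': {3}}],          # 'B'
]

glyphs = {}
for family in fonts_data:
    for glyph in family:
        if glyph['unicode'] not in glyphs:
            glyphs[glyph['unicode']] = glyph
        else:
            c = glyphs[glyph['unicode']]['contours']
            glyphs[glyph['unicode']]['contours'] = c | glyph['contours']

print(list(glyphs.values()))
# [{'unicode': 65, 'contours': {2, 3}}, {'unicode': 66, 'contours': {3}}]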
googlefonts/fontbakery
Lib/fontbakery/profiles/adobefonts.py
com_adobe_fonts_check_family_consistent_upm
def com_adobe_fonts_check_family_consistent_upm(ttFonts):
    """Fonts have consistent Units Per Em?"""
    upm_set = set()
    for ttFont in ttFonts:
        upm_set.add(ttFont['head'].unitsPerEm)

    if len(upm_set) > 1:
        yield FAIL, ("Fonts have different units per em: {}."
                     ).format(sorted(upm_set))
    else:
        yield PASS, "Fonts have consistent units per em."
python
def com_adobe_fonts_check_family_consistent_upm(ttFonts):
    """Fonts have consistent Units Per Em?"""
    upm_set = set()
    for ttFont in ttFonts:
        upm_set.add(ttFont['head'].unitsPerEm)

    if len(upm_set) > 1:
        yield FAIL, ("Fonts have different units per em: {}."
                     ).format(sorted(upm_set))
    else:
        yield PASS, "Fonts have consistent units per em."
[ "def", "com_adobe_fonts_check_family_consistent_upm", "(", "ttFonts", ")", ":", "upm_set", "=", "set", "(", ")", "for", "ttFont", "in", "ttFonts", ":", "upm_set", ".", "add", "(", "ttFont", "[", "'head'", "]", ".", "unitsPerEm", ")", "if", "len", "(", "upm_set", ")", ">", "1", ":", "yield", "FAIL", ",", "(", "\"Fonts have different units per em: {}.\"", ")", ".", "format", "(", "sorted", "(", "upm_set", ")", ")", "else", ":", "yield", "PASS", ",", "\"Fonts have consistent units per em.\"" ]
Fonts have consistent Units Per Em?
[ "Fonts", "have", "consistent", "Units", "Per", "Em?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/adobefonts.py#L27-L36
train
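Because the check only reads ttFont['head'].unitsPerEm, its behaviour can be seen with stub font objects. This sketch assumes the plain function above (with FAIL/PASS from its module) is in scope; in fontbakery the real check is additionally wrapped by the check decorator.

class _Head:
    def __init__(self, upm):
        self.unitsPerEm = upm

def fake_font(upm):
    # minimal stand-in for a fontTools TTFont: only supports ['head'].unitsPerEm
    return {'head': _Head(upm)}

fonts = [fake_font(1000), fake_font(2048)]
for status, message in com_adobe_fonts_check_family_consistent_upm(fonts):
    print(status, message)
# FAIL Fonts have different units per em: [1000, 2048].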
googlefonts/fontbakery
Lib/fontbakery/profiles/adobefonts.py
com_adobe_fonts_check_find_empty_letters
def com_adobe_fonts_check_find_empty_letters(ttFont):
    """Letters in font have glyphs that are not empty?"""
    cmap = ttFont.getBestCmap()
    passed = True

    # http://unicode.org/reports/tr44/#General_Category_Values
    letter_categories = {
        'Ll', 'Lm', 'Lo', 'Lt', 'Lu',
    }
    invisible_letters = {
        0x115F, 0x1160, 0x3164, 0xFFA0,  # Hangul filler chars (category='Lo')
    }
    for unicode_val, glyph_name in cmap.items():
        category = unicodedata.category(chr(unicode_val))
        if (_quick_and_dirty_glyph_is_empty(ttFont, glyph_name)) \
                and (category in letter_categories) \
                and (unicode_val not in invisible_letters):
            yield FAIL, \
                ("U+%04X should be visible, but its glyph ('%s') is empty."
                 % (unicode_val, glyph_name))
            passed = False
    if passed:
        yield PASS, "No empty glyphs for letters found."
python
def com_adobe_fonts_check_find_empty_letters(ttFont):
    """Letters in font have glyphs that are not empty?"""
    cmap = ttFont.getBestCmap()
    passed = True

    # http://unicode.org/reports/tr44/#General_Category_Values
    letter_categories = {
        'Ll', 'Lm', 'Lo', 'Lt', 'Lu',
    }
    invisible_letters = {
        0x115F, 0x1160, 0x3164, 0xFFA0,  # Hangul filler chars (category='Lo')
    }
    for unicode_val, glyph_name in cmap.items():
        category = unicodedata.category(chr(unicode_val))
        if (_quick_and_dirty_glyph_is_empty(ttFont, glyph_name)) \
                and (category in letter_categories) \
                and (unicode_val not in invisible_letters):
            yield FAIL, \
                ("U+%04X should be visible, but its glyph ('%s') is empty."
                 % (unicode_val, glyph_name))
            passed = False
    if passed:
        yield PASS, "No empty glyphs for letters found."
[ "def", "com_adobe_fonts_check_find_empty_letters", "(", "ttFont", ")", ":", "cmap", "=", "ttFont", ".", "getBestCmap", "(", ")", "passed", "=", "True", "# http://unicode.org/reports/tr44/#General_Category_Values", "letter_categories", "=", "{", "'Ll'", ",", "'Lm'", ",", "'Lo'", ",", "'Lt'", ",", "'Lu'", ",", "}", "invisible_letters", "=", "{", "0x115F", ",", "0x1160", ",", "0x3164", ",", "0xFFA0", ",", "# Hangul filler chars (category='Lo')", "}", "for", "unicode_val", ",", "glyph_name", "in", "cmap", ".", "items", "(", ")", ":", "category", "=", "unicodedata", ".", "category", "(", "chr", "(", "unicode_val", ")", ")", "if", "(", "_quick_and_dirty_glyph_is_empty", "(", "ttFont", ",", "glyph_name", ")", ")", "and", "(", "category", "in", "letter_categories", ")", "and", "(", "unicode_val", "not", "in", "invisible_letters", ")", ":", "yield", "FAIL", ",", "(", "\"U+%04X should be visible, but its glyph ('%s') is empty.\"", "%", "(", "unicode_val", ",", "glyph_name", ")", ")", "passed", "=", "False", "if", "passed", ":", "yield", "PASS", ",", "\"No empty glyphs for letters found.\"" ]
Letters in font have glyphs that are not empty?
[ "Letters", "in", "font", "have", "glyphs", "that", "are", "not", "empty?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/adobefonts.py#L81-L103
train
googlefonts/fontbakery
Lib/fontbakery/profiles/name.py
com_adobe_fonts_check_name_empty_records
def com_adobe_fonts_check_name_empty_records(ttFont):
    """Check name table for empty records."""
    failed = False
    for name_record in ttFont['name'].names:
        name_string = name_record.toUnicode().strip()
        if len(name_string) == 0:
            failed = True
            name_key = tuple([name_record.platformID, name_record.platEncID,
                              name_record.langID, name_record.nameID])
            yield FAIL, ("'name' table record with key={} is "
                         "empty and should be removed."
                         ).format(name_key)
    if not failed:
        yield PASS, ("No empty name table records found.")
python
def com_adobe_fonts_check_name_empty_records(ttFont):
    """Check name table for empty records."""
    failed = False
    for name_record in ttFont['name'].names:
        name_string = name_record.toUnicode().strip()
        if len(name_string) == 0:
            failed = True
            name_key = tuple([name_record.platformID, name_record.platEncID,
                              name_record.langID, name_record.nameID])
            yield FAIL, ("'name' table record with key={} is "
                         "empty and should be removed."
                         ).format(name_key)
    if not failed:
        yield PASS, ("No empty name table records found.")
[ "def", "com_adobe_fonts_check_name_empty_records", "(", "ttFont", ")", ":", "failed", "=", "False", "for", "name_record", "in", "ttFont", "[", "'name'", "]", ".", "names", ":", "name_string", "=", "name_record", ".", "toUnicode", "(", ")", ".", "strip", "(", ")", "if", "len", "(", "name_string", ")", "==", "0", ":", "failed", "=", "True", "name_key", "=", "tuple", "(", "[", "name_record", ".", "platformID", ",", "name_record", ".", "platEncID", ",", "name_record", ".", "langID", ",", "name_record", ".", "nameID", "]", ")", "yield", "FAIL", ",", "(", "\"'name' table record with key={} is \"", "\"empty and should be removed.\"", ")", ".", "format", "(", "name_key", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "(", "\"No empty name table records found.\"", ")" ]
Check name table for empty records.
[ "Check", "name", "table", "for", "empty", "records", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/name.py#L20-L33
train
googlefonts/fontbakery
Lib/fontbakery/profiles/name.py
com_google_fonts_check_name_no_copyright_on_description
def com_google_fonts_check_name_no_copyright_on_description(ttFont): """Description strings in the name table must not contain copyright info.""" failed = False for name in ttFont['name'].names: if 'opyright' in name.string.decode(name.getEncoding())\ and name.nameID == NameID.DESCRIPTION: failed = True if failed: yield FAIL, ("Namerecords with ID={} (NameID.DESCRIPTION)" " should be removed (perhaps these were added by" " a longstanding FontLab Studio 5.x bug that" " copied copyright notices to them.)" "").format(NameID.DESCRIPTION) else: yield PASS, ("Description strings in the name table" " do not contain any copyright string.")
python
def com_google_fonts_check_name_no_copyright_on_description(ttFont): """Description strings in the name table must not contain copyright info.""" failed = False for name in ttFont['name'].names: if 'opyright' in name.string.decode(name.getEncoding())\ and name.nameID == NameID.DESCRIPTION: failed = True if failed: yield FAIL, ("Namerecords with ID={} (NameID.DESCRIPTION)" " should be removed (perhaps these were added by" " a longstanding FontLab Studio 5.x bug that" " copied copyright notices to them.)" "").format(NameID.DESCRIPTION) else: yield PASS, ("Description strings in the name table" " do not contain any copyright string.")
[ "def", "com_google_fonts_check_name_no_copyright_on_description", "(", "ttFont", ")", ":", "failed", "=", "False", "for", "name", "in", "ttFont", "[", "'name'", "]", ".", "names", ":", "if", "'opyright'", "in", "name", ".", "string", ".", "decode", "(", "name", ".", "getEncoding", "(", ")", ")", "and", "name", ".", "nameID", "==", "NameID", ".", "DESCRIPTION", ":", "failed", "=", "True", "if", "failed", ":", "yield", "FAIL", ",", "(", "\"Namerecords with ID={} (NameID.DESCRIPTION)\"", "\" should be removed (perhaps these were added by\"", "\" a longstanding FontLab Studio 5.x bug that\"", "\" copied copyright notices to them.)\"", "\"\"", ")", ".", "format", "(", "NameID", ".", "DESCRIPTION", ")", "else", ":", "yield", "PASS", ",", "(", "\"Description strings in the name table\"", "\" do not contain any copyright string.\"", ")" ]
Description strings in the name table must not contain copyright info.
[ "Description", "strings", "in", "the", "name", "table", "must", "not", "contain", "copyright", "info", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/name.py#L42-L58
train
googlefonts/fontbakery
Lib/fontbakery/profiles/name.py
com_google_fonts_check_monospace
def com_google_fonts_check_monospace(ttFont, glyph_metrics_stats):
  """Checking correctness of monospaced metadata.

  There are various metadata in the OpenType spec to specify if
  a font is monospaced or not. If the font is not trully monospaced,
  then no monospaced metadata should be set (as sometimes
  they mistakenly are...)

  Monospace fonts must:

  * post.isFixedWidth "Set to 0 if the font is proportionally spaced,
    non-zero if the font is not proportionally spaced (monospaced)"
    www.microsoft.com/typography/otspec/post.htm

  * hhea.advanceWidthMax must be correct, meaning no glyph's
    width value is greater.
    www.microsoft.com/typography/otspec/hhea.htm

  * OS/2.panose.bProportion must be set to 9 (monospace). Spec says:
    "The PANOSE definition contains ten digits each of which currently
    describes up to sixteen variations. Windows uses bFamilyType,
    bSerifStyle and bProportion in the font mapper to determine
    family type. It also uses bProportion to determine if the font
    is monospaced."
    www.microsoft.com/typography/otspec/os2.htm#pan
    monotypecom-test.monotype.de/services/pan2

  * OS/2.xAverageWidth must be set accurately.
    "OS/2.xAverageWidth IS used when rendering monospaced fonts,
    at least by Windows GDI"
    http://typedrawers.com/discussion/comment/15397/#Comment_15397

  Also we should report an error for glyphs not of average width
  """
  from fontbakery.constants import (IsFixedWidth, PANOSE_Proportion)
  failed = False
  # Note: These values are read from the dict here only to
  # reduce the max line length in the check implementation below:
  seems_monospaced = glyph_metrics_stats["seems_monospaced"]
  most_common_width = glyph_metrics_stats["most_common_width"]
  width_max = glyph_metrics_stats['width_max']

  if ttFont['hhea'].advanceWidthMax != width_max:
    failed = True
    yield FAIL, Message("bad-advanceWidthMax",
                        ("Value of hhea.advanceWidthMax"
                         " should be set to {} but got"
                         " {} instead."
                         "").format(width_max,
                                    ttFont['hhea'].advanceWidthMax))

  if seems_monospaced:
    if ttFont['post'].isFixedPitch == IsFixedWidth.NOT_MONOSPACED:
      failed = True
      yield FAIL, Message("mono-bad-post-isFixedPitch",
                          ("On monospaced fonts, the value of"
                           " post.isFixedPitch must be set to a non-zero value"
                           " (meaning 'fixed width monospaced'),"
                           " but got {} instead."
                           "").format(ttFont['post'].isFixedPitch))

    if ttFont['OS/2'].panose.bProportion != PANOSE_Proportion.MONOSPACED:
      failed = True
      yield FAIL, Message("mono-bad-panose-proportion",
                          ("On monospaced fonts, the value of"
                           " OS/2.panose.bProportion must be set to {}"
                           " (proportion: monospaced), but got"
                           " {} instead."
                           "").format(PANOSE_Proportion.MONOSPACED,
                                      ttFont['OS/2'].panose.bProportion))

    num_glyphs = len(ttFont['glyf'].glyphs)
    unusually_spaced_glyphs = [
        g for g in ttFont['glyf'].glyphs
        if g not in ['.notdef', '.null', 'NULL'] and
        ttFont['hmtx'].metrics[g][0] != most_common_width
    ]
    outliers_ratio = float(len(unusually_spaced_glyphs)) / num_glyphs
    if outliers_ratio > 0:
      failed = True
      yield WARN, Message("mono-outliers",
                          ("Font is monospaced but {} glyphs"
                           " ({}%) have a different width."
                           " You should check the widths of:"
                           " {}").format(len(unusually_spaced_glyphs),
                                         100.0 * outliers_ratio,
                                         unusually_spaced_glyphs))

    if not failed:
      yield PASS, Message("mono-good",
                          ("Font is monospaced and all"
                           " related metadata look good."))
  else:
    # it is a non-monospaced font, so lets make sure
    # that all monospace-related metadata is properly unset.
    if ttFont['post'].isFixedPitch != IsFixedWidth.NOT_MONOSPACED:
      failed = True
      yield FAIL, Message("bad-post-isFixedPitch",
                          ("On non-monospaced fonts, the"
                           " post.isFixedPitch value must be set to {}"
                           " (not monospaced), but got {} instead."
                           "").format(IsFixedWidth.NOT_MONOSPACED,
                                      ttFont['post'].isFixedPitch))

    if ttFont['OS/2'].panose.bProportion == PANOSE_Proportion.MONOSPACED:
      failed = True
      yield FAIL, Message("bad-panose-proportion",
                          ("On non-monospaced fonts, the"
                           " OS/2.panose.bProportion value can be set to "
                           " any value except 9 (proportion: monospaced)"
                           " which is the bad value we got in this font."))

    if not failed:
      yield PASS, Message("good", ("Font is not monospaced and"
                                   " all related metadata look good."))
python
def com_google_fonts_check_monospace(ttFont, glyph_metrics_stats):
  """Checking correctness of monospaced metadata.

  There are various metadata in the OpenType spec to specify if
  a font is monospaced or not. If the font is not trully monospaced,
  then no monospaced metadata should be set (as sometimes
  they mistakenly are...)

  Monospace fonts must:

  * post.isFixedWidth "Set to 0 if the font is proportionally spaced,
    non-zero if the font is not proportionally spaced (monospaced)"
    www.microsoft.com/typography/otspec/post.htm

  * hhea.advanceWidthMax must be correct, meaning no glyph's
    width value is greater.
    www.microsoft.com/typography/otspec/hhea.htm

  * OS/2.panose.bProportion must be set to 9 (monospace). Spec says:
    "The PANOSE definition contains ten digits each of which currently
    describes up to sixteen variations. Windows uses bFamilyType,
    bSerifStyle and bProportion in the font mapper to determine
    family type. It also uses bProportion to determine if the font
    is monospaced."
    www.microsoft.com/typography/otspec/os2.htm#pan
    monotypecom-test.monotype.de/services/pan2

  * OS/2.xAverageWidth must be set accurately.
    "OS/2.xAverageWidth IS used when rendering monospaced fonts,
    at least by Windows GDI"
    http://typedrawers.com/discussion/comment/15397/#Comment_15397

  Also we should report an error for glyphs not of average width
  """
  from fontbakery.constants import (IsFixedWidth, PANOSE_Proportion)
  failed = False
  # Note: These values are read from the dict here only to
  # reduce the max line length in the check implementation below:
  seems_monospaced = glyph_metrics_stats["seems_monospaced"]
  most_common_width = glyph_metrics_stats["most_common_width"]
  width_max = glyph_metrics_stats['width_max']

  if ttFont['hhea'].advanceWidthMax != width_max:
    failed = True
    yield FAIL, Message("bad-advanceWidthMax",
                        ("Value of hhea.advanceWidthMax"
                         " should be set to {} but got"
                         " {} instead."
                         "").format(width_max,
                                    ttFont['hhea'].advanceWidthMax))

  if seems_monospaced:
    if ttFont['post'].isFixedPitch == IsFixedWidth.NOT_MONOSPACED:
      failed = True
      yield FAIL, Message("mono-bad-post-isFixedPitch",
                          ("On monospaced fonts, the value of"
                           " post.isFixedPitch must be set to a non-zero value"
                           " (meaning 'fixed width monospaced'),"
                           " but got {} instead."
                           "").format(ttFont['post'].isFixedPitch))

    if ttFont['OS/2'].panose.bProportion != PANOSE_Proportion.MONOSPACED:
      failed = True
      yield FAIL, Message("mono-bad-panose-proportion",
                          ("On monospaced fonts, the value of"
                           " OS/2.panose.bProportion must be set to {}"
                           " (proportion: monospaced), but got"
                           " {} instead."
                           "").format(PANOSE_Proportion.MONOSPACED,
                                      ttFont['OS/2'].panose.bProportion))

    num_glyphs = len(ttFont['glyf'].glyphs)
    unusually_spaced_glyphs = [
        g for g in ttFont['glyf'].glyphs
        if g not in ['.notdef', '.null', 'NULL'] and
        ttFont['hmtx'].metrics[g][0] != most_common_width
    ]
    outliers_ratio = float(len(unusually_spaced_glyphs)) / num_glyphs
    if outliers_ratio > 0:
      failed = True
      yield WARN, Message("mono-outliers",
                          ("Font is monospaced but {} glyphs"
                           " ({}%) have a different width."
                           " You should check the widths of:"
                           " {}").format(len(unusually_spaced_glyphs),
                                         100.0 * outliers_ratio,
                                         unusually_spaced_glyphs))

    if not failed:
      yield PASS, Message("mono-good",
                          ("Font is monospaced and all"
                           " related metadata look good."))
  else:
    # it is a non-monospaced font, so lets make sure
    # that all monospace-related metadata is properly unset.
    if ttFont['post'].isFixedPitch != IsFixedWidth.NOT_MONOSPACED:
      failed = True
      yield FAIL, Message("bad-post-isFixedPitch",
                          ("On non-monospaced fonts, the"
                           " post.isFixedPitch value must be set to {}"
                           " (not monospaced), but got {} instead."
                           "").format(IsFixedWidth.NOT_MONOSPACED,
                                      ttFont['post'].isFixedPitch))

    if ttFont['OS/2'].panose.bProportion == PANOSE_Proportion.MONOSPACED:
      failed = True
      yield FAIL, Message("bad-panose-proportion",
                          ("On non-monospaced fonts, the"
                           " OS/2.panose.bProportion value can be set to "
                           " any value except 9 (proportion: monospaced)"
                           " which is the bad value we got in this font."))

    if not failed:
      yield PASS, Message("good", ("Font is not monospaced and"
                                   " all related metadata look good."))
[ "def", "com_google_fonts_check_monospace", "(", "ttFont", ",", "glyph_metrics_stats", ")", ":", "from", "fontbakery", ".", "constants", "import", "(", "IsFixedWidth", ",", "PANOSE_Proportion", ")", "failed", "=", "False", "# Note: These values are read from the dict here only to", "# reduce the max line length in the check implementation below:", "seems_monospaced", "=", "glyph_metrics_stats", "[", "\"seems_monospaced\"", "]", "most_common_width", "=", "glyph_metrics_stats", "[", "\"most_common_width\"", "]", "width_max", "=", "glyph_metrics_stats", "[", "'width_max'", "]", "if", "ttFont", "[", "'hhea'", "]", ".", "advanceWidthMax", "!=", "width_max", ":", "failed", "=", "True", "yield", "FAIL", ",", "Message", "(", "\"bad-advanceWidthMax\"", ",", "(", "\"Value of hhea.advanceWidthMax\"", "\" should be set to {} but got\"", "\" {} instead.\"", "\"\"", ")", ".", "format", "(", "width_max", ",", "ttFont", "[", "'hhea'", "]", ".", "advanceWidthMax", ")", ")", "if", "seems_monospaced", ":", "if", "ttFont", "[", "'post'", "]", ".", "isFixedPitch", "==", "IsFixedWidth", ".", "NOT_MONOSPACED", ":", "failed", "=", "True", "yield", "FAIL", ",", "Message", "(", "\"mono-bad-post-isFixedPitch\"", ",", "(", "\"On monospaced fonts, the value of\"", "\" post.isFixedPitch must be set to a non-zero value\"", "\" (meaning 'fixed width monospaced'),\"", "\" but got {} instead.\"", "\"\"", ")", ".", "format", "(", "ttFont", "[", "'post'", "]", ".", "isFixedPitch", ")", ")", "if", "ttFont", "[", "'OS/2'", "]", ".", "panose", ".", "bProportion", "!=", "PANOSE_Proportion", ".", "MONOSPACED", ":", "failed", "=", "True", "yield", "FAIL", ",", "Message", "(", "\"mono-bad-panose-proportion\"", ",", "(", "\"On monospaced fonts, the value of\"", "\" OS/2.panose.bProportion must be set to {}\"", "\" (proportion: monospaced), but got\"", "\" {} instead.\"", "\"\"", ")", ".", "format", "(", "PANOSE_Proportion", ".", "MONOSPACED", ",", "ttFont", "[", "'OS/2'", "]", ".", "panose", ".", "bProportion", ")", ")", "num_glyphs", "=", "len", "(", "ttFont", "[", "'glyf'", "]", ".", "glyphs", ")", "unusually_spaced_glyphs", "=", "[", "g", "for", "g", "in", "ttFont", "[", "'glyf'", "]", ".", "glyphs", "if", "g", "not", "in", "[", "'.notdef'", ",", "'.null'", ",", "'NULL'", "]", "and", "ttFont", "[", "'hmtx'", "]", ".", "metrics", "[", "g", "]", "[", "0", "]", "!=", "most_common_width", "]", "outliers_ratio", "=", "float", "(", "len", "(", "unusually_spaced_glyphs", ")", ")", "/", "num_glyphs", "if", "outliers_ratio", ">", "0", ":", "failed", "=", "True", "yield", "WARN", ",", "Message", "(", "\"mono-outliers\"", ",", "(", "\"Font is monospaced but {} glyphs\"", "\" ({}%) have a different width.\"", "\" You should check the widths of:\"", "\" {}\"", ")", ".", "format", "(", "len", "(", "unusually_spaced_glyphs", ")", ",", "100.0", "*", "outliers_ratio", ",", "unusually_spaced_glyphs", ")", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "Message", "(", "\"mono-good\"", ",", "(", "\"Font is monospaced and all\"", "\" related metadata look good.\"", ")", ")", "else", ":", "# it is a non-monospaced font, so lets make sure", "# that all monospace-related metadata is properly unset.", "if", "ttFont", "[", "'post'", "]", ".", "isFixedPitch", "!=", "IsFixedWidth", ".", "NOT_MONOSPACED", ":", "failed", "=", "True", "yield", "FAIL", ",", "Message", "(", "\"bad-post-isFixedPitch\"", ",", "(", "\"On non-monospaced fonts, the\"", "\" post.isFixedPitch value must be set to {}\"", "\" (not monospaced), but got {} instead.\"", "\"\"", 
")", ".", "format", "(", "IsFixedWidth", ".", "NOT_MONOSPACED", ",", "ttFont", "[", "'post'", "]", ".", "isFixedPitch", ")", ")", "if", "ttFont", "[", "'OS/2'", "]", ".", "panose", ".", "bProportion", "==", "PANOSE_Proportion", ".", "MONOSPACED", ":", "failed", "=", "True", "yield", "FAIL", ",", "Message", "(", "\"bad-panose-proportion\"", ",", "(", "\"On non-monospaced fonts, the\"", "\" OS/2.panose.bProportion value can be set to \"", "\" any value except 9 (proportion: monospaced)\"", "\" which is the bad value we got in this font.\"", ")", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "Message", "(", "\"good\"", ",", "(", "\"Font is not monospaced and\"", "\" all related metadata look good.\"", ")", ")" ]
Checking correctness of monospaced metadata.

There are various metadata in the OpenType spec to specify if
a font is monospaced or not. If the font is not trully monospaced,
then no monospaced metadata should be set (as sometimes
they mistakenly are...)

Monospace fonts must:

* post.isFixedWidth "Set to 0 if the font is proportionally spaced,
  non-zero if the font is not proportionally spaced (monospaced)"
  www.microsoft.com/typography/otspec/post.htm

* hhea.advanceWidthMax must be correct, meaning no glyph's
  width value is greater.
  www.microsoft.com/typography/otspec/hhea.htm

* OS/2.panose.bProportion must be set to 9 (monospace). Spec says:
  "The PANOSE definition contains ten digits each of which currently
  describes up to sixteen variations. Windows uses bFamilyType,
  bSerifStyle and bProportion in the font mapper to determine
  family type. It also uses bProportion to determine if the font
  is monospaced."
  www.microsoft.com/typography/otspec/os2.htm#pan
  monotypecom-test.monotype.de/services/pan2

* OS/2.xAverageWidth must be set accurately.
  "OS/2.xAverageWidth IS used when rendering monospaced fonts,
  at least by Windows GDI"
  http://typedrawers.com/discussion/comment/15397/#Comment_15397

Also we should report an error for glyphs not of average width
[ "Checking", "correctness", "of", "monospaced", "metadata", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/name.py#L66-L177
train
googlefonts/fontbakery
Lib/fontbakery/profiles/name.py
com_google_fonts_check_name_line_breaks
def com_google_fonts_check_name_line_breaks(ttFont): """Name table entries should not contain line-breaks.""" failed = False for name in ttFont["name"].names: string = name.string.decode(name.getEncoding()) if "\n" in string: failed = True yield FAIL, ("Name entry {} on platform {} contains" " a line-break.").format(NameID(name.nameID).name, PlatformID(name.platformID).name) if not failed: yield PASS, ("Name table entries are all single-line" " (no line-breaks found).")
python
def com_google_fonts_check_name_line_breaks(ttFont): """Name table entries should not contain line-breaks.""" failed = False for name in ttFont["name"].names: string = name.string.decode(name.getEncoding()) if "\n" in string: failed = True yield FAIL, ("Name entry {} on platform {} contains" " a line-break.").format(NameID(name.nameID).name, PlatformID(name.platformID).name) if not failed: yield PASS, ("Name table entries are all single-line" " (no line-breaks found).")
[ "def", "com_google_fonts_check_name_line_breaks", "(", "ttFont", ")", ":", "failed", "=", "False", "for", "name", "in", "ttFont", "[", "\"name\"", "]", ".", "names", ":", "string", "=", "name", ".", "string", ".", "decode", "(", "name", ".", "getEncoding", "(", ")", ")", "if", "\"\\n\"", "in", "string", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"Name entry {} on platform {} contains\"", "\" a line-break.\"", ")", ".", "format", "(", "NameID", "(", "name", ".", "nameID", ")", ".", "name", ",", "PlatformID", "(", "name", ".", "platformID", ")", ".", "name", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "(", "\"Name table entries are all single-line\"", "\" (no line-breaks found).\"", ")" ]
Name table entries should not contain line-breaks.
[ "Name", "table", "entries", "should", "not", "contain", "line", "-", "breaks", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/name.py#L183-L195
train
googlefonts/fontbakery
Lib/fontbakery/profiles/name.py
com_google_fonts_check_name_match_familyname_fullfont
def com_google_fonts_check_name_match_familyname_fullfont(ttFont): """Does full font name begin with the font family name?""" from fontbakery.utils import get_name_entry_strings familyname = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME) fullfontname = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME) if len(familyname) == 0: yield FAIL, Message("no-font-family-name", ("Font lacks a NameID.FONT_FAMILY_NAME" " entry in the 'name' table.")) elif len(fullfontname) == 0: yield FAIL, Message("no-full-font-name", ("Font lacks a NameID.FULL_FONT_NAME" " entry in the 'name' table.")) else: # we probably should check all found values are equivalent. # and, in that case, then performing the rest of the check # with only the first occurences of the name entries # will suffice: fullfontname = fullfontname[0] familyname = familyname[0] if not fullfontname.startswith(familyname): yield FAIL, Message( "does-not", (" On the 'name' table, the full font name" " (NameID {} - FULL_FONT_NAME: '{}')" " does not begin with font family name" " (NameID {} - FONT_FAMILY_NAME:" " '{}')".format(NameID.FULL_FONT_NAME, familyname, NameID.FONT_FAMILY_NAME, fullfontname))) else: yield PASS, "Full font name begins with the font family name."
python
def com_google_fonts_check_name_match_familyname_fullfont(ttFont): """Does full font name begin with the font family name?""" from fontbakery.utils import get_name_entry_strings familyname = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME) fullfontname = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME) if len(familyname) == 0: yield FAIL, Message("no-font-family-name", ("Font lacks a NameID.FONT_FAMILY_NAME" " entry in the 'name' table.")) elif len(fullfontname) == 0: yield FAIL, Message("no-full-font-name", ("Font lacks a NameID.FULL_FONT_NAME" " entry in the 'name' table.")) else: # we probably should check all found values are equivalent. # and, in that case, then performing the rest of the check # with only the first occurences of the name entries # will suffice: fullfontname = fullfontname[0] familyname = familyname[0] if not fullfontname.startswith(familyname): yield FAIL, Message( "does-not", (" On the 'name' table, the full font name" " (NameID {} - FULL_FONT_NAME: '{}')" " does not begin with font family name" " (NameID {} - FONT_FAMILY_NAME:" " '{}')".format(NameID.FULL_FONT_NAME, familyname, NameID.FONT_FAMILY_NAME, fullfontname))) else: yield PASS, "Full font name begins with the font family name."
[ "def", "com_google_fonts_check_name_match_familyname_fullfont", "(", "ttFont", ")", ":", "from", "fontbakery", ".", "utils", "import", "get_name_entry_strings", "familyname", "=", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "FONT_FAMILY_NAME", ")", "fullfontname", "=", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "FULL_FONT_NAME", ")", "if", "len", "(", "familyname", ")", "==", "0", ":", "yield", "FAIL", ",", "Message", "(", "\"no-font-family-name\"", ",", "(", "\"Font lacks a NameID.FONT_FAMILY_NAME\"", "\" entry in the 'name' table.\"", ")", ")", "elif", "len", "(", "fullfontname", ")", "==", "0", ":", "yield", "FAIL", ",", "Message", "(", "\"no-full-font-name\"", ",", "(", "\"Font lacks a NameID.FULL_FONT_NAME\"", "\" entry in the 'name' table.\"", ")", ")", "else", ":", "# we probably should check all found values are equivalent.", "# and, in that case, then performing the rest of the check", "# with only the first occurences of the name entries", "# will suffice:", "fullfontname", "=", "fullfontname", "[", "0", "]", "familyname", "=", "familyname", "[", "0", "]", "if", "not", "fullfontname", ".", "startswith", "(", "familyname", ")", ":", "yield", "FAIL", ",", "Message", "(", "\"does-not\"", ",", "(", "\" On the 'name' table, the full font name\"", "\" (NameID {} - FULL_FONT_NAME: '{}')\"", "\" does not begin with font family name\"", "\" (NameID {} - FONT_FAMILY_NAME:\"", "\" '{}')\"", ".", "format", "(", "NameID", ".", "FULL_FONT_NAME", ",", "familyname", ",", "NameID", ".", "FONT_FAMILY_NAME", ",", "fullfontname", ")", ")", ")", "else", ":", "yield", "PASS", ",", "\"Full font name begins with the font family name.\"" ]
Does full font name begin with the font family name?
[ "Does", "full", "font", "name", "begin", "with", "the", "font", "family", "name?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/name.py#L201-L232
train
googlefonts/fontbakery
Lib/fontbakery/profiles/name.py
com_google_fonts_check_family_naming_recommendations
def com_google_fonts_check_family_naming_recommendations(ttFont): """Font follows the family naming recommendations?""" # See http://forum.fontlab.com/index.php?topic=313.0 import re from fontbakery.utils import get_name_entry_strings bad_entries = [] # <Postscript name> may contain only a-zA-Z0-9 # and one hyphen bad_psname = re.compile("[^A-Za-z0-9-]") for string in get_name_entry_strings(ttFont, NameID.POSTSCRIPT_NAME): if bad_psname.search(string): bad_entries.append({ 'field': 'PostScript Name', 'value': string, 'rec': ('May contain only a-zA-Z0-9' ' characters and an hyphen.') }) if string.count('-') > 1: bad_entries.append({ 'field': 'Postscript Name', 'value': string, 'rec': ('May contain not more' ' than a single hyphen') }) for string in get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME): if len(string) >= 64: bad_entries.append({ 'field': 'Full Font Name', 'value': string, 'rec': 'exceeds max length (63)' }) for string in get_name_entry_strings(ttFont, NameID.POSTSCRIPT_NAME): if len(string) >= 30: bad_entries.append({ 'field': 'PostScript Name', 'value': string, 'rec': 'exceeds max length (29)' }) for string in get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME): if len(string) >= 32: bad_entries.append({ 'field': 'Family Name', 'value': string, 'rec': 'exceeds max length (31)' }) for string in get_name_entry_strings(ttFont, NameID.FONT_SUBFAMILY_NAME): if len(string) >= 32: bad_entries.append({ 'field': 'Style Name', 'value': string, 'rec': 'exceeds max length (31)' }) for string in get_name_entry_strings(ttFont, NameID.TYPOGRAPHIC_FAMILY_NAME): if len(string) >= 32: bad_entries.append({ 'field': 'OT Family Name', 'value': string, 'rec': 'exceeds max length (31)' }) for string in get_name_entry_strings(ttFont, NameID.TYPOGRAPHIC_SUBFAMILY_NAME): if len(string) >= 32: bad_entries.append({ 'field': 'OT Style Name', 'value': string, 'rec': 'exceeds max length (31)' }) if len(bad_entries) > 0: table = "| Field | Value | Recommendation |\n" table += "|:----- |:----- |:-------------- |\n" for bad in bad_entries: table += "| {} | {} | {} |\n".format(bad["field"], bad["value"], bad["rec"]) yield INFO, ("Font does not follow " "some family naming recommendations:\n\n" "{}").format(table) else: yield PASS, "Font follows the family naming recommendations."
python
def com_google_fonts_check_family_naming_recommendations(ttFont): """Font follows the family naming recommendations?""" # See http://forum.fontlab.com/index.php?topic=313.0 import re from fontbakery.utils import get_name_entry_strings bad_entries = [] # <Postscript name> may contain only a-zA-Z0-9 # and one hyphen bad_psname = re.compile("[^A-Za-z0-9-]") for string in get_name_entry_strings(ttFont, NameID.POSTSCRIPT_NAME): if bad_psname.search(string): bad_entries.append({ 'field': 'PostScript Name', 'value': string, 'rec': ('May contain only a-zA-Z0-9' ' characters and an hyphen.') }) if string.count('-') > 1: bad_entries.append({ 'field': 'Postscript Name', 'value': string, 'rec': ('May contain not more' ' than a single hyphen') }) for string in get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME): if len(string) >= 64: bad_entries.append({ 'field': 'Full Font Name', 'value': string, 'rec': 'exceeds max length (63)' }) for string in get_name_entry_strings(ttFont, NameID.POSTSCRIPT_NAME): if len(string) >= 30: bad_entries.append({ 'field': 'PostScript Name', 'value': string, 'rec': 'exceeds max length (29)' }) for string in get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME): if len(string) >= 32: bad_entries.append({ 'field': 'Family Name', 'value': string, 'rec': 'exceeds max length (31)' }) for string in get_name_entry_strings(ttFont, NameID.FONT_SUBFAMILY_NAME): if len(string) >= 32: bad_entries.append({ 'field': 'Style Name', 'value': string, 'rec': 'exceeds max length (31)' }) for string in get_name_entry_strings(ttFont, NameID.TYPOGRAPHIC_FAMILY_NAME): if len(string) >= 32: bad_entries.append({ 'field': 'OT Family Name', 'value': string, 'rec': 'exceeds max length (31)' }) for string in get_name_entry_strings(ttFont, NameID.TYPOGRAPHIC_SUBFAMILY_NAME): if len(string) >= 32: bad_entries.append({ 'field': 'OT Style Name', 'value': string, 'rec': 'exceeds max length (31)' }) if len(bad_entries) > 0: table = "| Field | Value | Recommendation |\n" table += "|:----- |:----- |:-------------- |\n" for bad in bad_entries: table += "| {} | {} | {} |\n".format(bad["field"], bad["value"], bad["rec"]) yield INFO, ("Font does not follow " "some family naming recommendations:\n\n" "{}").format(table) else: yield PASS, "Font follows the family naming recommendations."
[ "def", "com_google_fonts_check_family_naming_recommendations", "(", "ttFont", ")", ":", "# See http://forum.fontlab.com/index.php?topic=313.0", "import", "re", "from", "fontbakery", ".", "utils", "import", "get_name_entry_strings", "bad_entries", "=", "[", "]", "# <Postscript name> may contain only a-zA-Z0-9", "# and one hyphen", "bad_psname", "=", "re", ".", "compile", "(", "\"[^A-Za-z0-9-]\"", ")", "for", "string", "in", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "POSTSCRIPT_NAME", ")", ":", "if", "bad_psname", ".", "search", "(", "string", ")", ":", "bad_entries", ".", "append", "(", "{", "'field'", ":", "'PostScript Name'", ",", "'value'", ":", "string", ",", "'rec'", ":", "(", "'May contain only a-zA-Z0-9'", "' characters and an hyphen.'", ")", "}", ")", "if", "string", ".", "count", "(", "'-'", ")", ">", "1", ":", "bad_entries", ".", "append", "(", "{", "'field'", ":", "'Postscript Name'", ",", "'value'", ":", "string", ",", "'rec'", ":", "(", "'May contain not more'", "' than a single hyphen'", ")", "}", ")", "for", "string", "in", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "FULL_FONT_NAME", ")", ":", "if", "len", "(", "string", ")", ">=", "64", ":", "bad_entries", ".", "append", "(", "{", "'field'", ":", "'Full Font Name'", ",", "'value'", ":", "string", ",", "'rec'", ":", "'exceeds max length (63)'", "}", ")", "for", "string", "in", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "POSTSCRIPT_NAME", ")", ":", "if", "len", "(", "string", ")", ">=", "30", ":", "bad_entries", ".", "append", "(", "{", "'field'", ":", "'PostScript Name'", ",", "'value'", ":", "string", ",", "'rec'", ":", "'exceeds max length (29)'", "}", ")", "for", "string", "in", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "FONT_FAMILY_NAME", ")", ":", "if", "len", "(", "string", ")", ">=", "32", ":", "bad_entries", ".", "append", "(", "{", "'field'", ":", "'Family Name'", ",", "'value'", ":", "string", ",", "'rec'", ":", "'exceeds max length (31)'", "}", ")", "for", "string", "in", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "FONT_SUBFAMILY_NAME", ")", ":", "if", "len", "(", "string", ")", ">=", "32", ":", "bad_entries", ".", "append", "(", "{", "'field'", ":", "'Style Name'", ",", "'value'", ":", "string", ",", "'rec'", ":", "'exceeds max length (31)'", "}", ")", "for", "string", "in", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "TYPOGRAPHIC_FAMILY_NAME", ")", ":", "if", "len", "(", "string", ")", ">=", "32", ":", "bad_entries", ".", "append", "(", "{", "'field'", ":", "'OT Family Name'", ",", "'value'", ":", "string", ",", "'rec'", ":", "'exceeds max length (31)'", "}", ")", "for", "string", "in", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "TYPOGRAPHIC_SUBFAMILY_NAME", ")", ":", "if", "len", "(", "string", ")", ">=", "32", ":", "bad_entries", ".", "append", "(", "{", "'field'", ":", "'OT Style Name'", ",", "'value'", ":", "string", ",", "'rec'", ":", "'exceeds max length (31)'", "}", ")", "if", "len", "(", "bad_entries", ")", ">", "0", ":", "table", "=", "\"| Field | Value | Recommendation |\\n\"", "table", "+=", "\"|:----- |:----- |:-------------- |\\n\"", "for", "bad", "in", "bad_entries", ":", "table", "+=", "\"| {} | {} | {} |\\n\"", ".", "format", "(", "bad", "[", "\"field\"", "]", ",", "bad", "[", "\"value\"", "]", ",", "bad", "[", "\"rec\"", "]", ")", "yield", "INFO", ",", "(", "\"Font does not follow \"", "\"some family naming recommendations:\\n\\n\"", "\"{}\"", ")", ".", "format", "(", 
"table", ")", "else", ":", "yield", "PASS", ",", "\"Font follows the family naming recommendations.\"" ]
Font follows the family naming recommendations?
[ "Font", "follows", "the", "family", "naming", "recommendations?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/name.py#L238-L332
train
googlefonts/fontbakery
Lib/fontbakery/profiles/name.py
com_google_fonts_check_name_rfn
def com_google_fonts_check_name_rfn(ttFont): """Name table strings must not contain the string 'Reserved Font Name'.""" failed = False for entry in ttFont["name"].names: string = entry.toUnicode() if "reserved font name" in string.lower(): yield WARN, ("Name table entry (\"{}\")" " contains \"Reserved Font Name\"." " This is an error except in a few specific" " rare cases.").format(string) failed = True if not failed: yield PASS, ("None of the name table strings" " contain \"Reserved Font Name\".")
python
def com_google_fonts_check_name_rfn(ttFont): """Name table strings must not contain the string 'Reserved Font Name'.""" failed = False for entry in ttFont["name"].names: string = entry.toUnicode() if "reserved font name" in string.lower(): yield WARN, ("Name table entry (\"{}\")" " contains \"Reserved Font Name\"." " This is an error except in a few specific" " rare cases.").format(string) failed = True if not failed: yield PASS, ("None of the name table strings" " contain \"Reserved Font Name\".")
[ "def", "com_google_fonts_check_name_rfn", "(", "ttFont", ")", ":", "failed", "=", "False", "for", "entry", "in", "ttFont", "[", "\"name\"", "]", ".", "names", ":", "string", "=", "entry", ".", "toUnicode", "(", ")", "if", "\"reserved font name\"", "in", "string", ".", "lower", "(", ")", ":", "yield", "WARN", ",", "(", "\"Name table entry (\\\"{}\\\")\"", "\" contains \\\"Reserved Font Name\\\".\"", "\" This is an error except in a few specific\"", "\" rare cases.\"", ")", ".", "format", "(", "string", ")", "failed", "=", "True", "if", "not", "failed", ":", "yield", "PASS", ",", "(", "\"None of the name table strings\"", "\" contain \\\"Reserved Font Name\\\".\"", ")" ]
Name table strings must not contain the string 'Reserved Font Name'.
[ "Name", "table", "strings", "must", "not", "contain", "the", "string", "Reserved", "Font", "Name", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/name.py#L338-L351
train
googlefonts/fontbakery
Lib/fontbakery/profiles/name.py
com_adobe_fonts_check_family_max_4_fonts_per_family_name
def com_adobe_fonts_check_family_max_4_fonts_per_family_name(ttFonts): """Verify that each group of fonts with the same nameID 1 has maximum of 4 fonts""" from collections import Counter from fontbakery.utils import get_name_entry_strings failed = False family_names = list() for ttFont in ttFonts: names_list = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME) # names_list will likely contain multiple entries, e.g. multiple copies # of the same name in the same language for different platforms, but # also different names in different languages, we use set() below # to remove the duplicates and only store the unique family name(s) # used for a given font names_set = set(names_list) family_names.extend(names_set) counter = Counter(family_names) for family_name, count in counter.items(): if count > 4: failed = True yield FAIL, ("Family '{}' has {} fonts (should be 4 or fewer)." ).format(family_name, count) if not failed: yield PASS, ("There were no more than 4 fonts per family name.")
python
def com_adobe_fonts_check_family_max_4_fonts_per_family_name(ttFonts): """Verify that each group of fonts with the same nameID 1 has maximum of 4 fonts""" from collections import Counter from fontbakery.utils import get_name_entry_strings failed = False family_names = list() for ttFont in ttFonts: names_list = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME) # names_list will likely contain multiple entries, e.g. multiple copies # of the same name in the same language for different platforms, but # also different names in different languages, we use set() below # to remove the duplicates and only store the unique family name(s) # used for a given font names_set = set(names_list) family_names.extend(names_set) counter = Counter(family_names) for family_name, count in counter.items(): if count > 4: failed = True yield FAIL, ("Family '{}' has {} fonts (should be 4 or fewer)." ).format(family_name, count) if not failed: yield PASS, ("There were no more than 4 fonts per family name.")
[ "def", "com_adobe_fonts_check_family_max_4_fonts_per_family_name", "(", "ttFonts", ")", ":", "from", "collections", "import", "Counter", "from", "fontbakery", ".", "utils", "import", "get_name_entry_strings", "failed", "=", "False", "family_names", "=", "list", "(", ")", "for", "ttFont", "in", "ttFonts", ":", "names_list", "=", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "FONT_FAMILY_NAME", ")", "# names_list will likely contain multiple entries, e.g. multiple copies", "# of the same name in the same language for different platforms, but", "# also different names in different languages, we use set() below", "# to remove the duplicates and only store the unique family name(s)", "# used for a given font", "names_set", "=", "set", "(", "names_list", ")", "family_names", ".", "extend", "(", "names_set", ")", "counter", "=", "Counter", "(", "family_names", ")", "for", "family_name", ",", "count", "in", "counter", ".", "items", "(", ")", ":", "if", "count", ">", "4", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"Family '{}' has {} fonts (should be 4 or fewer).\"", ")", ".", "format", "(", "family_name", ",", "count", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "(", "\"There were no more than 4 fonts per family name.\"", ")" ]
Verify that each group of fonts with the same nameID 1 has maximum of 4 fonts
[ "Verify", "that", "each", "group", "of", "fonts", "with", "the", "same", "nameID", "1", "has", "maximum", "of", "4", "fonts" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/name.py#L421-L445
train
googlefonts/fontbakery
Lib/fontbakery/profiles/cmap.py
com_google_fonts_check_family_equal_unicode_encodings
def com_google_fonts_check_family_equal_unicode_encodings(ttFonts): """Fonts have equal unicode encodings?""" encoding = None failed = False for ttFont in ttFonts: cmap = None for table in ttFont['cmap'].tables: if table.format == 4: cmap = table break # Could a font lack a format 4 cmap table ? # If we ever find one of those, it would crash the check here. # Then we'd have to yield a FAIL regarding the missing table entry. if not encoding: encoding = cmap.platEncID if encoding != cmap.platEncID: failed = True if failed: yield FAIL, "Fonts have different unicode encodings." else: yield PASS, "Fonts have equal unicode encodings."
python
def com_google_fonts_check_family_equal_unicode_encodings(ttFonts): """Fonts have equal unicode encodings?""" encoding = None failed = False for ttFont in ttFonts: cmap = None for table in ttFont['cmap'].tables: if table.format == 4: cmap = table break # Could a font lack a format 4 cmap table ? # If we ever find one of those, it would crash the check here. # Then we'd have to yield a FAIL regarding the missing table entry. if not encoding: encoding = cmap.platEncID if encoding != cmap.platEncID: failed = True if failed: yield FAIL, "Fonts have different unicode encodings." else: yield PASS, "Fonts have equal unicode encodings."
[ "def", "com_google_fonts_check_family_equal_unicode_encodings", "(", "ttFonts", ")", ":", "encoding", "=", "None", "failed", "=", "False", "for", "ttFont", "in", "ttFonts", ":", "cmap", "=", "None", "for", "table", "in", "ttFont", "[", "'cmap'", "]", ".", "tables", ":", "if", "table", ".", "format", "==", "4", ":", "cmap", "=", "table", "break", "# Could a font lack a format 4 cmap table ?", "# If we ever find one of those, it would crash the check here.", "# Then we'd have to yield a FAIL regarding the missing table entry.", "if", "not", "encoding", ":", "encoding", "=", "cmap", ".", "platEncID", "if", "encoding", "!=", "cmap", ".", "platEncID", ":", "failed", "=", "True", "if", "failed", ":", "yield", "FAIL", ",", "\"Fonts have different unicode encodings.\"", "else", ":", "yield", "PASS", ",", "\"Fonts have equal unicode encodings.\"" ]
Fonts have equal unicode encodings?
[ "Fonts", "have", "equal", "unicode", "encodings?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/cmap.py#L10-L30
train
googlefonts/fontbakery
Lib/fontbakery/profiles/cmap.py
com_google_fonts_check_all_glyphs_have_codepoints
def com_google_fonts_check_all_glyphs_have_codepoints(ttFont): """Check all glyphs have codepoints assigned.""" failed = False for subtable in ttFont['cmap'].tables: if subtable.isUnicode(): for item in subtable.cmap.items(): codepoint = item[0] if codepoint is None: failed = True yield FAIL, ("Glyph {} lacks a unicode" " codepoint assignment").format(codepoint) if not failed: yield PASS, "All glyphs have a codepoint value assigned."
python
def com_google_fonts_check_all_glyphs_have_codepoints(ttFont): """Check all glyphs have codepoints assigned.""" failed = False for subtable in ttFont['cmap'].tables: if subtable.isUnicode(): for item in subtable.cmap.items(): codepoint = item[0] if codepoint is None: failed = True yield FAIL, ("Glyph {} lacks a unicode" " codepoint assignment").format(codepoint) if not failed: yield PASS, "All glyphs have a codepoint value assigned."
[ "def", "com_google_fonts_check_all_glyphs_have_codepoints", "(", "ttFont", ")", ":", "failed", "=", "False", "for", "subtable", "in", "ttFont", "[", "'cmap'", "]", ".", "tables", ":", "if", "subtable", ".", "isUnicode", "(", ")", ":", "for", "item", "in", "subtable", ".", "cmap", ".", "items", "(", ")", ":", "codepoint", "=", "item", "[", "0", "]", "if", "codepoint", "is", "None", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"Glyph {} lacks a unicode\"", "\" codepoint assignment\"", ")", ".", "format", "(", "codepoint", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "\"All glyphs have a codepoint value assigned.\"" ]
Check all glyphs have codepoints assigned.
[ "Check", "all", "glyphs", "have", "codepoints", "assigned", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/cmap.py#L42-L54
train
googlefonts/fontbakery
Lib/fontbakery/reporters/__init__.py
FontbakeryReporter.run
def run(self, order=None): """ self.runner must be present """ for event in self.runner.run(order=order): self.receive(event)
python
def run(self, order=None): """ self.runner must be present """ for event in self.runner.run(order=order): self.receive(event)
[ "def", "run", "(", "self", ",", "order", "=", "None", ")", ":", "for", "event", "in", "self", ".", "runner", ".", "run", "(", "order", "=", "order", ")", ":", "self", ".", "receive", "(", "event", ")" ]
self.runner must be present
[ "self", ".", "runner", "must", "be", "present" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/reporters/__init__.py#L45-L50
train
googlefonts/fontbakery
Lib/fontbakery/profiles/universal.py
com_google_fonts_check_name_trailing_spaces
def com_google_fonts_check_name_trailing_spaces(ttFont): """Name table records must not have trailing spaces.""" failed = False for name_record in ttFont['name'].names: name_string = name_record.toUnicode() if name_string != name_string.strip(): failed = True name_key = tuple([name_record.platformID, name_record.platEncID, name_record.langID, name_record.nameID]) shortened_str = name_record.toUnicode() if len(shortened_str) > 20: shortened_str = shortened_str[:10] + "[...]" + shortened_str[-10:] yield FAIL, (f"Name table record with key = {name_key} has" " trailing spaces that must be removed:" f" '{shortened_str}'") if not failed: yield PASS, ("No trailing spaces on name table entries.")
python
def com_google_fonts_check_name_trailing_spaces(ttFont): """Name table records must not have trailing spaces.""" failed = False for name_record in ttFont['name'].names: name_string = name_record.toUnicode() if name_string != name_string.strip(): failed = True name_key = tuple([name_record.platformID, name_record.platEncID, name_record.langID, name_record.nameID]) shortened_str = name_record.toUnicode() if len(shortened_str) > 20: shortened_str = shortened_str[:10] + "[...]" + shortened_str[-10:] yield FAIL, (f"Name table record with key = {name_key} has" " trailing spaces that must be removed:" f" '{shortened_str}'") if not failed: yield PASS, ("No trailing spaces on name table entries.")
[ "def", "com_google_fonts_check_name_trailing_spaces", "(", "ttFont", ")", ":", "failed", "=", "False", "for", "name_record", "in", "ttFont", "[", "'name'", "]", ".", "names", ":", "name_string", "=", "name_record", ".", "toUnicode", "(", ")", "if", "name_string", "!=", "name_string", ".", "strip", "(", ")", ":", "failed", "=", "True", "name_key", "=", "tuple", "(", "[", "name_record", ".", "platformID", ",", "name_record", ".", "platEncID", ",", "name_record", ".", "langID", ",", "name_record", ".", "nameID", "]", ")", "shortened_str", "=", "name_record", ".", "toUnicode", "(", ")", "if", "len", "(", "shortened_str", ")", ">", "20", ":", "shortened_str", "=", "shortened_str", "[", ":", "10", "]", "+", "\"[...]\"", "+", "shortened_str", "[", "-", "10", ":", "]", "yield", "FAIL", ",", "(", "f\"Name table record with key = {name_key} has\"", "\" trailing spaces that must be removed:\"", "f\" '{shortened_str}'\"", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "(", "\"No trailing spaces on name table entries.\"", ")" ]
Name table records must not have trailing spaces.
[ "Name", "table", "records", "must", "not", "have", "trailing", "spaces", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/universal.py#L45-L61
train
googlefonts/fontbakery
Lib/fontbakery/profiles/universal.py
com_google_fonts_check_family_single_directory
def com_google_fonts_check_family_single_directory(fonts): """Checking all files are in the same directory. If the set of font files passed in the command line is not all in the same directory, then we warn the user since the tool will interpret the set of files as belonging to a single family (and it is unlikely that the user would store the files from a single family spreaded in several separate directories). """ directories = [] for target_file in fonts: directory = os.path.dirname(target_file) if directory not in directories: directories.append(directory) if len(directories) == 1: yield PASS, "All files are in the same directory." else: yield FAIL, ("Not all fonts passed in the command line" " are in the same directory. This may lead to" " bad results as the tool will interpret all" " font files as belonging to a single" " font family. The detected directories are:" " {}".format(directories))
python
def com_google_fonts_check_family_single_directory(fonts): """Checking all files are in the same directory. If the set of font files passed in the command line is not all in the same directory, then we warn the user since the tool will interpret the set of files as belonging to a single family (and it is unlikely that the user would store the files from a single family spreaded in several separate directories). """ directories = [] for target_file in fonts: directory = os.path.dirname(target_file) if directory not in directories: directories.append(directory) if len(directories) == 1: yield PASS, "All files are in the same directory." else: yield FAIL, ("Not all fonts passed in the command line" " are in the same directory. This may lead to" " bad results as the tool will interpret all" " font files as belonging to a single" " font family. The detected directories are:" " {}".format(directories))
[ "def", "com_google_fonts_check_family_single_directory", "(", "fonts", ")", ":", "directories", "=", "[", "]", "for", "target_file", "in", "fonts", ":", "directory", "=", "os", ".", "path", ".", "dirname", "(", "target_file", ")", "if", "directory", "not", "in", "directories", ":", "directories", ".", "append", "(", "directory", ")", "if", "len", "(", "directories", ")", "==", "1", ":", "yield", "PASS", ",", "\"All files are in the same directory.\"", "else", ":", "yield", "FAIL", ",", "(", "\"Not all fonts passed in the command line\"", "\" are in the same directory. This may lead to\"", "\" bad results as the tool will interpret all\"", "\" font files as belonging to a single\"", "\" font family. The detected directories are:\"", "\" {}\"", ".", "format", "(", "directories", ")", ")" ]
Checking all files are in the same directory. If the set of font files passed in the command line is not all in the same directory, then we warn the user since the tool will interpret the set of files as belonging to a single family (and it is unlikely that the user would store the files from a single family spreaded in several separate directories).
[ "Checking", "all", "files", "are", "in", "the", "same", "directory", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/universal.py#L194-L218
train
googlefonts/fontbakery
Lib/fontbakery/profiles/universal.py
com_google_fonts_check_ftxvalidator
def com_google_fonts_check_ftxvalidator(font): """Checking with ftxvalidator.""" import plistlib try: import subprocess ftx_cmd = [ "ftxvalidator", "-t", "all", # execute all checks font ] ftx_output = subprocess.check_output(ftx_cmd, stderr=subprocess.STDOUT) ftx_data = plistlib.loads(ftx_output) # we accept kATSFontTestSeverityInformation # and kATSFontTestSeverityMinorError if 'kATSFontTestSeverityFatalError' \ not in ftx_data['kATSFontTestResultKey']: yield PASS, "ftxvalidator passed this file" else: ftx_cmd = [ "ftxvalidator", "-T", # Human-readable output "-r", # Generate a full report "-t", "all", # execute all checks font ] ftx_output = subprocess.check_output(ftx_cmd, stderr=subprocess.STDOUT) yield FAIL, f"ftxvalidator output follows:\n\n{ftx_output}\n" except subprocess.CalledProcessError as e: yield ERROR, ("ftxvalidator returned an error code. Output follows:" "\n\n{}\n").format(e.output.decode('utf-8')) except OSError: yield ERROR, "ftxvalidator is not available!"
python
def com_google_fonts_check_ftxvalidator(font): """Checking with ftxvalidator.""" import plistlib try: import subprocess ftx_cmd = [ "ftxvalidator", "-t", "all", # execute all checks font ] ftx_output = subprocess.check_output(ftx_cmd, stderr=subprocess.STDOUT) ftx_data = plistlib.loads(ftx_output) # we accept kATSFontTestSeverityInformation # and kATSFontTestSeverityMinorError if 'kATSFontTestSeverityFatalError' \ not in ftx_data['kATSFontTestResultKey']: yield PASS, "ftxvalidator passed this file" else: ftx_cmd = [ "ftxvalidator", "-T", # Human-readable output "-r", # Generate a full report "-t", "all", # execute all checks font ] ftx_output = subprocess.check_output(ftx_cmd, stderr=subprocess.STDOUT) yield FAIL, f"ftxvalidator output follows:\n\n{ftx_output}\n" except subprocess.CalledProcessError as e: yield ERROR, ("ftxvalidator returned an error code. Output follows:" "\n\n{}\n").format(e.output.decode('utf-8')) except OSError: yield ERROR, "ftxvalidator is not available!"
[ "def", "com_google_fonts_check_ftxvalidator", "(", "font", ")", ":", "import", "plistlib", "try", ":", "import", "subprocess", "ftx_cmd", "=", "[", "\"ftxvalidator\"", ",", "\"-t\"", ",", "\"all\"", ",", "# execute all checks", "font", "]", "ftx_output", "=", "subprocess", ".", "check_output", "(", "ftx_cmd", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "ftx_data", "=", "plistlib", ".", "loads", "(", "ftx_output", ")", "# we accept kATSFontTestSeverityInformation", "# and kATSFontTestSeverityMinorError", "if", "'kATSFontTestSeverityFatalError'", "not", "in", "ftx_data", "[", "'kATSFontTestResultKey'", "]", ":", "yield", "PASS", ",", "\"ftxvalidator passed this file\"", "else", ":", "ftx_cmd", "=", "[", "\"ftxvalidator\"", ",", "\"-T\"", ",", "# Human-readable output", "\"-r\"", ",", "# Generate a full report", "\"-t\"", ",", "\"all\"", ",", "# execute all checks", "font", "]", "ftx_output", "=", "subprocess", ".", "check_output", "(", "ftx_cmd", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "yield", "FAIL", ",", "f\"ftxvalidator output follows:\\n\\n{ftx_output}\\n\"", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "yield", "ERROR", ",", "(", "\"ftxvalidator returned an error code. Output follows:\"", "\"\\n\\n{}\\n\"", ")", ".", "format", "(", "e", ".", "output", ".", "decode", "(", "'utf-8'", ")", ")", "except", "OSError", ":", "yield", "ERROR", ",", "\"ftxvalidator is not available!\"" ]
Checking with ftxvalidator.
[ "Checking", "with", "ftxvalidator", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/universal.py#L258-L292
train
googlefonts/fontbakery
Lib/fontbakery/profiles/universal.py
com_google_fonts_check_ots
def com_google_fonts_check_ots(font): """Checking with ots-sanitize.""" import ots try: process = ots.sanitize(font, check=True, capture_output=True) except ots.CalledProcessError as e: yield FAIL, ( "ots-sanitize returned an error code ({}). Output follows:\n\n{}{}" ).format(e.returncode, e.stderr.decode(), e.stdout.decode()) else: if process.stderr: yield WARN, ( "ots-sanitize passed this file, however warnings were printed:\n\n{}" ).format(process.stderr.decode()) else: yield PASS, "ots-sanitize passed this file"
python
def com_google_fonts_check_ots(font): """Checking with ots-sanitize.""" import ots try: process = ots.sanitize(font, check=True, capture_output=True) except ots.CalledProcessError as e: yield FAIL, ( "ots-sanitize returned an error code ({}). Output follows:\n\n{}{}" ).format(e.returncode, e.stderr.decode(), e.stdout.decode()) else: if process.stderr: yield WARN, ( "ots-sanitize passed this file, however warnings were printed:\n\n{}" ).format(process.stderr.decode()) else: yield PASS, "ots-sanitize passed this file"
[ "def", "com_google_fonts_check_ots", "(", "font", ")", ":", "import", "ots", "try", ":", "process", "=", "ots", ".", "sanitize", "(", "font", ",", "check", "=", "True", ",", "capture_output", "=", "True", ")", "except", "ots", ".", "CalledProcessError", "as", "e", ":", "yield", "FAIL", ",", "(", "\"ots-sanitize returned an error code ({}). Output follows:\\n\\n{}{}\"", ")", ".", "format", "(", "e", ".", "returncode", ",", "e", ".", "stderr", ".", "decode", "(", ")", ",", "e", ".", "stdout", ".", "decode", "(", ")", ")", "else", ":", "if", "process", ".", "stderr", ":", "yield", "WARN", ",", "(", "\"ots-sanitize passed this file, however warnings were printed:\\n\\n{}\"", ")", ".", "format", "(", "process", ".", "stderr", ".", "decode", "(", ")", ")", "else", ":", "yield", "PASS", ",", "\"ots-sanitize passed this file\"" ]
Checking with ots-sanitize.
[ "Checking", "with", "ots", "-", "sanitize", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/universal.py#L298-L314
train
googlefonts/fontbakery
Lib/fontbakery/profiles/universal.py
com_google_fonts_check_fontbakery_version
def com_google_fonts_check_fontbakery_version(): """Do we have the latest version of FontBakery installed?""" try: import subprocess installed_str = None latest_str = None is_latest = False failed = False pip_cmd = ["pip", "search", "fontbakery"] pip_output = subprocess.check_output(pip_cmd, stderr=subprocess.STDOUT) for line in pip_output.decode().split('\n'): if 'INSTALLED' in line: installed_str = line.split('INSTALLED')[1].strip() if 'LATEST' in line: latest_str = line.split('LATEST')[1].strip() if '(latest)' in line: is_latest = True if not (is_latest or is_up_to_date(installed_str, latest_str)): failed = True yield FAIL, (f"Current Font Bakery version is {installed_str}," f" while a newer {latest_str} is already available." " Please upgrade it with 'pip install -U fontbakery'") yield INFO, pip_output.decode() except subprocess.CalledProcessError as e: yield ERROR, ("Running 'pip search fontbakery' returned an error code." " Output follows :\n\n{}\n").format(e.output.decode()) if not failed: yield PASS, "Font Bakery is up-to-date"
python
def com_google_fonts_check_fontbakery_version():
    """Do we have the latest version of FontBakery installed?"""
    try:
        import subprocess
        installed_str = None
        latest_str = None
        is_latest = False
        failed = False
        pip_cmd = ["pip", "search", "fontbakery"]
        pip_output = subprocess.check_output(pip_cmd, stderr=subprocess.STDOUT)
        for line in pip_output.decode().split('\n'):
            if 'INSTALLED' in line:
                installed_str = line.split('INSTALLED')[1].strip()
            if 'LATEST' in line:
                latest_str = line.split('LATEST')[1].strip()
            if '(latest)' in line:
                is_latest = True

        if not (is_latest or is_up_to_date(installed_str, latest_str)):
            failed = True
            yield FAIL, (f"Current Font Bakery version is {installed_str},"
                         f" while a newer {latest_str} is already available."
                         " Please upgrade it with 'pip install -U fontbakery'")
        yield INFO, pip_output.decode()

    except subprocess.CalledProcessError as e:
        yield ERROR, ("Running 'pip search fontbakery' returned an error code."
                      " Output follows :\n\n{}\n").format(e.output.decode())

    if not failed:
        yield PASS, "Font Bakery is up-to-date"
[ "def", "com_google_fonts_check_fontbakery_version", "(", ")", ":", "try", ":", "import", "subprocess", "installed_str", "=", "None", "latest_str", "=", "None", "is_latest", "=", "False", "failed", "=", "False", "pip_cmd", "=", "[", "\"pip\"", ",", "\"search\"", ",", "\"fontbakery\"", "]", "pip_output", "=", "subprocess", ".", "check_output", "(", "pip_cmd", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "for", "line", "in", "pip_output", ".", "decode", "(", ")", ".", "split", "(", "'\\n'", ")", ":", "if", "'INSTALLED'", "in", "line", ":", "installed_str", "=", "line", ".", "split", "(", "'INSTALLED'", ")", "[", "1", "]", ".", "strip", "(", ")", "if", "'LATEST'", "in", "line", ":", "latest_str", "=", "line", ".", "split", "(", "'LATEST'", ")", "[", "1", "]", ".", "strip", "(", ")", "if", "'(latest)'", "in", "line", ":", "is_latest", "=", "True", "if", "not", "(", "is_latest", "or", "is_up_to_date", "(", "installed_str", ",", "latest_str", ")", ")", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "f\"Current Font Bakery version is {installed_str},\"", "f\" while a newer {latest_str} is already available.\"", "\" Please upgrade it with 'pip install -U fontbakery'\"", ")", "yield", "INFO", ",", "pip_output", ".", "decode", "(", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "yield", "ERROR", ",", "(", "\"Running 'pip search fontbakery' returned an error code.\"", "\" Output follows :\\n\\n{}\\n\"", ")", ".", "format", "(", "e", ".", "output", ".", "decode", "(", ")", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "\"Font Bakery is up-to-date\"" ]
Do we have the latest version of FontBakery installed?
[ "Do", "we", "have", "the", "latest", "version", "of", "FontBakery", "installed?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/universal.py#L343-L373
train
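The version check above hinges on parsing the INSTALLED/LATEST lines that `pip search` used to print. Below is a small self-contained sketch of that parsing run against a canned sample; the live `pip search` command has since been disabled on PyPI, so the sample text is illustrative only, and the `.strip(' :\t')` here is slightly more defensive than the check's plain `.strip()`.

SAMPLE_OUTPUT = (
    "fontbakery (0.7.0)  - Well designed Font project checker\n"
    "  INSTALLED: 0.6.13\n"
    "  LATEST:    0.7.0\n"
)  # made-up sample, not captured pip output


def parse_pip_search(output):
    installed = latest = None
    is_latest = False
    for line in output.split('\n'):
        if 'INSTALLED' in line:
            installed = line.split('INSTALLED')[1].strip(' :\t')
        if 'LATEST' in line:
            latest = line.split('LATEST')[1].strip(' :\t')
        if '(latest)' in line:
            # pip marked the installed version with "(latest)" when it was current
            is_latest = True
    return installed, latest, is_latest


print(parse_pip_search(SAMPLE_OUTPUT))  # ('0.6.13', '0.7.0', False)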
googlefonts/fontbakery
Lib/fontbakery/profiles/universal.py
com_google_fonts_check_fontforge_stderr
def com_google_fonts_check_fontforge_stderr(font, fontforge_check_results):
    """FontForge validation outputs error messages?"""
    if "skip" in fontforge_check_results:
        yield SKIP, fontforge_check_results["skip"]
        return

    filtered_err_msgs = ""
    for line in fontforge_check_results["ff_err_messages"].split('\n'):
        if ('The following table(s) in the font'
            ' have been ignored by FontForge') in line:
            continue
        if "Ignoring 'DSIG' digital signature table" in line:
            continue
        filtered_err_msgs += line + '\n'

    if len(filtered_err_msgs.strip()) > 0:
        yield WARN, ("FontForge seems to dislike certain aspects of this font file."
                     " The actual meaning of the log messages below is not always"
                     " clear and may require further investigation.\n\n"
                     "{}").format(filtered_err_msgs)
    else:
        yield PASS, "FontForge validation did not output any error message."
python
def com_google_fonts_check_fontforge_stderr(font, fontforge_check_results):
    """FontForge validation outputs error messages?"""
    if "skip" in fontforge_check_results:
        yield SKIP, fontforge_check_results["skip"]
        return

    filtered_err_msgs = ""
    for line in fontforge_check_results["ff_err_messages"].split('\n'):
        if ('The following table(s) in the font'
            ' have been ignored by FontForge') in line:
            continue
        if "Ignoring 'DSIG' digital signature table" in line:
            continue
        filtered_err_msgs += line + '\n'

    if len(filtered_err_msgs.strip()) > 0:
        yield WARN, ("FontForge seems to dislike certain aspects of this font file."
                     " The actual meaning of the log messages below is not always"
                     " clear and may require further investigation.\n\n"
                     "{}").format(filtered_err_msgs)
    else:
        yield PASS, "FontForge validation did not output any error message."
[ "def", "com_google_fonts_check_fontforge_stderr", "(", "font", ",", "fontforge_check_results", ")", ":", "if", "\"skip\"", "in", "fontforge_check_results", ":", "yield", "SKIP", ",", "fontforge_check_results", "[", "\"skip\"", "]", "return", "filtered_err_msgs", "=", "\"\"", "for", "line", "in", "fontforge_check_results", "[", "\"ff_err_messages\"", "]", ".", "split", "(", "'\\n'", ")", ":", "if", "(", "'The following table(s) in the font'", "' have been ignored by FontForge'", ")", "in", "line", ":", "continue", "if", "\"Ignoring 'DSIG' digital signature table\"", "in", "line", ":", "continue", "filtered_err_msgs", "+=", "line", "+", "'\\n'", "if", "len", "(", "filtered_err_msgs", ".", "strip", "(", ")", ")", ">", "0", ":", "yield", "WARN", ",", "(", "\"FontForge seems to dislike certain aspects of this font file.\"", "\" The actual meaning of the log messages below is not always\"", "\" clear and may require further investigation.\\n\\n\"", "\"{}\"", ")", ".", "format", "(", "filtered_err_msgs", ")", "else", ":", "yield", "PASS", ",", "\"FontForge validation did not output any error message.\"" ]
FontForge validation outputs error messages?
[ "FontForge", "validation", "outputs", "error", "messages?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/universal.py#L380-L401
train
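A minimal illustration of the same noise-filtering idea, independent of the FontForge hook: drop known-harmless log lines and keep the rest for a warning. The sample input below is made up for demonstration.

NOISE_MARKERS = (
    "The following table(s) in the font have been ignored by FontForge",
    "Ignoring 'DSIG' digital signature table",
)


def filter_ff_errors(err_text):
    kept = []
    for line in err_text.split('\n'):
        # Skip FontForge chatter that the check above treats as harmless.
        if any(marker in line for marker in NOISE_MARKERS):
            continue
        kept.append(line)
    return '\n'.join(kept).strip()


sample = ("Ignoring 'DSIG' digital signature table\n"
          "Self-intersecting contour detected in glyph 'a'\n")
print(filter_ff_errors(sample))  # only the second line survives the filter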
googlefonts/fontbakery
Lib/fontbakery/profiles/universal.py
com_google_fonts_check_mandatory_glyphs
def com_google_fonts_check_mandatory_glyphs(ttFont):
    """Font contains .notdef as first glyph?

    The OpenType specification v1.8.2 recommends that the first glyph is the
    .notdef glyph without a codepoint assigned and with a drawing.

    https://docs.microsoft.com/en-us/typography/opentype/spec/recom#glyph-0-the-notdef-glyph

    Pre-v1.8, it was recommended that a font should also contain a .null, CR and
    space glyph. This might have been relevant for applications on MacOS 9.
    """
    from fontbakery.utils import glyph_has_ink

    if (
        ttFont.getGlyphOrder()[0] == ".notdef"
        and ".notdef" not in ttFont.getBestCmap().values()
        and glyph_has_ink(ttFont, ".notdef")
    ):
        yield PASS, (
            "Font contains the .notdef glyph as the first glyph, it does "
            "not have a Unicode value assigned and contains a drawing."
        )
    else:
        yield WARN, (
            "Font should contain the .notdef glyph as the first glyph, "
            "it should not have a Unicode value assigned and should "
            "contain a drawing."
        )
python
def com_google_fonts_check_mandatory_glyphs(ttFont):
    """Font contains .notdef as first glyph?

    The OpenType specification v1.8.2 recommends that the first glyph is the
    .notdef glyph without a codepoint assigned and with a drawing.

    https://docs.microsoft.com/en-us/typography/opentype/spec/recom#glyph-0-the-notdef-glyph

    Pre-v1.8, it was recommended that a font should also contain a .null, CR and
    space glyph. This might have been relevant for applications on MacOS 9.
    """
    from fontbakery.utils import glyph_has_ink

    if (
        ttFont.getGlyphOrder()[0] == ".notdef"
        and ".notdef" not in ttFont.getBestCmap().values()
        and glyph_has_ink(ttFont, ".notdef")
    ):
        yield PASS, (
            "Font contains the .notdef glyph as the first glyph, it does "
            "not have a Unicode value assigned and contains a drawing."
        )
    else:
        yield WARN, (
            "Font should contain the .notdef glyph as the first glyph, "
            "it should not have a Unicode value assigned and should "
            "contain a drawing."
        )
[ "def", "com_google_fonts_check_mandatory_glyphs", "(", "ttFont", ")", ":", "from", "fontbakery", ".", "utils", "import", "glyph_has_ink", "if", "(", "ttFont", ".", "getGlyphOrder", "(", ")", "[", "0", "]", "==", "\".notdef\"", "and", "\".notdef\"", "not", "in", "ttFont", ".", "getBestCmap", "(", ")", ".", "values", "(", ")", "and", "glyph_has_ink", "(", "ttFont", ",", "\".notdef\"", ")", ")", ":", "yield", "PASS", ",", "(", "\"Font contains the .notdef glyph as the first glyph, it does \"", "\"not have a Unicode value assigned and contains a drawing.\"", ")", "else", ":", "yield", "WARN", ",", "(", "\"Font should contain the .notdef glyph as the first glyph, \"", "\"it should not have a Unicode value assigned and should \"", "\"contain a drawing.\"", ")" ]
Font contains .notdef as first glyph?

The OpenType specification v1.8.2 recommends that the first glyph is the
.notdef glyph without a codepoint assigned and with a drawing.

https://docs.microsoft.com/en-us/typography/opentype/spec/recom#glyph-0-the-notdef-glyph

Pre-v1.8, it was recommended that a font should also contain a .null, CR and
space glyph. This might have been relevant for applications on MacOS 9.
[ "Font", "contains", ".", "notdef", "as", "first", "glyph?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/universal.py#L559-L586
train
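To try the same .notdef conditions against a local font outside the Font Bakery runner, a short fontTools sketch is enough. The font path below is a hypothetical example; glyph_has_ink comes from fontbakery.utils exactly as in the check above.

from fontTools.ttLib import TTFont
from fontbakery.utils import glyph_has_ink

ttFont = TTFont("MyFamily-Regular.ttf")  # hypothetical font file

first_is_notdef = ttFont.getGlyphOrder()[0] == ".notdef"      # glyph 0 is .notdef
notdef_unmapped = ".notdef" not in ttFont.getBestCmap().values()  # no codepoint maps to it
notdef_has_drawing = glyph_has_ink(ttFont, ".notdef")          # it actually draws something

if first_is_notdef and notdef_unmapped and notdef_has_drawing:
    print("PASS: .notdef is first, has no codepoint, and has a drawing")
else:
    print("WARN: .notdef does not meet the OpenType recommendation")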