Dataset columns: language (6 values), original_string (25 to 887k chars), text (25 to 887k chars).
Python
def intersect_datasets(dataset1, dataset2, intersect_on='exam_codes'):
    """Returns the intersection of two dataset Bunches.

    The output is a dataset (Bunch). The intersection is on patient id
    and visit code or date.
    """
    if intersect_on not in ['exam_codes', 'exam_dates']:
        raise ValueError('intersect_on should be either '
                         'exam_codes or exam_dates')
    if 'subjects' not in dataset1.keys() or 'subjects' not in dataset2.keys():
        raise ValueError('Cannot intersect, Subject ID not found!')
    if (intersect_on not in dataset1.keys() or
            intersect_on not in dataset2.keys()):
        raise ValueError('Cannot intersect, ' + intersect_on + ' not found!')
    return 0
Python
def extract_baseline_dataset(dataset):
    """Returns baseline bunch of a dataset."""
    # equivalent keys are: 'sc', 'bl', 'scmri'
    idx = np.hstack((np.where(dataset.exam_codes2 == 'sc'),
                     np.where(dataset.exam_codes2 == 'bl'),
                     np.where(dataset.exam_codes2 == 'scmri'))).ravel()
    for k in dataset.keys():
        dataset[k] = np.array(dataset[k])
        dataset[k] = dataset[k][idx]
    return dataset
Python
def extract_unique_dataset(dataset):
    """Returns unique bunch of a dataset."""
    _, unique_idx = np.unique(dataset.subjects, return_index=True)
    for k in dataset.keys():
        dataset[k] = np.array(dataset[k])
        dataset[k] = dataset[k][unique_idx]
    return dataset
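A minimal sketch of the np.unique(..., return_index=True) behaviour the function relies on: the returned indices point at the first occurrence of each subject, so indexing with them keeps one record per subject (the subject IDs below are made up).

import numpy as np

subjects = np.array(['s01', 's02', 's01', 's03', 's02'])
_, unique_idx = np.unique(subjects, return_index=True)
print(unique_idx)            # [0 1 3]
print(subjects[unique_idx])  # ['s01' 's02' 's03']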
Python
def clean_text(tweet_text, strip_usertags=False, strip_hashtags=False):
    """
    Remove sections of the tweet text (clean) based on parameters

    :param tweet_text: Text for which sentiment should be measured
    :param strip_usertags: Whether to remove user tags from the tweets.
    :param strip_hashtags: Whether to remove hashtags from the tweets.
    """
    # Strip all the leading usertags
    while re.search(r"^@[a-zA-Z0-9_]+", tweet_text):
        tweet_text = re.sub(r"^@[a-zA-Z0-9_]+", '', tweet_text).strip()

    # Regex pattern for removing URLs
    pattern = r"http\S+|pic\.\S+|\xa0|…"

    if strip_usertags:
        pattern += r"|@[a-zA-Z0-9_]+"

    if strip_hashtags:
        pattern += r"|#[a-zA-Z0-9_]+"

    tweet_text = re.sub(pattern, '', tweet_text).strip()

    return tweet_text
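A brief usage sketch (the tweet text is invented; only the re module needs to be imported for clean_text to run):

import re

raw = "@alice @bob Check this out! #news https://t.co/abc123"
print(clean_text(raw))
# Check this out! #news
print(clean_text(raw, strip_hashtags=True))
# Check this out!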
Python
def is_statement(tweet_text, nlp):
    """
    Determines if the tweet is a self-contained statement

    :param tweet_text: The tweet's text
    :param nlp: A spaCy object
    """
    # Get the first sentence in the tweet text
    doc = nlp(tweet_text)

    # Create an array of sentence strings
    sentences = [sent.text.strip() for sent in doc.sents]

    # Process the first sentence in the tweet
    doc = nlp(sentences[0])

    ## Rule: If the subject is a person, place or thing then pass
    for token in doc:
        if token.dep_ == "nsubj":
            if token.pos_ == "NOUN" or token.pos_ == "PROPN":
                return 1

    ## Rule: If the subject is a personal pronoun, then pass
    for token in doc:
        if token.dep_ == "nsubj":
            if token.text.lower() in ("i", "me", "we", "us"):
                return 2

    ## Rule: If the first word is a conjunction, then fail
    # Find the first token in the sentence that is not punctuation
    for i, token in enumerate(doc):
        if token.pos_ != "PUNCT":
            break
    # If the first non-punctuation token is a conjunction
    if doc[i].pos_ == "CCONJ" or doc[i].pos_ == "CONJ":
        return -1

    ## Rule: If the tweet starts with a dependent clause, then fail
    # Find the first token in the sentence that is not punctuation
    for i, token in enumerate(doc):
        if token.pos_ != "PUNCT":
            break
    # Initialize the flag for finding a comma before the subject
    comma_found = False
    # Iterate through the sentence to find if a comma occurs before the subject
    for j, token in enumerate(doc):
        # If the token is not an initial punctuation
        if j >= i:
            if token.text == ",":
                comma_found = True
            # If the subject is found after a comma, then fail
            if token.dep_ == "nsubj" and comma_found:
                return -2

    ## Rule: If any of the objects of the sentence are pronouns, then fail
    # NOTE: as written, this check fires when the object is *not* a pronoun,
    # which appears to contradict the stated rule.
    for token in doc:
        if token.dep_ == "dobj" or token.dep_ == "obj":
            if token.pos_ != "PRON":
                return -3

    ## Rule: If any of the sentence's subjects are pronouns or determiners,
    ## then fail
    for token in doc:
        if token.dep_ == "nsubj" or token.dep_ == "nsubjpass":
            if token.pos_ == "PRON" or token.pos_ == "DET":
                return -4

    return 0
Python
def is_reply(tweet):
    """
    Determines if the tweet is a reply to another tweet.

    :param tweet: Twint tweet object to inspect
    """
    # A reply's conversation_id is different than the id_str
    if tweet.conversation_id != tweet.id_str:
        return True

    # If not a reply to another user, there will only be 1 entry in reply_to
    if len(tweet.reply_to) == 1:
        return False

    # Check to see if any of the other users "replied" are in the tweet text
    users = tweet.reply_to[1:]
    conversations = [user["username"].lower() in tweet.tweet.lower()
                     for user in users]

    # If any of the usernames are not present in the text, it must be a reply
    if sum(conversations) < len(users):
        return True

    # On older tweets, tweets starting with an "@" are de-facto replies
    if tweet.tweet.startswith('@'):
        return True

    return False
Python
def parse_path(file_path):
    """
    Function that returns the file type and the Windows absolute file path

    :param file_path: path to file
    :return: file path, file type
    """
    fpath, ftype = None, None
    try:
        fpath = pathlib.Path(file_path).absolute()
        ftype = get_file_type(file_path=fpath)
    except FileNotFoundError:
        pass
    # Note: if resolution failed, str(fpath) is the literal string 'None'
    return str(fpath), ftype
Python
def parse_inpaint_params(**kwargs):
    """
    function to parse the inpainting params
    :param kwargs:
    :return:
    """
    sigma_colour = 75
    max_level = 4
    patch_size = {
        "x": 5,
        "y": 5,
        "t": 5
    }
    texture_feature_activated = 1
    return [max_level, patch_size, texture_feature_activated, sigma_colour,
            "file"]
Python
def _get_validation_labels(val_path):
    """Returns labels for validation.

    Args:
        val_path: path to TAR file containing validation images. It is used
            to retrieve the name of pictures and associate them to labels.

    Returns:
        dict, mapping from image name (str) to label (str).
    """
    labels_path = tfds.core.get_tfds_path(_VALIDATION_LABELS_FNAME)
    with tf.io.gfile.GFile(labels_path) as labels_f:
        # `splitlines` to remove trailing `\r` in Windows
        labels = labels_f.read().strip().splitlines()
    with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:
        tar = tarfile.open(mode='r:', fileobj=tar_f_obj)
        images = sorted(tar.getnames())
    return dict(zip(images, labels))
Python
def _fix_image(self, image_fname, image):
    """Fix image color system and format starting from v 3.0.0."""
    if self.version < '3.0.0':
        return image
    if image_fname in CMYK_IMAGES:
        image = io.BytesIO(tfds.core.utils.jpeg_cmyk_to_rgb(image.read()))
    elif image_fname in PNG_IMAGES:
        image = io.BytesIO(tfds.core.utils.png_to_jpeg(image.read()))
    return image
Python
def decode_batch_example(self, tfexample_data):
    """Decode multiple features batched in a single tf.Tensor.

    This function is used to decode features wrapped in
    `tfds.features.Sequence()`.
    By default, this function applies `decode_example` on each individual
    element using `tf.map_fn`. However, for optimization, features can
    overwrite this method to apply a custom batch decoding.

    Args:
        tfexample_data: Same `tf.Tensor` inputs as `decode_example`, but with
            an additional first dimension for the sequence length.

    Returns:
        tensor_data: Tensor or dictionary of tensor, output of the
            tf.data.Dataset object
    """
    # Note: This all works fine in Eager mode (without tf.function) because
    # tf.data pipelines are always executed in Graph mode.

    # Apply the decoding to each of the individual distributed features.
    return tf.map_fn(
        self.decode_example,
        tfexample_data,
        dtype=self.dtype,
        parallel_iterations=10,
        back_prop=False,
        name='sequence_decode',
    )
Python
def decode_ragged_example(self, tfexample_data):
    """Decode nested features from a tf.RaggedTensor.

    This function is used to decode features wrapped in nested
    `tfds.features.Sequence()`.
    By default, this function applies `decode_batch_example` on the flat
    values of the ragged tensor. For optimization, features can overwrite
    this method to apply a custom batch decoding.

    Args:
        tfexample_data: `tf.RaggedTensor` inputs containing the nested encoded
            examples.

    Returns:
        tensor_data: The decoded `tf.RaggedTensor` or dictionary of tensor,
            output of the tf.data.Dataset object
    """
    return tf.ragged.map_flat_values(self.decode_batch_example,
                                     tfexample_data)
Python
def is_notebook():
    """Returns True if running in a notebook (Colab, Jupyter) environment."""
    # Inspired by the tqdm autonotebook code
    try:
        import IPython  # pytype: disable=import-error  # pylint: disable=import-outside-toplevel,g-import-not-at-top
        if 'IPKernelApp' not in IPython.get_ipython().config:
            return False  # Run in an IPython terminal
    except:  # pylint: disable=bare-except
        return False
    else:
        return True
Python
@contextlib.contextmanager
def temporary_assignment(obj, attr, value):
    """Temporarily assign obj.attr to value."""
    original = getattr(obj, attr)
    setattr(obj, attr, value)
    try:
        yield
    finally:
        setattr(obj, attr, original)
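A small usage sketch of the context manager (the Config class is illustrative): the attribute is patched inside the with block and restored by the finally clause even if the block raises.

class Config:
    verbose = True

cfg = Config()
with temporary_assignment(cfg, 'verbose', False):
    print(cfg.verbose)  # False inside the block
print(cfg.verbose)      # True again after the block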
Python
def zip_nested(arg0, *args, **kwargs):
    """Zip data struct together and return a data struct with the same shape."""
    # Python 2 does not support keyword-only arguments
    dict_only = kwargs.pop('dict_only', False)
    assert not kwargs

    # Could add support for more exotic data_struct, like OrderedDict
    if isinstance(arg0, dict):
        return {
            k: zip_nested(*a, dict_only=dict_only)
            for k, a in zip_dict(arg0, *args)
        }
    elif not dict_only:
        if isinstance(arg0, list):
            return [zip_nested(*a, dict_only=dict_only)
                    for a in zip(arg0, *args)]

    # Singleton
    return (arg0,) + args
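A short sketch of the expected behaviour, assuming the companion zip_dict helper from the same module yields (key, (value1, value2, ...)) pairs:

a = {'x': 1, 'y': [2, 3]}
b = {'x': 10, 'y': [20, 30]}
print(zip_nested(a, b))
# {'x': (1, 10), 'y': [(2, 20), (3, 30)]}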
Python
def flatten_nest_dict(d):
    """Return the dict with all nested keys flattened joined with '/'."""
    # Use NonMutableDict to ensure there is no collision between features keys
    flat_dict = NonMutableDict()
    for k, v in d.items():
        if isinstance(v, dict):
            flat_dict.update({
                '{}/{}'.format(k, k2): v2
                for k2, v2 in flatten_nest_dict(v).items()
            })
        else:
            flat_dict[k] = v
    return flat_dict
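For example (the return value is a dict-like NonMutableDict, shown here as a plain dict):

flatten_nest_dict({'image': {'height': 28, 'width': 28}, 'label': 3})
# {'image/height': 28, 'image/width': 28, 'label': 3}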
Python
def pack_as_nest_dict(flat_d, nest_d):
    """Pack a 1-lvl dict into a nested dict with same structure as `nest_d`."""
    nest_out_d = {}
    for k, v in nest_d.items():
        if isinstance(v, dict):
            v_flat = flatten_nest_dict(v)
            sub_d = {
                k2: flat_d.pop('{}/{}'.format(k, k2)) for k2, _ in v_flat.items()
            }
            # Recursively pack the dictionary
            nest_out_d[k] = pack_as_nest_dict(sub_d, v)
        else:
            nest_out_d[k] = flat_d.pop(k)
    if flat_d:  # At the end, flat_d should be empty
        raise ValueError(
            'Flat dict structure does not match the nested dict. Extra keys: '
            '{}'.format(list(flat_d.keys())))
    return nest_out_d
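This is the inverse of flatten_nest_dict; a small round-trip sketch (a copy of the flat dict is passed because the function consumes it with pop):

nested = {'image': {'height': 28, 'width': 28}, 'label': 3}
flat = dict(flatten_nest_dict(nested))
print(pack_as_nest_dict(dict(flat), nested))
# {'image': {'height': 28, 'width': 28}, 'label': 3}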
Python
def as_proto_cls(proto_cls):
    """Simulate proto inheritance.

    By default, protobuf does not support direct inheritance, so this decorator
    simulates inheritance to the class to which it is applied.

    Example:

    ```
    @as_proto_cls(proto.MyProto)
    class A(object):
        def custom_method(self):
            return self.proto_field * 10

    p = proto.MyProto(proto_field=123)

    a = A()
    a.CopyFrom(p)  # a is like a proto object
    assert a.proto_field == 123
    a.custom_method()  # But has additional methods
    ```

    Args:
        proto_cls: The protobuf class to inherit from

    Returns:
        decorated_cls: The decorated class
    """

    def decorator(cls):
        """Decorator applied to the class."""

        class ProtoCls(object):
            """Base class simulating the protobuf."""

            def __init__(self, *args, **kwargs):
                super(ProtoCls, self).__setattr__(
                    '_ProtoCls__proto',
                    proto_cls(*args, **kwargs),
                )

            def __getattr__(self, attr_name):
                return getattr(self.__proto, attr_name)

            def __setattr__(self, attr_name, new_value):
                try:
                    if isinstance(new_value, list):
                        self.ClearField(attr_name)
                        getattr(self.__proto, attr_name).extend(new_value)
                    else:
                        return setattr(self.__proto, attr_name, new_value)
                except AttributeError:
                    return super(ProtoCls, self).__setattr__(attr_name, new_value)

            def __eq__(self, other):
                return self.__proto == other.get_proto()

            def get_proto(self):
                return self.__proto

            def __repr__(self):
                return '<{cls_name}\n{proto_repr}\n>'.format(
                    cls_name=cls.__name__, proto_repr=repr(self.__proto))

        decorator_cls = type(cls.__name__, (cls, ProtoCls), {
            '__doc__': cls.__doc__,
        })
        return decorator_cls

    return decorator
Python
def decorator(cls):
    """Decorator applied to the class."""

    class ProtoCls(object):
        """Base class simulating the protobuf."""

        def __init__(self, *args, **kwargs):
            super(ProtoCls, self).__setattr__(
                '_ProtoCls__proto',
                proto_cls(*args, **kwargs),
            )

        def __getattr__(self, attr_name):
            return getattr(self.__proto, attr_name)

        def __setattr__(self, attr_name, new_value):
            try:
                if isinstance(new_value, list):
                    self.ClearField(attr_name)
                    getattr(self.__proto, attr_name).extend(new_value)
                else:
                    return setattr(self.__proto, attr_name, new_value)
            except AttributeError:
                return super(ProtoCls, self).__setattr__(attr_name, new_value)

        def __eq__(self, other):
            return self.__proto == other.get_proto()

        def get_proto(self):
            return self.__proto

        def __repr__(self):
            return '<{cls_name}\n{proto_repr}\n>'.format(
                cls_name=cls.__name__, proto_repr=repr(self.__proto))

    decorator_cls = type(cls.__name__, (cls, ProtoCls), {
        '__doc__': cls.__doc__,
    })
    return decorator_cls
Python
def _get_incomplete_path(filename):
    """Returns a temporary filename based on filename."""
    random_suffix = ''.join(
        random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
    return filename + '.incomplete' + random_suffix
Python
@contextlib.contextmanager
def incomplete_dir(dirname):
    """Create temporary dir for dirname and rename on exit."""
    tmp_dir = _get_incomplete_path(dirname)
    tf.io.gfile.makedirs(tmp_dir)
    try:
        yield tmp_dir
        tf.io.gfile.rename(tmp_dir, dirname)
    finally:
        if tf.io.gfile.exists(tmp_dir):
            tf.io.gfile.rmtree(tmp_dir)
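A usage sketch (the dataset path is made up): files are written into the temporary directory, which is renamed to the final name only if the block finishes without raising; otherwise the leftover temporary directory is removed.

with incomplete_dir('/data/my_dataset/1.0.0') as tmp_dir:
    with tf.io.gfile.GFile(os.path.join(tmp_dir, 'dataset_info.json'), 'w') as f:
        f.write('{}')
# On success, /data/my_dataset/1.0.0 exists and the temporary directory is gone.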
Python
@contextlib.contextmanager
def atomic_write(path, mode):
    """Writes to path atomically, by writing to temp file and renaming it."""
    tmp_path = '%s%s_%s' % (path, constants.INCOMPLETE_SUFFIX, uuid.uuid4().hex)
    with tf.io.gfile.GFile(tmp_path, mode) as file_:
        yield file_
    tf.io.gfile.rename(tmp_path, path, overwrite=True)
Python
def read_checksum_digest(path, checksum_cls=hashlib.sha256):
    """Given a hash constructor, returns checksum digest and size of file."""
    checksum = checksum_cls()
    size = 0
    with tf.io.gfile.GFile(path, 'rb') as f:
        while True:
            block = f.read(io.DEFAULT_BUFFER_SIZE)
            size += len(block)
            if not block:
                break
            checksum.update(block)
    return checksum.hexdigest(), size
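A usage sketch (the archive path is made up):

sha256_hex, num_bytes = read_checksum_digest('/tmp/downloads/archive.zip')
print(sha256_hex)  # 64-character hexadecimal SHA-256 digest
print(num_bytes)   # file size in bytes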
Python
def reraise(prefix=None, suffix=None):
    """Reraise an exception with an additional message."""
    exc_type, exc_value, exc_traceback = sys.exc_info()
    prefix = prefix or ''
    suffix = '\n' + suffix if suffix else ''
    msg = prefix + str(exc_value) + suffix
    six.reraise(exc_type, exc_type(msg), exc_traceback)
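A usage sketch: called from inside an except block, it re-raises the active exception with extra context prepended (and optionally appended).

try:
    int('not-a-number')
except ValueError:
    reraise(prefix='While parsing the config: ')
# ValueError: While parsing the config: invalid literal for int() with base 10: 'not-a-number'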
Python
def rgetattr(obj, attr, *args):
    """Get attr that handles dots in attr name."""
    def _getattr(obj, attr):
        return getattr(obj, attr, *args)
    return functools.reduce(_getattr, [obj] + attr.split('.'))
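A usage sketch (the classes are illustrative); an optional default supplied through *args is returned when an attribute in the chain is missing:

class Inner:
    value = 42

class Outer:
    inner = Inner()

print(rgetattr(Outer(), 'inner.value'))       # 42
print(rgetattr(Outer(), 'inner.missing', 0))  # 0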
Python
def list_info_files(dir_path: str) -> List[str]:
    """Returns name of info files within dir_path."""
    # TODO(tfds): Is there a better filtering scheme which would be more
    # resistant to future modifications (ex: tfrecord => other format)
    return [
        fname for fname in tf.io.gfile.listdir(dir_path)
        if '.tfrecord' not in fname and
        not tf.io.gfile.isdir(os.path.join(dir_path, fname))
    ]
Python
def run_in_graph_and_eager_modes(func=None, config=None, use_gpu=True):
    """Execute the decorated test in both graph mode and eager mode.

    This function returns a decorator intended to be applied to test methods in
    a `test_case.TestCase` class. Doing so will cause the contents of the test
    method to be executed twice - once in graph mode, and once with eager
    execution enabled. This allows unittests to confirm the equivalence between
    eager and graph execution.

    NOTE: This decorator can only be used when executing eagerly in the
    outer scope.

    For example, consider the following unittest:

    ```python
    class SomeTest(tfds.testing.TestCase):

      @tfds.testing.run_in_graph_and_eager_modes
      def test_foo(self):
        x = tf.constant([1, 2])
        y = tf.constant([3, 4])
        z = tf.add(x, y)
        self.assertAllEqual([4, 6], self.evaluate(z))

    if __name__ == '__main__':
      tfds.testing.test_main()
    ```

    This test validates that `tf.add()` has the same behavior when computed with
    eager execution enabled as it does when constructing a TensorFlow graph and
    executing the `z` tensor with a session.

    Args:
        func: function to be annotated. If `func` is None, this method returns
            a decorator that can be applied to a function. If `func` is not
            None this returns the decorator applied to `func`.
        config: An optional config_pb2.ConfigProto to use to configure the
            session when executing graphs.
        use_gpu: If True, attempt to run as many operations as possible on GPU.

    Returns:
        Returns a decorator that will run the decorated test method twice:
        once by constructing and executing a graph in a session and once with
        eager execution enabled.
    """

    def decorator(f):
        """Decorator for a method."""

        def decorated(self, *args, **kwargs):
            """Run the decorated test method."""
            if not tf.executing_eagerly():
                raise ValueError('Must be executing eagerly when using the '
                                 'run_in_graph_and_eager_modes decorator.')

            # Run eager block
            f(self, *args, **kwargs)
            self.tearDown()

            # Run in graph mode block
            with tf.Graph().as_default():
                self.setUp()
                with self.test_session(use_gpu=use_gpu, config=config):
                    f(self, *args, **kwargs)

        return decorated

    if func is not None:
        return decorator(func)

    return decorator
Python
def decorated(self, *args, **kwargs):
    """Run the decorated test method."""
    if not tf.executing_eagerly():
        raise ValueError('Must be executing eagerly when using the '
                         'run_in_graph_and_eager_modes decorator.')

    # Run eager block
    f(self, *args, **kwargs)
    self.tearDown()

    # Run in graph mode block
    with tf.Graph().as_default():
        self.setUp()
        with self.test_session(use_gpu=use_gpu, config=config):
            f(self, *args, **kwargs)
Python
def assertFeature(self, feature, shape, dtype, tests, serialized_info=None):
    """Test the given feature against the predicates."""
    # Check the shape/dtype
    with self._subTest('shape'):
        self.assertEqual(feature.shape, shape)
    with self._subTest('dtype'):
        self.assertEqual(feature.dtype, dtype)

    # Check the serialized features
    if serialized_info is not None:
        with self._subTest('serialized_info'):
            self.assertEqual(
                serialized_info,
                feature.get_serialized_info(),
            )

    # Create the feature dict
    fdict = features.FeaturesDict({'inner': feature})
    fdict._set_top_level()  # pylint: disable=protected-access

    for i, test in enumerate(tests):
        with self._subTest(str(i)):
            self.assertFeatureTest(
                fdict=fdict,
                test=test,
                feature=feature,
                shape=shape,
                dtype=dtype,
            )
Python
def assertFeatureTest(self, fdict, test, feature, shape, dtype):
    """Test that encode=>decoding of a value works correctly."""
    # test feature.encode_example can be pickled and unpickled for beam.
    dill.loads(dill.dumps(feature.encode_example))

    input_value = {'inner': test.value}

    if test.raise_cls is not None:
        with self._subTest('raise'):
            if not test.raise_msg:
                raise ValueError(
                    'test.raise_msg should be set with {} for test {}'.format(
                        test.raise_cls, type(feature)))
            with self.assertRaisesWithPredicateMatch(test.raise_cls,
                                                     test.raise_msg):
                features_encode_decode(fdict, input_value,
                                       decoders=test.decoders)
    else:
        # Test the serialization only
        if test.expected_serialized is not None:
            with self._subTest('out_serialize'):
                self.assertEqual(
                    test.expected_serialized,
                    feature.encode_example(test.value),
                )

        # Test serialization + decoding from disk
        with self._subTest('out'):
            out_tensor, out_numpy = features_encode_decode(
                fdict,
                input_value,
                decoders={'inner': test.decoders},
            )
            out_tensor = out_tensor['inner']
            out_numpy = out_numpy['inner']

            # Assert the returned type matches the expected one
            with self._subTest('dtype'):
                out_dtypes = tf.nest.map_structure(lambda s: s.dtype, out_tensor)
                self.assertEqual(out_dtypes, test.dtype or feature.dtype)
            with self._subTest('shape'):
                # For shape, because (None, 3) matches with (5, 3), we use
                # tf.TensorShape.assert_is_compatible_with on each of the elements
                expected_shape = feature.shape if test.shape is None else test.shape
                out_shapes = utils.zip_nested(out_tensor, expected_shape)
                utils.map_nested(
                    lambda x: x[0].shape.assert_is_compatible_with(x[1]),
                    out_shapes
                )

            # Assert value
            with self._subTest('out_value'):
                # Eventually construct the tf.RaggedTensor
                expected = tf.nest.map_structure(
                    lambda t: t.build() if isinstance(t, RaggedConstant) else t,
                    test.expected)
                self.assertAllEqualNested(out_numpy, expected)

            # Assert the HTML representation works
            with self._subTest('repr'):
                self._test_repr(feature, out_numpy)
Python
@contextlib.contextmanager
def mock_kaggle_api(err_msg=None):
    """Mock out the kaggle CLI.

    Args:
        err_msg: `str`, if provided, the kaggle CLI will raise a
            CalledProcessError and this will be the command output.

    Yields:
        None, context will have kaggle CLI mocked out.
    """

    def check_output(command_args, encoding=None):
        """Mock subprocess.check_output for download call."""
        assert encoding
        assert command_args[2] == 'download'
        competition_or_dataset = command_args[-1]
        if err_msg:
            raise subprocess.CalledProcessError(1, command_args, err_msg)
        out_dir = command_args[command_args.index('--path') + 1]
        fpath = os.path.join(out_dir, 'output.txt')
        with tf.io.gfile.GFile(fpath, 'w') as f:
            f.write(competition_or_dataset)
        return 'Downloading {} to {}'.format(competition_or_dataset, fpath)

    with absltest.mock.patch('subprocess.check_output', check_output):
        yield
Python
def check_output(command_args, encoding=None):
    """Mock subprocess.check_output for download call."""
    assert encoding
    assert command_args[2] == 'download'
    competition_or_dataset = command_args[-1]
    if err_msg:
        raise subprocess.CalledProcessError(1, command_args, err_msg)
    out_dir = command_args[command_args.index('--path') + 1]
    fpath = os.path.join(out_dir, 'output.txt')
    with tf.io.gfile.GFile(fpath, 'w') as f:
        f.write(competition_or_dataset)
    return 'Downloading {} to {}'.format(competition_or_dataset, fpath)
Python
def normalize_hostname(myservername, myinstanceid):
    """
    Generate Nextdoor-normalized hostname w/ embedded Amazon instance ID.

    Args:
        myservername (str): the servername minus the instance ID
        myinstanceid (str): the Amazon instance ID

    Returns:
        string with normalized Nextdoor-legit hostname
    """
    myinstanceid = re.sub('[-]+', '', myinstanceid)
    return re.sub(r'\.', '',
                  utils.normalize_hostname_to_rfc(
                      "{}-{}".format(myservername, myinstanceid)))
Python
def normalize_domain(mydomain):
    """
    Nextdoor-normalize the passed domain name.

    Args:
        mydomain (str): the domain name to normalize

    Returns:
        a normalized hostname as a string
    """
    return utils.normalize_hostname_to_rfc(mydomain)
Python
def install_dependencies():
    """
    Install some tooling which may be needed to bootstrap Puppet.
    """
    debs = 'wget'
    blacklist_debs = 'puppet'

    environ['DEBIAN_FRONTEND'] = 'noninteractive'
    environ['DEBCONF_INTERACTIVE_SEEN'] = 'true'

    apt_get_update()
    assert_command('apt-get install -y ' + debs,
                   'Unable to install required .debs!')
    assert_command('apt-get remove --purge -y ' + blacklist_debs,
                   'Unable to uninstall blacklisted .debs!')
Python
def configure_puppet_external_facts():
    """
    Create external Facts from misc RightInputs.
    """
    if 'PUPPET_CUSTOM_FACTS' in environ:
        # take the envvar apart and reconstitute it as a dict
        validate_env('PUPPET_CUSTOM_FACTS', r'^\w+=.+(,\w+=.+)*$')
        fact_dict = {}
        facts = environ['PUPPET_CUSTOM_FACTS'].split(',')
        for fact in facts:
            log_and_stdout("fact: {}".format(str(fact)))
            (key, value) = fact.split('=', 1)
            fact_dict[key] = value

        # construct some YAML and dump it into the external fact file
        try:
            mkdir_p('/etc/facter/facts.d')
            with open('/etc/facter/facts.d/nextdoor_misc_rightscale_inputs.yaml',
                      'w') as outfile:
                outfile.write(
                    yaml.dump(fact_dict, explicit_start=True,
                              default_flow_style=False))
        except IOError as e:
            sys.exit(" *** {} :: {} :: {} *** ".format(e.errno, e.filename,
                                                       e.strerror))
Python
def bootstrap_puppet_agent_config():
    """
    Adjust various settings in puppet.conf and create an external Facts
    file for Puppet-specific stuff.
    """
    dmc = '^.+$'
    for key, regex in {
            'PUPPET_ENVIRONMENT_NAME': dmc,
            'PUPPET_SERVER_HOSTNAME': dmc,
            'PUPPET_CA_SERVER': dmc,
            'PUPPET_ENABLE_REPORTS': '^(true|false)$',
            'PUPPET_NODE_NAME': '^(cert|facter)$',
            'PUPPET_NODE_NAME_FACT': dmc
    }.items():
        validate_env(key, regex)

    #
    # The logic around how a node is identified based on:
    #
    #   * puppet_node_name
    #   * puppet_node_name_fact
    #   * puppet_node_name_value
    #
    # is fairly ridiculous. I think the easiest way to document
    # the madness which follows is to draw a table.
    #
    # Those familiar with paleo-Puppet node classification techniques will
    # note that the above variables are just puppet.conf settings prefixed
    # with 'puppet_'.
    #
    # | puppet_node_name | puppet_node_name_fact | puppet_node_name_value |
    # |-------------------------------------------------------------------|
    # | cert             | $ facter hostname     | not referenced         |
    # |-------------------------------------------------------------------|
    # | facter           | if 'puppet_node'      | ^.+$ aka not nuthin'   |
    # |                  |------------------------------------------------|
    # |                  | if ! 'puppet_node'    | not referenced         |
    # ---------------------------------------------------------------------
    #
    puppet_node_name = environ['PUPPET_NODE_NAME']
    puppet_node_name_fact = environ['PUPPET_NODE_NAME_FACT']
    puppet_node_name_value = ''

    log_and_stdout(
        "Puppet node name resolution:: puppet_node_name: {}".format(
            puppet_node_name))

    if 'cert' == puppet_node_name:
        if 'hostname' != puppet_node_name_fact:
            log_and_stdout(
                "{} => {} forced to {} => {}".format(
                    'node_name_fact', puppet_node_name,
                    'node_name_fact', 'hostname'))
            puppet_node_name_fact = 'hostname'

    if 'facter' == puppet_node_name:
        if 'puppet_node' == puppet_node_name_fact:
            validate_env('PUPPET_NODE_NAME_VALUE', dmc)
            puppet_node_name_value = environ['PUPPET_NODE_NAME_VALUE']
            log_and_stdout(
                "puppet_node => {}".format(puppet_node_name_value))
        else:
            if '' != puppet_node_name_value:
                log_and_stdout(
                    "Ignoring PUPPET_NODE_NAME_VALUE because "
                    "PUPPET_NODE_NAME_FACT != 'puppet_node'")

    #
    # If all of the validations and fiddling about with puppet_node has
    # worked out then let's update the puppet.conf and some external facts.
    #

    # puppet.conf settings
    puppet_settings = {
        'environment': environ['PUPPET_ENVIRONMENT_NAME'],
        'server': environ['PUPPET_SERVER_HOSTNAME'],
        'ca_server': environ['PUPPET_CA_SERVER'],
        'report': environ['PUPPET_ENABLE_REPORTS'],
        'node_name': puppet_node_name,
        'node_name_fact': puppet_node_name_fact,
    }

    external_facts = {
        'puppet_environment': environ['PUPPET_ENVIRONMENT_NAME'],
        'puppet_server': environ['PUPPET_SERVER_HOSTNAME'],
        'puppet_ca_server': environ['PUPPET_CA_SERVER'],
    }

    if 'cert' == puppet_node_name:
        pass  # just here for completeness and transparency
    elif 'facter' == puppet_node_name:
        if 'puppet_node' == puppet_node_name_fact:
            # This could live in puppet.conf as node_name_value but this
            # makes it visible via 'facter puppet_node'.
            external_facts['puppet_node'] = "{}|{}".format(
                puppet_node_name_value, getfqdn())
        else:
            pass  # this here for completeness and transparency

    for setting, value in puppet_settings.items():
        assert_command(
            '/usr/bin/puppet config set {} {} --section agent'.format(
                setting, value),
            'Failed to set \'{}\' to \'{}\' in puppet.conf!'.format(setting,
                                                                    value))

    # Drop some external Facts for Puppet settings
    try:
        mkdir_p('/etc/facter/facts.d')
        with open('/etc/facter/facts.d/nextdoor_puppet.yaml', 'w') as outfile:
            outfile.write(
                yaml.dump(external_facts, explicit_start=True,
                          default_flow_style=False))
    except IOError as e:
        sys.exit(" *** {} :: {} :: {} *** ".format(
            e.errno, e.filename, e.strerror))
Python
def install_puppet_agent():
    """
    Install the Puppet agent repo and packages.
    """
    validate_env('PUPPET_AGENT_VERSION', r'^([\w\.\-]+|PC\d+)$')
    puppet_version = environ['PUPPET_AGENT_VERSION'].lower()
    puppet_repo_package = 'puppetlabs-release-trusty.deb'
    puppet_repo_package_url = 'https://apt.puppetlabs.com/' + puppet_repo_package

    assert_command("wget -c {}".format(puppet_repo_package_url),
                   'Failed to fetch Puppet repo package!', cwd='/tmp')
    assert_command("dpkg -i {}".format(puppet_repo_package),
                   'Failed to install Puppet repo package!', cwd='/tmp')
    assert_command('apt-get update', "Failed to refresh apt cache!")
    assert_command("apt-get install -y puppet-common={} puppet={}".format(
        puppet_version, puppet_version), 'Failed to install Puppet!')
Python
def puppet_agent_bootstrapped():
    """
    Predicate to detect if Puppet has already been installed.

    Returns:
        boolean True or False
    """
    classification_data = '/var/lib/puppet/state/catalog.txt'

    # This state file only gets dropped on a successful Puppet run.
    return os.path.exists(classification_data)
Python
def create_rightscale_puppet_tags(secret):
    """
    Create the RightScale tags used for Puppet master auto-signing.
    """
    validate_env('RS_SELF_HREF', '^.+$')
    for tag in ['nd:puppet_state=waiting',
                "nd:puppet_secret={}".format(secret)]:
        cmd = "rsc --rl10 cm15 multi_add /api/tags/multi_add resource_hrefs[]={} tags[]={}".format(
            environ['RS_SELF_HREF'], tag)
        assert_command(
            cmd,
            "Failed to register RightScale tag '{}' for Puppet policy-based signing!".format(tag))
Python
def create_puppet_agent_cert(): """ Embed Nextdoor information into the Puppet agent CSR/cert. """ challenge_password = False preshared_key = ''.join(random.choice( string.ascii_letters + string.digits) for _ in range(36)) if "PUPPET_CHALLENGE_PASSWORD" in environ: validate_env('PUPPET_CHALLENGE_PASSWORD', '^.+$') challenge_password = environ['PUPPET_CHALLENGE_PASSWORD'] csr_attrs = {'extension_requests': {'pp_preshared_key': preshared_key}} if challenge_password: csr_attrs['custom_attributes'] = { '1.2.840.113549.1.9.7': challenge_password} try: with open('/etc/puppet/csr_attributes.yaml', 'wb') as outfile: outfile.write( yaml.dump(csr_attrs, explicit_start=True, default_flow_style=False, encoding='utf-8')) os.chmod('/etc/puppet/csr_attributes.yaml', 0o644) except (IOError, OSError) as e: sys.exit(" *** {} :: {} :: {} *** ".format(e.errno, e.filename, e.strerror)) create_rightscale_puppet_tags(preshared_key)
Python
def run_puppet_agent():
    """
    Kick off a Puppet agent run. With retries to cover eventual convergence.
    """
    cmd = "/usr/bin/puppet agent -t --detailed-exitcodes --waitforcert 15"

    # These are likely set in puppet.conf before the Puppet agent run; however,
    # it's entirely possible that a run will change the contents of puppet.conf
    # but not represent a complete convergence. On follow-up runs we thus cannot
    # rely on the values specified in puppet.conf. FIXME: make sure all node profiles
    # converge on first run. ;)
    dmc = '^.+$'  # don't much care
    for key, value in {
            'PUPPET_ENVIRONMENT_NAME': 'environment',
            'PUPPET_SERVER_HOSTNAME': 'server',
            'PUPPET_CA_SERVER': 'ca_server',
            'PUPPET_AGENT_USE_CACHED_CATALOG': 'use_cached_catalog',
    }.items():

        if key in environ:
            validate_env(key, dmc)
            cmd = ''.join((cmd, " --{} {}".format(value, environ[key])))

    assert_command(cmd, 'Puppet run failed!', retries=5)
Python
def configure_puppet_agent(): """ Encode various settings in puppet.conf and setup Nextdoor external Facts. """ configure_puppet_external_facts() bootstrap_puppet_agent_config()
Python
def clean_rightscale_tags():
    """
    Upon successful Puppet convergence, remove the pre-shared key tag used
    for autosigning and flip the Puppet state tag from 'waiting' to 'signed'.
    """
    assert_command("puppet resource rs_tag nd:puppet_state value='signed'",
                   "Failed when flipping nd:puppet_state value to 'signed'.")
    assert_command("puppet resource rs_tag nd:puppet_secret ensure=absent",
                   "Failed when removing nd:puppet_secret!")
Python
def adjust_hostname_and_domain():
    """
    We assume the FQDN has previously been set in /etc/hostname but that doesn't
    necessarily mean that required adjustments have been made to /etc/hosts
    to align with our needs for the DNS domain name. Let's idempotently make
    those adjustments.
    """
    validate_env('DEFAULT_DOMAIN', '^.+$')
    mydomain = environ['DEFAULT_DOMAIN']

    log_and_stdout(
        "Adjusting domain name for Puppet's use: {}".format(mydomain))

    try:
        with NamedTemporaryFile() as puppet_code:
            code = """
            host {{ \"${{::hostname}}.{}\":
              ensure => present,
              ip => $::ipaddress,
              host_aliases => $::hostname,
            }}
            """.format(mydomain)

            puppet_code.write(bytes(code, 'UTF-8'))
            puppet_code.flush()
            assert_command("puppet apply {}".format(puppet_code.name),
                           'Failed to munge /etc/hosts entry for correct FQDN!')
    except IOError as e:
        log_and_stdout("Puppet code tmpfile failed: {}".format(str(e)))
Python
def find_ebs_volumes(awskey, awssecret, ebs_vol_list, ebs_volid_list):
    """Search Amazon for existing EBS Volume ids in our zone. If they exist,
    then mount them and return them. If they don't exist, we error out."""

    # Some local instance ID info..
    zone = commands.getoutput(
        "wget -q -O - http://169.254.169.254/latest/meta-data/placement/availability-zone")
    region = zone[:-1]
    instanceid = commands.getoutput(
        "wget -q -O - http://169.254.169.254/latest/meta-data/instance-id")

    available_ebs_vol_list = []
    attached_ebs_vol_list = []

    # Make sure that the device list we got is good. If a device exists already,
    # remove it from the potential 'device targets'
    for potential_volume in ebs_vol_list:
        if not os.path.exists(potential_volume):
            print("INFO: ({}) is available as a disk target.".format(
                potential_volume))
            available_ebs_vol_list.append(potential_volume)

    # Reverse our available_ebs_vol_list so that we can 'pop' from the
    # beginning
    available_ebs_vol_list.reverse()

    # Make sure we have enough target devices available
    if len(available_ebs_vol_list) < len(ebs_volid_list):
        print("ERROR: Do not have enough local volume targets available to attach the drives. Erroring out.")
        return False

    # Open our EC2 connection
    print("INFO: Connecting to Amazon...")
    ec2 = boto.ec2.connect_to_region(
        region, aws_access_key_id=awskey, aws_secret_access_key=awssecret)

    # For each volume..
    for ebs_volid in ebs_volid_list:
        print("INFO: ({}) Searching for EBS volume...".format(ebs_volid))
        vols = ec2.get_all_volumes(volume_ids=ebs_volid)
        vol = vols[0]

        # Check if the volume is attached. If it is, bail!
        if not str(vol.attach_data.status) == "None" \
                and not str(vol.attach_data.instance_id) == instanceid:
            print("ERROR: ({}) is attached to instance ID {} already. Exiting!".format(
                vol.id, vol.attach_data.instance_id))
            return False
        # If its attached, but to our host already, then figure out
        # what device its attached to.
        elif not str(vol.attach_data.status) == "None" \
                and str(vol.attach_data.instance_id) == instanceid:
            print("WARNING: ({}) is already attached to our instance ID at {}. Using that...".format(
                vol.id, vol.attach_data.device))
            dest = vol.attach_data.device
        else:
            # Grab a volume off of our stack of available vols..
            dest = available_ebs_vol_list.pop()

            # Attach the volume and wait for it to fully attach
            print("INFO: ({}) Attaching EBS volume to our instance ID ({}) to {}".format(
                vol.id, instanceid, dest))
            vol.attach(instanceid, dest.replace('xvd', 'sd'))
            while not hasattr(vol.attach_data, 'instance_id'):
                time.sleep(1)
                vol.update()
            while not str(vol.attach_data.instance_id) == instanceid \
                    or True is not os.path.exists(dest):
                print("INFO: ({}) Volume attaching...".format(vol.id))
                time.sleep(1)
                vol.update()

            # Sleep a few more seconds just to make sure the OS has seen the
            # volume
            time.sleep(1)

        # Check whether we are using /dev/xvd volumes or /dev/sd volumes. Amazon always returns a volume mount
        # point as '/dev/sdXXX' when sometimes its actually '/dev/xvdXXX'.
        if os.path.exists("/dev/xvda1"):
            dest = dest.replace('sd', 'xvd')
            print("INFO: ({}) Converting volume mount point to {}".format(vol.id, dest))

        # Add the volume to our list of volumes that were created
        attached_ebs_vol_list.append(dest)
        print("INFO: ({}) Volume attached!".format(vol.id))

        # Now, tag the volumes and move on
        tags = {}
        tags["Name"] = "{}-{}".format(socket.gethostname(), dest)
        print("INFO: ({}) Tagging EBS volume with these tags: {}".format(vol.id, tags))
        ec2.create_tags([str(vol.id)], tags)

    # All done. Return whatever volumes were created and attached.
    return attached_ebs_vol_list
Python
def create_raid_volume(vols, raid_type):
    """
    create_raid_volume(vols) creates a mdadm raid volume from the volumes that
    are passed to it in the 'vols' array. If the volumes already have an mdadm
    array, then we just sanity check it and move on.
    """

    # Check if the MD_VOL is taken or not. This script does not support a system with existing
    # md volumes, so if one exists, we exit quietly.
    # If the file exists, skip to the next one...
    if os.system("mdadm -D " + MD_VOL + " 2>&1") == 0:
        vol_list = '|'.join(vols)
        if os.system("mdadm -D " + MD_VOL + " 2>&1 | egrep '" + vol_list + "' 2>&1") == 0:
            print("WARNING: " + MD_VOL +
                  " already exists and actually has our volumes in it, using that and passing it back.")
            return MD_VOL
        else:
            print("ERROR: " + MD_VOL +
                  " already exists, but does NOT have our existing volumes in it. Exiting badly.")
            sys.exit(1)

    # Now, walk through each of the volumes passed to us and figure out if they
    # are part of an array or not already.
    existing_vols = []
    new_vols = []
    for potential_volume in vols:
        if os.system("mdadm --examine " + potential_volume + " 2>&1") == 0:
            print("INFO: ({}) is already a member of an existing array... not overwriting.".format(
                potential_volume))
            existing_vols.append(potential_volume)
        else:
            print("INFO: ({}) is not a member of any existing array, so we will create a new array with it.".format(
                potential_volume))
            new_vols.append(potential_volume)

    # If we have any drives in existing_vols, assume that's correct
    if existing_vols.__len__() > 0:
        # Prep some variables
        vol_list = " ".join(existing_vols)
        cmd = "cat /proc/mdstat | grep ^md | awk '{{print \"/dev/\"$1}}' | xargs --no-run-if-empty -n 1 mdadm -S; mdadm --assemble {} {} 2>&1".format(
            MD_VOL, vol_list)

        # Run the command and return the output
        if os.system(cmd) == 0:
            print("INFO: ({}) assembled from vols {}".format(MD_VOL, vol_list))
        else:
            print("ERROR: ({}) failed. ({}) could not be created... skipping.".format(
                cmd, MD_VOL))
            return False
    # If we have any drives in new_vols, build a fresh array from them
    elif new_vols.__len__() > 0:
        # Prep some variables
        vol_list = " ".join(new_vols)
        cmd = "yes | mdadm --create --name=0 --force {} --level {} --raid-devices={} {} 2>&1".format(
            MD_VOL, str(raid_type), new_vols.__len__(), vol_list)

        # Run the command and return the output
        if os.system(cmd) == 0:
            print("INFO: {} created with vols {}".format(MD_VOL, vol_list))
        else:
            print("ERROR: ({}) failed. {} could not be created... skipping.".format(
                cmd, MD_VOL))
            return False
    else:
        return False

    # Lastly, create our mdadm config file
    if os.path.exists("/etc/mdadm"):
        md_conf = "/etc/mdadm/mdadm.conf"
    else:
        md_conf = "/etc/mdadm.conf"

    # Back up the MDADM conf
    os.system("cp " + md_conf + " " + md_conf + ".bak")

    # Now format our volume
    if False is mount_raid_volume(MD_VOL, options.fstype, options.mountpoint):
        print("ERROR: mount_raid_volume({}, {}, {}) failed. exiting script.".format(
            MD_VOL, options.fstype, options.mountpoint))
        sys.exit(1)

    # Get our UUID from the mdadm array
    # md_uuid = commands.getoutput("blkid " + MD_VOL + " | awk '{print $2}'")
    # flake8 reports this is dead code ^^

    # Strip any old md config lines from mdadm.conf in place, then record the
    # current array layout and device list.
    os.system("sed -i '/UUID/d' " + md_conf)
    os.system("sed -i '/DEVICE/d' " + md_conf)
    os.system("mdadm --detail --scan >> " + md_conf)
    os.system("echo DEVICE " + vol_list + " >> " + md_conf)

    # Return the created/mounted md vols
    return MD_VOL
Python
def mount_raid_volume(vol, fstype, mountpoint):
    """
    mount_raid_volume(vol, fstype, mountpoint) checks if a volume is formatted
    already or not. If not, it formats it with the fstype requested.
    """

    # Check if 'vol' exists
    if not stat.S_ISBLK(os.stat(vol).st_mode):
        return False

    # Make sure that the mountpoint is available and nothing else is mounted
    # there.
    cmd = "mount | grep '" + mountpoint + \
        "' | awk '{print $3}' | xargs --no-run-if-empty umount -f"
    os.system(cmd)

    # Sanity check our fstype. We may need to add options.
    if fstype == "xfs":
        fstype = "xfs -f"

    # Attempt to mount the filesystem... if it won't mount, then
    # assume its bad and try to format it.
    cmd = "(fsck -y {} 2>&1 ; mount {} {} -o {} 2>&1)".format(
        vol, vol, mountpoint, DEFAULT_MOUNTOPTS)
    if options.verbose:
        print("INFO: {}".format(cmd))
    if 0 == os.system(cmd):
        print("INFO: ({}) already has a filesystem on it... mounting.".format(vol))
        return True
    else:
        # If theres no filesystem on the device, create the one we want
        print("INFO: Formatting {} with {} and mounting it to {}...".format(
            vol, fstype, mountpoint))
        if os.system("mkfs." + fstype + " " + vol + " 2>&1; mount " + vol + " " +
                     mountpoint + " -o " + DEFAULT_MOUNTOPTS + " 2>&1") == 0:
            return True
        else:
            return False
Python
def update_fstab(vol, fstype, mountpoint):
    """
    Now that our mount point is finished, update fstab
    """
    # Construct our fstab mount line
    mnt_line = "{} {} {} {} 0 0\n".format(
        vol, mountpoint, fstype, DEFAULT_MOUNTOPTS)

    # Make sure that no existing mount line is in the fstab. Use '#' as the
    # sed address delimiter so the slashes in the mountpoint don't break it.
    cmd = "/bin/sed -i '\\#{}#d' /etc/fstab".format(mountpoint)
    os.system(cmd)

    # Now add our line to the fstab
    with open('/etc/fstab', 'a') as f:
        f.write(mnt_line)

    return True
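For illustration, here is the shape of the fstab line update_fstab() appends; the device, filesystem, mountpoint, and the DEFAULT_MOUNTOPTS value shown below are assumptions for the sketch, not values from the original scripts.

# Sketch only: hypothetical inputs to show the appended fstab line format.
vol, fstype, mountpoint = '/dev/md0', 'xfs', '/mnt/data'    # assumed values
DEFAULT_MOUNTOPTS = 'defaults,noatime'                       # assumed; defined elsewhere in the real script
print("{} {} {} {} 0 0".format(vol, mountpoint, fstype, DEFAULT_MOUNTOPTS))
# -> /dev/md0 /mnt/data xfs defaults,noatime 0 0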
Python
def find_files(pattern, excludes=[]):
    """
    Recursive find of files matching pattern starting at location of this script.

    Args:
        pattern (str): filename pattern to match
        excludes: array of patterns to exclude from the find

    Returns:
        array: list of matching files
    """
    matches = []
    DEBUG = False

    for root, dirnames, filenames in walk(os.path.dirname(__file__)):
        for filename in fnmatch.filter(filenames, pattern):
            matches.append(os.path.join(root, filename))

    # Oh, lcomp syntax...
    for exclude in excludes:
        matches = [match for match in matches if exclude not in match]

    if DEBUG:
        print(Fore.YELLOW + "Matches in find_files is : {}".format(str(matches)))

    return matches
Python
def handle_repo(repo):
    """
    Given a dictionary representing our repo settings, download and checkout.
    """

    # These things are all required even though some of them could
    # be defaulted.
    required_keys = ('type', 'source', 'ref', 'destination')
    if not all(key in repo for key in required_keys):
        print(Fore.RED + str(repo))
        print(Fore.RED + "repo spec must include: {}".format(
            str(required_keys)))
        sys.exit(-1)

    # For now, just git repos. But who knows...whatever comes after git?
    if 'git' != repo['type']:
        print(Fore.RED + str(repo))
        print(Fore.RED + "repo type must be 'git'!")
        sys.exit(-1)

    # Rather than try to play clever games with any existing dep caches,
    # blow away what is in place and replace with a fresh clone + checkout.
    # 'prep' task is *meant* to run only rarely anyway.
    dest = str(repo['destination'])
    if os.path.exists(dest):
        print(Fore.BLUE + "{} already exists; removing...".format(dest))
        result = run("rm -rf {}".format(dest), echo=True)
        if result.failed:
            print(Fore.RED + "Failed while removing {}".format(dest))
            sys.exit(-1)

    try:
        os.makedirs(dest)
    except OSError as e:
        print(Fore.RED + "Failed creating directory '{}'".format(dest))
        print(str(e))

    # Fresh clone and checkout of the repo but *not* a submodule or subtree.
    # The dep is cleansed of its .git directory.
    source = repo['source']
    ref = repo['ref']
    result = run("git clone {} {} && "
                 "(cd {} && git checkout {}) && "
                 " rm -rf {}/.git".format(source, dest, dest, ref, dest),
                 echo=True)
    if result.failed:
        print(Fore.RED + "Failed checking out repo: {} / {} to '{}'!".format(
            source, ref, dest))
        sys.exit(-1)

    # If the 'prep' key is present, run this command as a way of setting up
    # the external dep.
    if 'prep' in repo:
        prep = str(repo['prep'])
        print(Fore.BLUE + "Executing specified 'prep' command: {}".format(prep))
        result = run(prep, echo=True)
        if result.failed:
            print(Fore.RED + "Failed while prepping!")
            sys.exit(-1)

    # If the 'persist' key is False, remove the directory after 'prep'
    if 'persist' in repo and False is repo['persist']:
        cmd = "rm -rf {}".format(dest)
        result = run(cmd, echo=True)
        if result.failed:
            print(Fore.RED + "Failed while removing non-persisted repo!")
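A minimal sketch of the dictionary shape handle_repo() expects, based on the required and optional keys it checks; the repository URL, ref, destination path, and prep command below are made up for illustration.

# Hypothetical repo spec; only 'type', 'source', 'ref' and 'destination' are required.
example_repo = {
    'type': 'git',
    'source': 'https://github.com/example/some-dependency.git',  # assumed URL
    'ref': 'master',
    'destination': 'vendor/some-dependency',                     # assumed path
    'prep': 'echo prepping',                                     # optional setup command
    'persist': True,                                             # optional; False removes dest after prep
}
handle_repo(example_repo)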
Python
def prep():
    """
    Download and place external dependencies as a way to avoid git
    submodules/subtrees.  Would be nice if librarian could be leveraged...
    """
    newcwd = sys.path[0]
    if '' != newcwd:
        # I'm not sure this is absolutely necessary but to be careful...
        print(Fore.GREEN + "Changing cwd to {}".format(newcwd))
        chdir(sys.path[0])
    else:
        print(Fore.RED + "I am very confused about our sys.path[0] of ''!")
        sys.exit(-1)

    deps = {}
    with open('external_dependencies.yml') as deps_file:
        try:
            deps = yaml.load(deps_file)
        except yaml.YAMLError as e:
            print(Fore.RED + str(e))
            sys.exit(-1)

    if 'repos' in deps:
        for repo in deps['repos']:
            chdir(sys.path[0])
            handle_repo(deps['repos'][repo])
Python
def syntax(): """ Recursively syntax check various files. """ print(Fore.GREEN + "Syntax checking of YAML files...") yaml_files = find_files('*.yaml') + find_files('*.yml') for yaml_file in yaml_files: with open(yaml_file, 'r') as f: print(Fore.WHITE + yaml_file) try: yaml.load(f) except yaml.YAMLError as e: print(Fore.RED + str(e)) print(Fore.GREEN + "Syntax checking of Python files...") python_files = find_files('*.py', excludes=EXCLUDE_DIRS) cmd = "python -m py_compile {}".format(' '.join(python_files)) result = run(cmd, echo=True) print(Fore.GREEN + "Syntax checking of Ruby files...") ruby_files = find_files('*.rb') cmd = "ruby -c {}".format(' '.join(ruby_files)) result = run(cmd, echo=True) # won't get here unless things run clean print(Fore.GREEN + "Exit code: {}".format(result.return_code))
Python
def lint_check(): """ Recursively lint check Python files in this project using flake8. """ print(Fore.GREEN + "Lint checking of Python files...") python_files = find_files('*.py', excludes=EXCLUDE_DIRS) cmd = "flake8 --count --statistics --show-source --show-pep8"\ " --max-line-length=160 --ignore={} {}".format( PEP8_IGNORE, ' '.join(python_files)) result = run(cmd, echo=True) # won't get here unless things run clean print(Fore.GREEN + "Exit code: {}".format(result.return_code))
Python
def lint_fix(): """ Recursively lint check **and fix** Python files in this project using autopep8. """ print(Fore.GREEN + "Lint fixing Python files...") python_files = find_files('*.py', excludes=EXCLUDE_DIRS) cmd = "autopep8 -r --in-place --ignore={} {}".format( PEP8_IGNORE, ' '.join(python_files)) result = run(cmd, echo=True) # won't get here unless things run clean print(Fore.GREEN + "Exit code: {}".format(result.return_code))
Python
def normalize_hostname_to_rfc(mystr):
    """
    Given a hostname, normalize to Nextdoor hostname standard

    Args:
       mystr (str): hostname to normalize

    Returns:
       normalized hostname

    Details:
       * lowercase everything
       * delete anything which is not alphanumeric, '.', '-' or '_'
       * compress multiple '.' or '-'
       * strip leading '-'s
    """

    return re.sub('^[-]+', '',
                  re.sub('[.]{2,}', '.',
                         re.sub('[-]{2,}', '-',
                                re.sub('[^a-z0-9-._]', '', mystr.lower()))))
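A quick sanity check of the normalization chain above, using a made-up hostname; the input is purely illustrative.

# Sketch: a hypothetical hostname run through normalize_hostname_to_rfc().
print(normalize_hostname_to_rfc('--Web01..Prod_US!.nextdoor.com'))
# -> web01.prod_us.nextdoor.com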
Python
def is_volumized():
    """
    Return boolean indicating if the system has been previously 'volumized'.

    Returns:
       boolean

    Details:
       Presence of /etc/nextdoor/volumized is used to indicate the system
       has previously had volumes initialized into a RAID array.
    """
    return os.path.exists('/etc/nextdoor/volumized')
Python
def log_and_stdout(msg):
    """
    Send msg to both stdout and syslog.

    Args:
       msg (str): message to send to stdout and syslog

    Returns: nothing
    """
    if not isinstance(msg, (str, bytes, bytearray)):
        log_and_stdout(" *** Should never get here! Something passed was not"
                       " a str, bytes, or a bytearray!")
        msg = str(msg)  # coerce so the logging below still works

    if isinstance(msg, (bytes, bytearray)):
        msg = msg.decode('utf-8', errors='replace')

    logger.info(msg)
    print(msg)
Python
def assert_command(cmd, msg, shell=False, cwd=None, retries=1):
    """
    Execute the passed command w/ optional cwd and retries.

    Args:
       cmd (str): command to execute
       msg (str): message to log to stdout and syslog if execution fails
       shell (boolean) : optionally use shell instead of system()
       cwd (str): optionally set the cwd for the command
       retries: optionally retry the command some number of times

    Returns:
       boolean True on success; sys.exit upon failure

    Details:
       If 'retries' is specified, the command will be retried until it
       succeeds or the number of retries is exhausted. A single successful
       run will return boolean True regardless of previous failures.
    """
    attempts = 0
    while attempts < retries:
        attempts += 1
        ret = 0
        output = ''
        try:
            progress = " *** Executing command ({} of {} attempts): {} *** ".format(
                attempts, retries, cmd)
            log_and_stdout(progress)

            output = check_output(
                cmd.split(), stderr=STDOUT, shell=shell, cwd=cwd)

        except CalledProcessError as e:
            ret = e.returncode
            output = e.output

        if 0 != ret:
            log_and_stdout(output)
            log_and_stdout("retcode: {} :: {}".format(ret, cmd))

            if attempts == retries:
                log_and_stdout(
                    "Exceeded specified retries: {} :: retcode: {} :: {}".format(retries, ret, msg))
                sys.exit(ret)
        else:
            log_and_stdout(output)
            break

    return True
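A minimal usage sketch of assert_command(); the command, failure message, and retry count here are illustrative, not taken from the original scripts.

# Hypothetical call: retried up to 3 times; sys.exit()s with the command's
# return code if every attempt fails, and returns True on the first success.
assert_command('apt-get install -y htop',
               'Failed to install htop!',
               retries=3)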
Python
def validate_env(envvar, regex): """ Check for presence of env variable and validate its value against regex. Args: envvar(str): the env var to check regex: the regex used for validation Returns: boolean true if validation passes; sys.exit if not present or does not validate """ if envvar not in environ: msg = " *** \'{0}\' not found in environment! ***".format(envvar) log_and_stdout(msg) sys.exit(-1) if None is re.match(regex, os.environ[envvar]): msg = " *** \'{0}\'=\'{1}\' does not match RE \'{2}\'! ***".format( envvar, os.environ[envvar], regex) log_and_stdout(msg) sys.exit(-1) else: return True
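For illustration, a couple of validate_env() calls against variables used elsewhere in these scripts; the exported value is assumed for the sketch.

import os

# Sketch only: seed an assumed value, then validate it.
os.environ['PUPPET_ENVIRONMENT_NAME'] = 'production'        # assumed value
validate_env('PUPPET_ENVIRONMENT_NAME', '^.+$')              # returns True
validate_env('PUPPET_ENABLE_REPORTS', '^(true|false)$')      # sys.exit(-1) if unset or invalid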
Python
def volumize():
    """
    Set the 'volumized' lock file.

    Details:
       This lock file is used to indicate the system has previously had
       volumes initialized and RAIDed.
    """
    assert_command('mkdir -p /etc/nextdoor/volumized',
                   "Could not create Nextdoor's volumize lock file!")
Python
def detect_debug_mode(): """ If the shell's env variables include 'DEBUG' print all env vars to stdout. """ if "DEBUG" in environ: dump_environment(to_var=True)
Python
def dump_environment(to_var=False):
    """
    Dump the shell's env vars to stdout as a JSON document.

    Args:
       to_var (boolean): also write the env vars to an 'env.sh' file in the
       current (cookbook) directory
    """
    print(
        json.dumps({k: os.environ[k] for k in os.environ.keys()}, indent=4, sort_keys=True))

    if to_var:
        try:
            with open('env.sh', 'w') as env_log:
                env_log.write("# {}\n".format(time.strftime("%c")))
                for key, value in environ.items():
                    env_log.write('export {}="{}"\n'.format(key, value))
        except IOError as e:
            log_and_stdout(str(e))
            log_and_stdout("Failed to create env.sh in cookbook dir...")
Python
def apt_get_update(refresh_interval_mins=30): """ Run apt-get update if it has not been run within specified minutes. """ refresh_interval = refresh_interval_mins * 60 time_stamp_file = '/var/run/nextdoor_apt_get_update.lock' run_cmd = False # shall we run the apt-get if not os.path.exists(time_stamp_file): open(time_stamp_file, 'w').close() run_cmd = True else: try: mtime = os.path.getmtime(time_stamp_file) if time.time() - mtime > refresh_interval: open(time_stamp_file, 'w').close() run_cmd = True else: log_and_stdout(" *** apt-get update run < {} minutes ago." " Not re-running. *** ".format( str(refresh_interval_mins))) except OSError as e: run_cmd = True if True is run_cmd: assert_command('apt-get update', 'Failed to update package cache!', retries=5)
Python
def create_raid_volume(vols, raid_type):
    """
    create_raid_volume(vols) creates a mdadm raid volume from the volumes that
    are passed to it in the 'vols' array. If the volumes already have an mdadm
    array, then we just sanity check it and move on.
    """

    # Check if the MD_VOL is taken or not. This script does not support a system with existing
    # md volumes, so if one exists, we exit quietly.
    # If the file exists, skip to the next one...
    if os.system("mdadm -D " + MD_VOL + " 2>&1") == 0:
        vol_list = '|'.join(vols)
        if os.system("mdadm -D " + MD_VOL + " 2>&1 | egrep '" + vol_list + "' 2>&1") == 0:
            print("WARNING: " + MD_VOL +
                  " already exists and actually has our volumes in it, using that and passing it back.")
            return MD_VOL
        else:
            print("ERROR: " + MD_VOL +
                  " already exists, but does NOT have our existing volumes in it. Exiting badly.")
            sys.exit(1)

    # Now, walk through each of the volumes passed to us and figure out if they
    # are part of an array or not already.
    existing_vols = []
    new_vols = []
    for potential_volume in vols:
        if os.system("mdadm --examine " + potential_volume + " 2>&1") == 0:
            print("INFO: ({}) is already a member of an existing array... not overwriting.".format(
                potential_volume))
            existing_vols.append(potential_volume)
        else:
            print("INFO: ({}) is not a member of any existing array, so we will create a new array with it.".format(
                potential_volume))
            new_vols.append(potential_volume)

    # If we have at least two drives in existing_vols, assume that's correct
    if existing_vols.__len__() > 1:
        # Prep some variables
        vol_list = " ".join(existing_vols)
        cmd = "cat /proc/mdstat | grep ^md | awk '{{print \"/dev/\"$1}}' | xargs --no-run-if-empty -n 1 mdadm -S; mdadm --assemble {} {} 2>&1".format(
            MD_VOL, vol_list)

        # Run the command and return the output
        if os.system(cmd) == 0:
            print("INFO: ({}) assembled from vols {}".format(MD_VOL, vol_list))
        else:
            print("ERROR: ({}) failed. ({}) could not be created... skipping.".format(
                cmd, MD_VOL))
            return False
    # If we have at least two drives in new_vols, build a fresh array from them
    elif new_vols.__len__() > 1:
        # Prep some variables
        vol_list = " ".join(new_vols)
        cmd = "yes | mdadm --create --name=0 --force {} --level {} --raid-devices={} {} 2>&1".format(
            MD_VOL, str(raid_type), new_vols.__len__(), vol_list)

        # Run the command and return the output
        if os.system(cmd) == 0:
            print("INFO: {} created with vols {}".format(MD_VOL, vol_list))
        else:
            print("ERROR: ({}) failed. {} could not be created... skipping.".format(
                cmd, MD_VOL))
            return False
    else:
        return False

    # Lastly, create our mdadm config file
    if os.path.exists("/etc/mdadm"):
        md_conf = "/etc/mdadm/mdadm.conf"
    else:
        md_conf = "/etc/mdadm.conf"

    # Back up the MDADM conf
    os.system("cp " + md_conf + " " + md_conf + ".bak")

    # Now format our volume
    if False is mount_raid_volume(MD_VOL, options.fstype, options.mountpoint):
        print("ERROR: mount_raid_volume({}, {}, {}) failed. exiting script.".format(
            MD_VOL, options.fstype, options.mountpoint))
        sys.exit(1)

    # Get our UUID from the mdadm array
    # md_uuid = commands.getoutput("blkid " + MD_VOL + " | awk '{print $2}'")
    # flake8 reports this is dead code ^^

    # Strip any old md config lines from mdadm.conf in place, then record the
    # current array layout and device list.
    os.system("sed -i '/UUID/d' " + md_conf)
    os.system("sed -i '/DEVICE/d' " + md_conf)
    os.system("mdadm --detail --scan >> " + md_conf)
    os.system("echo DEVICE " + vol_list + " >> " + md_conf)

    # Return the created/mounted md vols
    return MD_VOL
Python
def mount_raid_volume(vol, fstype, mountpoint):
    """
    mount_raid_volume(vol, fstype, mountpoint) checks if a volume is formatted
    already or not. If not, it formats it with the fstype requested.
    """

    # Check if 'vol' exists
    if not stat.S_ISBLK(os.stat(vol).st_mode):
        return False

    # Make sure that the mountpoint is available and nothing else is mounted
    # there.
    cmd = "mount | grep '" + mountpoint + \
        "' | awk '{print $3}' | xargs --no-run-if-empty umount -f"
    os.system(cmd)

    # Sanity check our fstype. We may need to add options.
    if fstype == "xfs":
        fstype = "xfs -f"

    # Attempt to mount the filesystem... if it won't mount, then
    # assume its bad and try to format it.
    if os.system("fsck -y " + vol + " 2>&1; mount " + vol + " " +
                 mountpoint + " -o " + DEFAULT_MOUNTOPTS + " 2>&1") == 0:
        print("INFO: ({}) already has a filesystem on it... mounting.".format(vol))
        return True
    else:
        # If theres no filesystem on the device, create the one we want
        print("INFO: Formatting {} with {} and mounting it to {}...".format(
            vol, fstype, mountpoint))
        if os.system("mkfs." + fstype + " " + vol + " 2>&1; mount " + vol + " " +
                     mountpoint + " -o " + DEFAULT_MOUNTOPTS + " 2>&1") == 0:
            return True
        else:
            return False
Python
def find_zone(self, name_or_id, ignore_missing=False):
    """
    Find a zone by its name or ID.

    :param name_or_id: The name or ID of a zone
    :param bool ignore_missing: When set to ``False``
                :class:`~ecl.exceptions.ResourceNotFound` will be raised
                when the resource does not exist.
                When set to ``True``, None will be returned
                when attempting to find a nonexistent resource.
    :return: One :class:`~ecl.dns.v2.zone.Zone` or
        :class:`~ecl.exceptions.ResourceNotFound` when no resource can be found.
    """
    return self._find(_zone.Zone, name_or_id, ignore_missing=ignore_missing)
Python
def update_zone(self, zone, ttl=None, description=None, email=None, masters=None): """ Update the attribute(s) for an existing zone. :param zone: ID for the zone or zone instance to update. :param ttl: TTL (Time to Live) for the zone. This parameter is not currently supported. :param description: Description for this zone :param email: e-mail for the zone. Used in SOA records for the zone. This parameter is not currently supported. :param masters: For secondary zones. The servers to slave from to get DNS information. This parameter is not currently supported. :return: :class:`~ecl.dns.v2.zone.Zone` """ attr = {} if ttl is not None: attr["ttl"] = ttl if description is not None: attr["description"] = description if email is not None: attr["email"] = email if masters is not None: attr["masters"] = masters if not isinstance(zone, _zone.Zone): zone = self._get_resource(_zone.Zone, zone) zone._body.clean() return self._update(_zone.Zone, zone, **attr)
Python
def recordsets(self, zone_id, limit=None, marker=None):
    """
    This lists all recordsets in a zone.

    :param zone_id: ID for the zone
    :param limit: Requests a page size of items (1-500). Returns a number
        of items up to a limit value. Use the limit parameter to make an
        initial limited request and use the ID of the last-seen item from
        the response as the marker parameter value in a subsequent
        limited request.
    :param marker: The ID of the last-seen item. Use the limit parameter
        to make an initial limited request and use the ID of the
        last-seen item from the response as the marker parameter value
        in a subsequent limited request.
    :return: One list of :class:`~ecl.dns.v2.recordsets.Recordsets`
    """
    attrs = {}
    attrs["zone_id"] = zone_id
    if limit is not None:
        attrs["limit"] = limit
    if marker is not None:
        attrs["marker"] = marker
    return list(self._list(_recordset.RecordSet, **attrs))
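The limit/marker pair described in the docstring above is keyset pagination: each follow-up request passes the ID of the last item already received. A minimal sketch of how a caller might walk every page, assuming `dns` is a proxy instance exposing recordsets() and that the returned recordset objects carry an `id` attribute:

def iter_all_recordsets(dns, zone_id, page_size=100):
    marker = None
    while True:
        # Ask for one page; after the first page, pass the last-seen ID as the marker.
        page = dns.recordsets(zone_id, limit=page_size, marker=marker)
        if not page:
            break
        for rs in page:
            yield rs
        # The next request starts after the last item we have already seen.
        marker = page[-1].id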
Python
def find_recordset(self, zone_id, name_or_id, ignore_missing=False):
    """
    Find a recordset by its name or ID.

    :param zone_id: ID for the zone the recordset belongs to
    :param name_or_id: Name or ID for this recordset
    :return: One :class:`~ecl.dns.v2.recordsets.Recordsets` or
        :class:`~ecl.exceptions.ResourceNotFound` when no resource can be found.
    """
    return self._find(_recordset.RecordSet, name_or_id,
                      zone_id=zone_id,
                      ignore_missing=ignore_missing)
Python
def create_recordset(self, zone_id, name=None, description=None, type=None,
                     ttl=None, records=None):
    """
    Create a recordset in a zone.

    :param zone_id: ID for the zone.
    :param name: DNS Name for the recordset.
    :param description: Description for this recordset.
    :param type: RRTYPE of the recordset.
        Valid Values: A | AAAA | MX | CNAME | SRV | SPF | TXT | PTR | NS
    :param ttl: TTL (Time to Live) for the recordset.
    :param records: A list of data for this recordset. Each item will be
        a separate record in ECL2.0 DNS. These items should conform to the
        DNS spec for the record type - e.g. A records must be IPv4
        addresses, CNAME records must be a hostname.
    :return: :class:`~ecl.dns.v2.recordsets.Recordsets`
    """
    attr = {"name": name}
    if ttl is not None:
        attr["ttl"] = ttl
    if type is not None:
        attr["type"] = type
    if description is not None:
        attr["description"] = description
    if records is not None:
        attr["records"] = records
    return self._create(_recordset.RecordSet, zone_id=zone_id, **attr)
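A short usage sketch for create_recordset, assuming `dns` is a proxy instance exposing the method above; the recordset name and record values are hypothetical, and the zone ID is taken from the response example further down. Because of the RecordSet.create override shown later, the call yields a list of recordset objects, one per record:

# Hypothetical recordset; only the keyword names come from the signature above.
recordsets = dns.create_recordset(
    zone_id="d4f0ea0e-edb6-4bbb-aefd-2944457be234",
    name="www.example.com.",
    type="A",
    ttl=3600,
    records=["203.0.113.10", "203.0.113.11"],  # two A records in one recordset
)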
Python
def virtual_network_appliance_plans(self, **params):
    """List virtual network appliance plans.

    :param params: The parameters as query string
        format to get network appliance plans.
    :returns: A list of network appliance plans.
    :rtype: list of :class:`~ecl.virtual_network_appliance.v1.
        virtual_network_appliance_plan.VirtualNetworkAppliancePlan`
    """
    return list(self._list(
        _virtual_network_appliance_plan.VirtualNetworkAppliancePlan,
        paginated=False, **params))
Python
def update_virtual_network_appliance(self, virtual_network_appliance, **body):
    """Update a virtual network appliance.

    :param virtual_network_appliance: ID of specified
        virtual network appliance.
    :param \*\*body: Parameters for updating the specified
        virtual network appliance.
    :returns: :class:`~ecl.virtual_network_appliance.
        v1.virtual_network_appliance.VirtualNetworkAppliance`
    """
    return self._update(_virtual_network_appliance.VirtualNetworkAppliance,
                        virtual_network_appliance, **body)
Python
def find_virtual_network_appliance(self, name_or_id, ignore_missing=False):
    """Find a single virtual network appliance.

    :param name_or_id: The name or ID of a virtual network appliance.
    :param bool ignore_missing: When set to ``False``
        :class:`~ecl.exceptions.ResourceNotFound` will be raised
        when the resource does not exist.
        When set to ``True``, None will be returned
        when attempting to find a nonexistent resource.
    :returns: One :class:`~ecl.virtual_network_appliance.v1.
        virtual_network_appliance.VirtualNetworkAppliance` or None
    """
    return self._find(_virtual_network_appliance.VirtualNetworkAppliance,
                      name_or_id, ignore_missing=ignore_missing)
Python
def start_virtual_network_appliance(self, virtual_network_appliance):
    """Start the virtual network appliance.

    :param virtual_network_appliance: The ID of a virtual network appliance.
    :return: <Response 200>
    """
    virtual_network_appliance = \
        self.get_virtual_network_appliance(virtual_network_appliance)
    return virtual_network_appliance.start(self.session)
Python
def stop_virtual_network_appliance(self, virtual_network_appliance):
    """Stop the virtual network appliance.

    :param virtual_network_appliance: The ID of a virtual network appliance.
    :return: <Response 200>
    """
    virtual_network_appliance = \
        self.get_virtual_network_appliance(virtual_network_appliance)
    return virtual_network_appliance.stop(self.session)
Python
def restart_virtual_network_appliance(self, virtual_network_appliance):
    """Restart the virtual network appliance.

    :param virtual_network_appliance: The ID of a virtual network appliance.
    :return: <Response 200>
    """
    virtual_network_appliance = \
        self.get_virtual_network_appliance(virtual_network_appliance)
    return virtual_network_appliance.restart(self.session)
Python
def reset_password_virtual_network_appliance(self, virtual_network_appliance):
    """Reset the password of a virtual network appliance.

    :param virtual_network_appliance: The ID of a virtual network appliance.
    :return: <Response 200>
    """
    virtual_network_appliance = \
        self.get_virtual_network_appliance(virtual_network_appliance)
    return virtual_network_appliance.reset_password(self.session)
Python
def find_virtual_network_appliance_plan(self, name_or_id, ignore_missing=False):
    """Find a single virtual network appliance plan.

    :param name_or_id: The name or ID of a virtual network appliance plan.
    :param bool ignore_missing: When set to ``False``
        :class:`~ecl.exceptions.ResourceNotFound` will be raised
        when the resource does not exist.
        When set to ``True``, None will be returned
        when attempting to find a nonexistent resource.
    :returns: One :class:`~ecl.virtual_network_appliance.v1.
        virtual_network_appliance_plan.VirtualNetworkAppliancePlan` or None
    """
    return self._find(
        _virtual_network_appliance_plan.VirtualNetworkAppliancePlan,
        name_or_id, ignore_missing=ignore_missing)
Python
def _translate_recordsets(self, response, has_body=True):
    """
    Handle the list response.
    Response example:
    {"recordsets": [
        {"id":"fcb86eb9-8f8d-4cfd-8309-9052236d75df",
         "zone_id":"d4f0ea0e-edb6-4bbb-aefd-2944457be234",
         "records":["203.0.143.22"],
         "ttl":3600,"name":"ns3.base.co.jp.",
         "description":null,"type":"A","version":1,
         "created_at":"","updated_at":null,
         "links":{"self":"https://dns-lab3ec-ecl.lab.api.ntt.com/v2/zones/d4f0ea0e-edb6-4bbb-aefd-2944457be234/recordsets/fcb86eb9-8f8d-4cfd-8309-9052236d75df"}},
        {"id":"b0590460-11b3-413d-ad95-5cd3f4b01c27",
         "zone_id":"d4f0ea0e-edb6-4bbb-aefd-2944457be234",
         "records":["203.0.143.23"],"ttl":3600,
         "name":"ns3.base.co.jp.",
         "description":null,"type":"A",
         "version":1,"created_at":"",
         "updated_at":null,
         "links":{"self":"https://dns-lab3ec-ecl.lab.api.ntt.com/v2/zones/d4f0ea0e-edb6-4bbb-aefd-2944457be234/recordsets/b0590460-11b3-413d-ad95-5cd3f4b01c27"}}
     ],
     "links":{"self":"https://dns-lab3ec-ecl.lab.api.ntt.com/v2/zones/d4f0ea0e-edb6-4bbb-aefd-2944457be234/recordsets"},
     "metadata":{"total_count":2}}
    """
    if has_body:
        body = response.json()
        body = body[self.resources_key]
        for data in body:
            value = self.existing(**data)
            yield value
Python
def create(self, session, prepend_key=True):
    """
    Recordsets allow creating several records at once,
    so this returns a list.
    """
    if not self.allow_create:
        raise exceptions.MethodNotSupported(self, "create")

    if self.put_create:
        request = self._prepare_request(requires_id=True,
                                        prepend_key=prepend_key)
        response = session.put(request.uri, endpoint_filter=self.service,
                               json=request.body, headers=request.headers)
    else:
        request = self._prepare_request(requires_id=False,
                                        prepend_key=prepend_key)
        response = session.post(request.uri, endpoint_filter=self.service,
                                json=request.body, headers=request.headers)
    return list(self._translate_recordsets(response, has_body=True))
Python
def find(cls, session, name_or_id, zone_id, ignore_missing=False, **params):
    """Find a resource by its name or id.

    :param session: The session to use for making this request.
    :type session: :class:`~ecl.session.Session`
    :param name_or_id: This resource's identifier, if needed by
        the request. The default is ``None``.
    :param zone_id: ID for the zone
    :param bool ignore_missing: When set to ``False``
        :class:`~ecl.exceptions.ResourceNotFound` will be raised
        when the resource does not exist.
        When set to ``True``, None will be returned
        when attempting to find a nonexistent resource.
    :param dict params: Any additional parameters to be passed into
        underlying methods, such as to
        :meth:`~ecl.resource2.Resource.existing` in order to pass on
        URI parameters.

    :return: The :class:`Resource` object matching the given name or id
        or None if nothing matches.
    :raises: :class:`ecl.exceptions.DuplicateResource` if more
        than one resource is found for this request.
    :raises: :class:`ecl.exceptions.ResourceNotFound` if nothing
        is found and ignore_missing is ``False``.
    """
    # Try to short-circuit by looking directly for a matching ID.
    data = list(cls.list(session, zone_id=zone_id, **params))

    result = cls._get_one_match(name_or_id, data)
    if result is not None:
        return result

    if ignore_missing:
        return None
    raise exceptions.ResourceNotFound(
        "No %s found for %s" % (cls.__name__, name_or_id))
Python
def ensure_deploy_dir(app_dir):
    ''' make sure deploy dir exists and is writable

    Returns
    -------
    tuple
        bool failed (whether we failed)
        bool changed (whether we changed)
        str message - error on failure or directory when changed
    '''
    changed = False
    created_dir = ''
    deploy_dir = os.path.dirname(app_dir)
    if os.path.exists(app_dir):
        if os.path.islink(app_dir):
            app_dir = os.path.realpath(app_dir)
    if os.path.exists(deploy_dir):
        if os.path.islink(deploy_dir):
            deploy_dir = os.path.realpath(deploy_dir)
    if not os.path.exists(app_dir):
        try:
            os.stat(os.path.dirname(deploy_dir))
            os.makedirs(app_dir)
            changed = True
            created_dir = app_dir
        except OSError:
            e = get_exception()
            if "permission denied" in to_native(e).lower():
                msg = "Destination parent directory {} is not accessible".format(
                    os.path.dirname(deploy_dir))
                return (True, False, msg)
            if "file exists" not in to_native(e).lower():
                raise e
    if not os.path.isdir(app_dir):
        return (True, False, 'app_dir is not a directory')
    if not os.access(app_dir, os.W_OK):
        msg = "Destination {} not writable".format(app_dir)
        return (True, False, msg)
    return (False, changed, created_dir)
Python
def untar_in_place(module):
    ''' untar the tarball into the correctly timestamped directory

    Parameters
    ----------
    module : AnsibleModule
        module.params provides:
        str src (either source filepath, or source filename)
        str version_dir (the time-and-scm-stamp subdirectory we need to
            create; should be a subdirectory of app_dir)

    Returns
    -------
    tuple
        bool failed (whether we failed)
        bool changed if we created anything
        str message - directory created, error message on fail, or '' on no change
    '''
    source = module.params['src']
    app_dir = module.params['app_dir']
    version_dir = module.params['version_dir']
    source_filepath = os.path.join(tempfile.gettempdir(), os.path.basename(source))
    tar_extract_cmd = ["tar", "xf", source_filepath]

    if not os.path.isabs(version_dir):
        return (True, False, "version directory is not absolute")

    tmp_extract_dir = tempfile.mkdtemp()
    with _runatpath(tmp_extract_dir):
        rc, out, err = module.run_command(tar_extract_cmd)
    if rc:
        return (True, False,
                "Failed to extract tarfile. rc: {}, stdout: {}, stderr: {}".format(
                    str(rc), out, err))

    # list.append() returns None, so build the command first and then add the target.
    chown_cmd = _get_chown_cmd(module)
    if chown_cmd:
        chown_cmd.append(tmp_extract_dir)
        rc, out, err = module.run_command(chown_cmd)
        if rc:
            return (True, False,
                    "Failed to set permissions. rc: {}, stdout: {}, stderr: {}".format(
                        str(rc), out, err))

    if os.path.exists(version_dir):
        cur_new_comp = dircmp(tmp_extract_dir, version_dir)
        if len(cur_new_comp.diff_files) == 0:
            return (False, False, '')
        if not module.params['force']:
            return (True, False,
                    "{} already exists and differs; use force to overwrite".format(version_dir))
        # shutil.copytree() refuses to copy onto an existing directory.
        shutil.rmtree(version_dir)
    shutil.copytree(tmp_extract_dir, version_dir)
    return (False, True, version_dir)
Python
def _get_chown_cmd(module):
    ''' construct cmd for chowning extracted files '''
    if module.params['owner']:
        if module.params['group']:
            cmd = ['chown', '-R', '{}:{}'.format(
                module.params['owner'], module.params['group'])]
        else:
            cmd = ['chown', '-R', module.params['owner']]
    elif module.params['group']:
        cmd = ['chgrp', '-R', module.params['group']]
    else:
        cmd = []
    return cmd
Python
def chromsizes(inFile):
    """
    Fetch the chromosome list of a bbi file. Returns an ordered dictionary of
    chromosome names mapped to their sizes in bp.

    Parameters
    ----------
    inFile : str
        Path to BigWig or BigBed file.

    Returns
    -------
    OrderedDict (str -> int)
    """
    with cbbi.open(inFile) as f:
        return f.chromsizes
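A short usage sketch for chromsizes; the file path is hypothetical, and the loop simply relies on the documented OrderedDict return type:

# Hypothetical BigWig file; any BigWig or BigBed path works here.
sizes = chromsizes("signal.bw")
for chrom, size in sizes.items():
    print("{}\t{} bp".format(chrom, size))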
Python
def load_data(database_filepath):
    '''
    INPUT
    database_filepath - path of database with cleaned data

    OUTPUT
    X - array of messages to be used as covariable
    Y - array of aim variables
    column_names - list of column names for the aim variables

    This function splits the dataset in aim and covariables
    '''
    dbfile = 'sqlite:///' + database_filepath
    engine = create_engine(dbfile)
    df = pd.read_sql(database_filepath.replace(".db", "").replace("data/", ""), engine)
    X = df.message.values
    Y = df.drop(columns=['id', 'message']).values.astype(str)
    column_names = df.drop(columns=['id', 'message']).columns
    return X, Y, column_names
Python
def build_model():
    '''
    INPUT
    None

    OUTPUT
    pipeline - machine learning pipeline

    This function vectorizes and transforms (TF-IDF) the covariable and
    trains a multi-output Random Forest model
    '''
    pipeline = Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(RandomForestClassifier()))
    ])

    parameters = {
        'clf__estimator__max_features': ('auto', 5, 10, 20, 30),
        'clf__estimator__max_leaf_nodes': (None, 5, 10, 20)
    }

    pipeline = GridSearchCV(pipeline, param_grid=parameters)
    return pipeline
Python
def evaluate_model(model, X_test, Y_test, category_names):
    '''
    INPUT
    model - machine learning model
    X_test - co-variable of the test dataset
    Y_test - aim variable of the test dataset
    category_names - names of the aim variables

    OUTPUT
    None

    This function uses the machine learning model to predict based on the
    co-variable of the test dataset and compares this with the aim variable
    of the test dataset
    '''
    predicted = model.predict(X_test)
    y_true = pd.DataFrame(Y_test, columns=category_names)
    y_pred = pd.DataFrame(predicted, columns=category_names)
    for colnam in y_true.columns:
        print("Test of " + colnam + ":")
        print(classification_report(y_true[colnam], y_pred[colnam]))
Python
def save_model(model, model_filepath):
    '''
    INPUT
    model - machine learning model
    model_filepath - path to the file the model should be stored at

    OUTPUT
    None

    This function stores the machine learning model at the given filepath
    '''
    outfile = open(model_filepath, 'wb')
    pickle.dump(model, outfile)
    outfile.close()
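The load/build/evaluate/save helpers above are meant to be chained into a single training run. A minimal sketch of that flow, assuming scikit-learn's train_test_split is available; both file paths are hypothetical:

from sklearn.model_selection import train_test_split

# Hypothetical paths; adjust to the actual database and model locations.
X, Y, category_names = load_data("data/DisasterResponse.db")
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)

model = build_model()        # CountVectorizer -> TF-IDF -> multi-output RF, wrapped in GridSearchCV
model.fit(X_train, Y_train)  # the grid search runs here

evaluate_model(model, X_test, Y_test, category_names)
save_model(model, "models/classifier.pkl")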
Python
def load_data(messages_filepath, categories_filepath):
    '''
    INPUT
    messages_filepath - filepath of the csv-file with the messages
    categories_filepath - filepath of the csv-file with the categories

    OUTPUT
    df - combined dataframe of messages and categories

    This function creates a combined dataframe of messages and categories
    '''
    messages = pd.read_csv(messages_filepath)
    categories = pd.read_csv(categories_filepath)
    df = pd.merge(messages, categories, on='id', how='outer')

    # create a dataframe of the 36 individual category columns
    categories = categories.categories.str.split(";", expand=True)
    row = list(categories.iloc[0])
    category_colnames = [x[:-2] for x in row]
    # rename the columns of `categories`
    categories.columns = category_colnames

    # Convert category values to just numbers 0 or 1.
    for column in categories:
        # set each value to be the last character of the string
        categories[column] = categories[column].str.strip().str[-1]
        # convert column from string to numeric
        categories[column] = pd.to_numeric(categories[column])

    # Replace categories column in df with new category columns.
    df = df.drop(columns=['categories', 'original'])
    # concatenate the original dataframe with the new `categories` dataframe
    df = pd.concat([df, categories.reset_index(drop=True)], axis=1)
    return df
Python
def clean_data(df):
    '''
    INPUT
    df - combined dataframe of messages and categories

    OUTPUT
    df - cleaned dataframe

    This function removes duplicates of the dataframe and drops NA values
    '''
    df = df.drop_duplicates()
    df = df[df.related != 2]
    df = df.dropna()
    return df
Python
def save_data(df, database_filename):
    '''
    INPUT
    df - dataframe
    database_filename - filename where the dataframe should be stored

    OUTPUT
    None

    This function stores the dataframe as sqlite file
    '''
    dbfile = 'sqlite:///' + database_filename
    engine = create_engine(dbfile)
    df.to_sql(database_filename.replace(".db", "").replace("data/", ""),
              engine, index=False, if_exists='replace')
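Chained together, the three ETL helpers above form the usual extract-transform-load run that produces the database consumed by the training code earlier. A minimal sketch with hypothetical CSV and database paths:

# Hypothetical input/output paths for the ETL run.
df = load_data("data/disaster_messages.csv", "data/disaster_categories.csv")
df = clean_data(df)                        # drop duplicates, related==2 rows and NAs
save_data(df, "data/DisasterResponse.db")  # table name is derived from the filename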
Python
def load_work_area_config(self):
    """
    Try to load work area config from cached file
    :return: True if work area config was loaded, False otherwise
    """
    hash = self.get_kine_hash()
    if hash is None:
        return False
    try:
        with open("%s_%s_cache.json" % (self.__class__.__name__, hash), "rb") as cache_file:
            work_area_config = json.load(cache_file)
        self.work_area_config.update(work_area_config)
        return True
    except IOError:
        return False
Python
def save_work_area_config(self):
    """
    Try to save work area config into cache file
    """
    hash = self.get_kine_hash()
    # json.dump writes text, so open the cache file in text mode.
    with open("%s_%s_cache.json" % (self.__class__.__name__, hash), "w") as cache_file:
        json.dump(self.work_area_config, cache_file, indent=2)
Python
def draw_robot_preview(self, ax, show_robot=False, show_work_area=True, **kwargs):
    """
    Draw preview of robot and work area on matplotlib axis
    :param ax:
    :param show_robot:
    :param show_work_area:
    """
    pass
Python
def handle_event(self, event_dict):
    """
    Handle an event coming from the controller or widgets/params
    :param event_dict: Dict containing event data. Only mandatory key is the event name:
        {"event": "event_name_here", ... }
    :return: True if event was handled, false otherwise (good to determine if
        parent processed while in a child event handler)
    """
    event_type = event_dict.get("event", "")
    if event_type == "error":
        self._show_error(message=event_dict.get("message", "ERROR!"),
                         title=event_dict.get("title", "Error..."))
        return True
    return False
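A small sketch of dispatching an error event through handle_event; `activity` is a hypothetical instance of the class defining the method above, and the dict keys follow the format documented in its docstring:

handled = activity.handle_event({
    "event": "error",
    "message": "Could not reach the controller",
    "title": "Connection error",
})
# handled is True because the "error" event is consumed here;
# any other event name falls through and returns False.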
Python
def make_activity_panel(self, master):
    """
    Generate frame containing all controls for activity.
    Default panel includes slider controls for all parameters.
    :param master: master for the frame
    :return: the frame created (e.g. for Notebook insertion)
    """
    self._param_frame = ttk.Frame(master=master)
    for name, param in self._params.items():
        if isinstance(param, NumericalActivityParam):
            self._param_ctrls[name] = self._gen_numerical_param_ctrl(self._param_frame, param)
        elif isinstance(param, FilenameActivityParam):
            self._param_ctrls[name] = self._gen_filename_param_ctrl(self._param_frame, param)
        elif isinstance(param, ButtonActivityParam):
            self._param_ctrls[name] = self._gen_button_param_ctrl(self._param_frame, param)
    self._param_frame.pack(side=Tk.TOP, fill=Tk.X)
    return self._param_frame
Python
def draw_preview(self, ax):
    """
    Draw a preview of the robot path on the given Matplotlib axis
    :param ax: matplotlib axis on which to draw. Externally provided
    """
    raise NotImplementedError()
Python
def enumerate_index(conn, queries):
    """
    Get all candidate indexes that can be added. Only enumerate up to 3 columns.
    """
    table_name_results = run_query(
        conn, "SELECT tablename FROM pg_catalog.pg_tables where schemaname='public'")
    table_names = [tup[0] for tup in table_name_results]

    # First filter by column usage in queries, then enumerate up to 3 columns.
    index_candidates = set()
    for table_name in table_names:
        column_results = run_query(
            conn,
            f"SELECT column_name FROM information_schema.columns WHERE table_name='{table_name}'")
        columns = []
        for tup in column_results:
            col_name = tup[0]
            # Use a very conservative test to decide that a column is not used in the workload.
            found_in_query = False
            for q in queries:
                # If the two words co-appear then we cannot filter the column out.
                if q.find(table_name) >= 0 and q.find(col_name) >= 0:
                    found_in_query = True
                    break
            if found_in_query:
                columns.append(col_name)

        for index_type in INDEX_TYPES:
            for col1 in columns:
                for col2 in columns:
                    if col1 == col2:
                        index_candidates.add((table_name, (col1,), index_type))
                    elif index_type != 'hash':
                        # All the names must co-appear in one query to be considered.
                        found_multi_in_query = False
                        for q in queries:
                            if q.find(table_name) >= 0\
                                    and q.find(col1) >= 0\
                                    and q.find(col2) >= 0:
                                found_multi_in_query = True
                                break
                        if found_multi_in_query:
                            index_candidates.add((table_name, (col1, col2), index_type))

            # Enumerate 3 columns.
            for col1 in columns:
                for col2 in columns:
                    for col3 in columns:
                        if col1 == col2 or col2 == col3\
                                or col1 == col3 or index_type == 'hash':
                            continue
                        # All the names must co-appear in one query to be considered.
                        found_multi_in_query = False
                        for q in queries:
                            if q.find(table_name) >= 0\
                                    and q.find(col1) >= 0\
                                    and q.find(col2) >= 0\
                                    and q.find(col3) >= 0:
                                found_multi_in_query = True
                                break
                        if found_multi_in_query:
                            index_candidates.add((table_name, (col1, col2, col3), index_type))

    # Subtract the candidate set with all existing indexes.
    exist_indexes = set()
    index_results = run_query(
        conn, "SELECT tablename, indexdef from pg_indexes WHERE schemaname='public'")
    for table_name, index_statement in index_results:
        tokens = index_statement.split()
        idx = -1
        while tokens[idx].lower() != 'using':
            idx -= 1
        index_type = tokens[idx + 1]
        idx += 2
        index_cols = []
        while idx < 0:
            col = tokens[idx].replace(',', '').replace('(', '').replace(')', '')
            index_cols.append(col)
            idx += 1
        exist_indexes.add((table_name, tuple(index_cols), index_type))

    return index_candidates - exist_indexes
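Each candidate produced above is a (table_name, columns, index_type) tuple. A minimal sketch of turning such a tuple into the corresponding CREATE INDEX statement; the candidate values and the generated index name are hypothetical, while the USING clause mirrors the pg_indexes syntax the function parses:

def candidate_to_ddl(candidate):
    # candidate is (table_name, (col1, ...), index_type) as built by enumerate_index().
    table_name, cols, index_type = candidate
    index_name = "idx_{}_{}".format(table_name, "_".join(cols))
    return "CREATE INDEX {} ON {} USING {} ({})".format(
        index_name, table_name, index_type, ", ".join(cols))

# Example with a hypothetical candidate:
# candidate_to_ddl(("orders", ("customer_id", "created_at"), "btree"))
# -> 'CREATE INDEX idx_orders_customer_id_created_at ON orders USING btree (customer_id, created_at)'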