Python
def dump_service(call):
    """Handle calls to the dump service."""
    global LIGHTS_CONTROL
    if LIGHTS_CONTROL is not None:
        LIGHTS_CONTROL.dump()
    else:
        _LOGGER.warning("{}: failed to do dump call since LightsControl is not running".format(DOMAIN))
Python
def _update_prefix(value, default_prefix):
    """ Append prefix to a string, or to each list item, if it has no prefix yet """
    if isinstance(value, str):
        return [value, default_prefix + value]['.' not in value]
    elif isinstance(value, (list, tuple)):
        return [[v, default_prefix + v]['.' not in v] for v in value]
    else:
        raise ValueError("Value type should be string/list/tuple (got {})".format(type(value)))
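For clarity, the boolean-index trick above behaves as follows. This is a small illustration, not part of the original module; it calls the helper through LightsControlConfig, as the neighbouring code does:

# Illustration only: values that already contain a '.' are left untouched,
# bare names get the default prefix prepended.
LightsControlConfig._update_prefix("kitchen", "light.")        # -> "light.kitchen"
LightsControlConfig._update_prefix("light.kitchen", "light.")  # -> "light.kitchen"
LightsControlConfig._update_prefix(["kitchen", "sensor.hall"], "light.")
# -> ["light.kitchen", "sensor.hall"]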
Python
def _update_keys_prefix(value, default_prefix):
    """ Append prefix to each key that doesn't contain a prefix yet """
    if not isinstance(value, dict):
        raise ValueError("Value type should be dict (got {})".format(type(value)))
    result = {LightsControlConfig._update_prefix(k, default_prefix): v for k, v in value.items()}
    return result
Python
def load(self):
    """ Loads configuration. The previous configuration is dropped completely """
    self._entities = {}
    self._groups = {}
    self._configuration = {}
    if self._use_variable:
        h = self._h
        data = {
            'on_state': value_get(h, self._on_state_var, {}),
            'off_state': value_get(h, self._off_state_var, {}),
            'power_save': value_get(h, self._power_save_var, {}),
            'notify_turn_off': value_get(h, self._notify_turn_off_var, {}),
            'switch_map': value_get(h, self._switch_map_var, {}),
            'sensor_map': value_get(h, self._sensor_map_var, {}),
            'sensor_timeout': value_get(h, self._sensor_timeout_var, 5),
            'automation_map': value_get(h, self._automation_map_var, {})
        }
    else:
        data = deepcopy(self._intial_data)
    self._parse_on_off_config(data['on_state'], is_on_state=True)
    self._parse_on_off_config(data['off_state'], is_on_state=False)
    self._parse_power_save_config(data['power_save'])
    self._parse_notify_config(data['notify_turn_off'])
    self._parse_switch_config(data['switch_map'])
    self._parse_sensor_default_timeout(data['sensor_timeout'])
    self._parse_sensor_config(data['sensor_map'])
    self._parse_automation_config(data['automation_map'])
    self.reload_groups()
Python
def _parse_record(self, keywords, min_len, *args, **kwargs):
    """ Parses a configuration record with positional OR keyword args """
    result = {}
    if not ALLOW_POSITIONAL_CONF and len(args) > 0:
        raise ValueError("Unexpected format: positional args are disabled!")
    if not (len(args) > 0 and len(kwargs) == 0 or len(args) == 0 and len(kwargs) > 0):
        raise ValueError("Unexpected format: "
                         "Only 'all positional' or 'all keyworded' case is supported")
    if not (len(args) > 0 or len(kwargs) > 0):
        raise ValueError("Unexpected format: "
                         "Expected list or dict")
    if len(args) > 0:
        record = args
    else:
        record = kwargs

    if LOG_PARSING:
        self._log("Parsing record:\n format: {}\n data: {} ".format(pprint.pformat(keywords),
                                                                    pprint.pformat(record, indent=4)))

    if isinstance(record, (list, tuple)):
        for i in range(0, len(keywords)):
            if i < len(record):
                result[keywords[i][0]] = record[i]
            else:
                if i < min_len:
                    raise ValueError("Unexpected format: "
                                     "Expected at least {} positional arguments"
                                     " ({})".format(min_len, keywords[:min_len]))
                result[keywords[i][0]] = keywords[i][1]
    else:
        for i in range(0, len(keywords)):
            if i < min_len and keywords[i][0] not in record:
                raise ValueError("Unexpected format: "
                                 "Field {} is mandatory".format(keywords[i][0]))
            result[keywords[i][0]] = record.get(keywords[i][0], keywords[i][1])

    for i in range(0, len(keywords)):
        f = keywords[i]
        if len(f) > 2 and f[2] is not None:
            value_check = False
            if isinstance(f[2], tuple) and not all(isinstance(item, type) for item in f[2]):
                assert not any(isinstance(item, type) for item in f[2]), "Mixed type/value check is not allowed!"
                value_check = True
            if not value_check:
                if not isinstance(result[f[0]], f[2]) and (i < min_len or result[f[0]] != f[1]):
                    raise ValueError("Unexpected value type for {}: Expected: {}, got: {}".format(
                        f[0], f[2], type(result[f[0]])))
            else:
                if result[f[0]] not in f[2] and (i < min_len or result[f[0]] != f[1]):
                    raise ValueError("Unexpected value for {}: Expected any of: {}, got: {}".format(
                        f[0], f[2], result[f[0]]))
        if result[f[0]] is not None:
            if len(f) > 3 and f[3] is not None:
                result[f[0]] = LightsControlConfig._update_prefix(result[f[0]], f[3])
            if len(f) > 4 and f[4] is not None and not isinstance(result[f[0]], f[4]):
                if f[4] == list:
                    if isinstance(result[f[0]], tuple):
                        result[f[0]] = list(result[f[0]])
                    else:
                        result[f[0]] = [result[f[0]]]
                elif f[4] == tuple:
                    if isinstance(result[f[0]], list):
                        result[f[0]] = tuple(result[f[0]])
                    else:
                        result[f[0]] = (result[f[0]],)  # single-element tuple (trailing comma was missing)
                else:
                    result[f[0]] = f[4](result[f[0]])
    return result
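To make the keyword-tuple format easier to follow, here is a hypothetical spec and call, inferred purely from how f[0]..f[4] are indexed above (field name, default, type-or-value check, entity-id prefix, coercion target). The field names and the config instance are made up for illustration:

# Hypothetical spec; 'light', 'when' and 'state' are illustrative field names,
# and config is assumed to be a LightsControlConfig instance.
keywords = [
    ('light', None, (str, list, tuple), 'light.', list),  # mandatory (min_len=1)
    ('when',  None, (list, tuple),      None,     list),  # optional time range(s)
    ('state', 'on', ('on', 'off'),      None,     None),  # value check, not a type check
]
record = config._parse_record(keywords, 1, light='kitchen')
# -> {'light': ['light.kitchen'], 'when': None, 'state': 'on'}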
Python
def _validated_time(data, nested=None):
    """ Validates time entry and converts it to _ChoppedTime """
    if nested is False:
        if not isinstance(data, (list, tuple)):
            return None
        if len(data) != 2:
            return None
        data = list(data)
        for i in range(0, 2):
            if not isinstance(data[i], str):
                return None
            data[i] = data[i].strip()
            m = re.search(r"^(\d\d):(\d\d):(\d\d)$", data[i])
            if m is None:
                return None
            hh, mm, ss = m.groups()
            if int(hh) >= 24:
                return None
            if int(mm) >= 60:
                return None
            if int(ss) >= 60:
                return None
            data[i] = _ChoppedTime(data[i])
        return data
    elif nested is True:
        if not isinstance(data, (list, tuple)):
            return None
        data = list(data)
        if len(data) == 0:
            return None
        for i in range(0, len(data)):
            new_value = LightsControlConfig._validated_time(data[i], nested=False)
            if new_value is None:
                return None
            data[i] = new_value
        return data
    else:
        new_value = LightsControlConfig._validated_time(data, nested=False)
        if new_value is None:
            new_value = LightsControlConfig._validated_time(data, nested=True)
        else:
            new_value = [new_value]
        return new_value
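Roughly, the three branches accept the following inputs (an illustration based on the code above; _ChoppedTime comes from the same module):

# A single ["HH:MM:SS", "HH:MM:SS"] pair is wrapped into a one-element list:
LightsControlConfig._validated_time(["07:00:00", "23:30:00"])
# -> [[_ChoppedTime, _ChoppedTime]]

# A list of pairs is validated element by element:
LightsControlConfig._validated_time([["07:00:00", "10:00:00"], ["18:00:00", "23:00:00"]])
# -> [[_ChoppedTime, _ChoppedTime], [_ChoppedTime, _ChoppedTime]]

# Anything malformed (bad field count, out-of-range values, wrong types) yields None:
LightsControlConfig._validated_time(["07:00", "25:00:00"])  # -> None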
Python
def _update_entities(self, entities_list, kind, group=None):
    """ Updates the list of entities mentioned in configuration. Extrapolates all groups """
    for item in entities_list:
        if item[:6] != "group.":
            if group is None:
                group = "_self_"
            if item not in self._entities:
                self._entities[item] = {'kind': {group: [kind]}, 'groups': [group]}
            else:
                if group not in self._entities[item]['kind']:
                    self._entities[item]['kind'][group] = [kind]
                elif kind not in self._entities[item]['kind'][group]:
                    self._entities[item]['kind'][group].append(kind)
                if group not in self._entities[item]['groups']:
                    self._entities[item]['groups'].append(group)
        else:
            if group is not None:
                raise ValueError("LightsControl: group '{}' is registered with a non-None group argument".format(
                    item))
            if item not in self._groups:
                self._groups[item] = {'kind': [kind], 'entities': []}
            else:
                if kind not in self._groups[item]['kind']:
                    self._groups[item]['kind'].append(kind)
Python
def _reload_group(self, name, entities):
    """ Updates data about group content and entity-group relations """
    exclude_from_group = [e for e in self._groups[name]['entities'] if e not in entities]
    self._groups[name]['entities'] = entities
    for entity in exclude_from_group:
        if entity not in self._entities:  # TODO: check with assert as this case shouldn't exist if all is OK
            continue
        self._entities[entity]['groups'] = list(set([g for g in self._entities[entity]['groups'] if g != name]))
        if name in self._entities[entity]['kind']:
            del self._entities[entity]['kind'][name]
    for kind in self._groups[name]['kind']:
        self._update_entities(entities, kind, name)
Python
def _get_group_entities(self, group):
    """ Gets all entities that are in the specified group """
    state = value_get_full(self._h, group, None)
    if state is None:
        self._error("Failed to find group '{}'".format(group))
        return []
    value = state['attributes'].get('entity_id', None)
    if value is None:
        self._error("Failed to get entities of group '{}'".format(group))
        return []
    if not isinstance(value, (str, list, tuple)):
        self._error("Unexpected format for entities of group '{}'".format(group))
        return []
    if isinstance(value, str):
        value = [value]
    return list(value)
Python
def reload_groups(self):
    """ Reloads groups and updates the entities list to maintain object coherency """
    for k in self._groups:
        entities = self._get_group_entities(k)
        self._reload_group(k, entities)
Python
def entities_list(self, kind, names, chain=None):
    """ Returns all entities of the specified kind that were mentioned in configuration for the specified group.
    Extrapolates nested groups """
    el = []
    if chain is None:
        chain = []
    if len(chain) >= 10:
        self._error("LightsControl: Looks like there is a group loop or excessive group nesting: {}."
                    " Stopping recursion!".format('->'.join(chain)))
        return el
    if isinstance(names, str):
        names = [names]
    for item in names:
        if item in chain:
            continue
        if isinstance(item, (list, tuple)):
            el += self.entities_list(kind, item, chain)
        elif item in self._entities and self._entity_is_kind_of(item, kind):
            el.append(item)
        elif item in self._groups:
            el += self.entities_list(kind, self._groups[item]['entities'], chain + [item])
    return el
Python
def entities_of_kind(self, kind):
    """ Returns list of all entities of specified kind """
    result = []
    for k in self._entities.keys():
        if self._entity_is_kind_of(k, kind):
            result.append(k)
    return result
Python
def _parse_rules_map(self, rules_map, map_name, key_field, parse_method, list_allowed):
    """ Parses a complicated dictionary config (in which each value is a list of list-records or dict-records).
    Returns a normalized dict of rules """
    if LOG_PARSING:
        self._log("Parsing rules_map:\n map_name={}\n key_field={}\n parse_method={}\n"
                  " data: {} ".format(map_name, key_field, parse_method, pprint.pformat(rules_map, indent=4)))
    result = {}
    if not isinstance(rules_map, dict):
        self._error("LightsControl: Unexpected {} format:"
                    " 'dict' expected".format(map_name))
        return {}
    for k, v in rules_map.items():
        if not isinstance(v, (list, tuple)):
            self._error("LightsControl: Unexpected {} format for '{}' rule:"
                        " 'list' expected".format(map_name, k))
            continue
        rules = []
        for item in v:
            if isinstance(item, (list, tuple)):
                rule = parse_method(k, *item)
                if not isinstance(rule, list):
                    rules.append(rule)
                else:
                    rules += rule
            elif isinstance(item, dict):
                rule = parse_method(k, **item)
                if not isinstance(rule, list):
                    rules.append(rule)
                else:
                    rules += rule
            else:
                self._error("LightsControl: Unexpected {} format for '{}' rule:"
                            " 'list' or 'dict' expected".format(map_name, k))
                continue
        for rule in rules:
            if rule is None:
                continue
            if key_field != list:
                key = rule[key_field]
            else:
                key = k
            if key not in result:
                result[key] = []
            result[key].append(rule)
    return result
Python
def _expand_map_record(self, config_name, record, key_fields, kind, list_wanted, result):
    """ Updates a dict with the values specified in key_fields as keys referencing the corresponding rules record """
    entities = []
    for k in key_fields:
        if kind is not None:
            entities += self.entities_list(kind, record[k])
        else:
            entities += record[k]
    entities = list(set(entities))
    for entity in entities:
        if list_wanted:
            if entity not in result:
                result[entity] = []
            result[entity].append(record)
        else:
            if entity not in result:
                result[entity] = record
            else:
                result[entity] = None
                self._error("LightsControl: overlapping records for {} in '{}'".format(entity, config_name))
Python
def _expand_map(self, config_name, data, key_fields, kind, list_wanted, go_deeper=False):
    """ Returns dict with values specified in key_field as key referencing corresponding rules record """
    result = {}
    for k, v in data.items():
        if go_deeper:
            records = v
        else:
            records = [v]
        for record in records:
            self._expand_map_record(config_name, record, key_fields, kind, list_wanted, result)
    result = {k: v for k, v in result.items() if v is not None}
    return result
Python
def _remap(self, config_name, data, key_fields, kind, list_wanted, go_deeper=False):
    """ Returns dict with values specified in key_field as second level key referencing corresponding rules record """
    result = {}
    for k, v in data.items():
        if go_deeper:
            records = v
        else:
            records = [v]
        result[k] = {}
        for record in records:
            self._expand_map_record(config_name, record, key_fields, kind, list_wanted, result[k])
        result[k] = {m: v for m, v in result[k].items() if v is not None}
    return result
Python
def restart(self):
    """ Totally restarts the component - reloads configuration, reinitiates context """
    self._inited = False
    self._scheduled_calls = []
    self.context = {}
    self.sensor_timeout = {}
    self.on_state = {}
    self.off_state = {}
    self.automation_map = {}
    self.switch_events = {}
    self.sensor_map = {}
    self.sensor_lights = {}
    self.power_save = {}
    self.notify_turn_off = {}

    errors = False
    try:
        self._config.load()
    except Exception as e:
        self._error("LightsControl failed to start due to exception while parsing configuration: {}".format(e))
        errors = True

    config = self._config.as_dict
    self.sensor_timeout = config['sensor_timeout']
    self.reload_groups()

    self._inited = not errors

    if LOG_PARSING and not errors:
        self._log(" switch_events:" + pprint.pformat(self.switch_events, indent=4))
        self._log(" sensor_map:" + pprint.pformat(self.sensor_map, indent=4))
        self._log(" sensor_lights:" + pprint.pformat(self.sensor_lights, indent=4))
        self._log(" sensor_timeout:" + pprint.pformat(self.sensor_timeout, indent=4))
        self._log(" on_state:" + pprint.pformat(self.on_state, indent=4))
        self._log(" off_state:" + pprint.pformat(self.off_state, indent=4))
        self._log(" automation_map:" + pprint.pformat(self.automation_map, indent=4))
        # The original logged automation_map here three more times (copy-paste slip);
        # log the attributes the labels actually name.
        self._log(" power_save:" + pprint.pformat(self.power_save, indent=4))
        self._log(" notify_turn_off:" + pprint.pformat(self.notify_turn_off, indent=4))
Python
def reload_groups(self):
    """ Reloads groups and updates entity-rule mapping accordingly """
    self._config.reload_groups()

    # Update other config data as it depends on groups (groups are extrapolated)
    self.on_state = self._config.on_state_lights()
    self.off_state = self._config.off_state_lights()
    self.automation_map = self._config.automation_map_lights()
    self.switch_events = self._config.switch_events()
    self.sensor_map = self._config.sensor_remap()
    self.sensor_lights = self._config.sensor_lights()
    self.power_save = self._config.power_save_lights()
    self.notify_turn_off = self._config.notify_turn_off_lights()

    if isinstance(self._hass, HassMock):
        lights = self._config.all_lights
        sensors = self._config.entities_of_kind('sensor')
        sensors = {k: STATE_OFF for k in sensors}
        automations = self._config.entities_of_kind('automation')
        self._hass.init_state(lights, sensors, automations)

    self._update_context()
Python
def __light_is_on(state, off_state):
    """ Returns True if state is not STATE_OFF and is not same as off_state """
    return state['state'] != STATE_OFF and \
        state['state'] != off_state['state'] and \
        ('brightness' not in off_state or state['brightness'] != off_state['brightness'])
Python
def _light_is_on(self, light, time_now):
    """ Returns True if the light is on, i.e. its state (brightness) is not the same as off_state """
    state = light_state(self._h, light)
    if state is None:
        return False
    _os = self._off_state(light, time_now=time_now)
    return self.__light_is_on(state, _os)
Python
def _update_context(self):
    """ Updates context:
    - includes lights from configuration and initiates their context according to their state
    - removes from context lights that are no longer in configuration
    """
    lights = self._config.all_lights
    time_now = self._time_now()
    seconds_now = self._time_to_seconds(time_now)
    for light in lights:
        if light not in self.context:
            state = light_state(self._h, light)
            if state is None:
                continue
            _os = self._off_state(light, time_now=time_now)
            # TODO: make context a class
            self.context[light] = {
                # Current on state:
                # * -1 - customized off (off_state won't be changed by watchdog)
                # *  0 - off
                # *  1 - on
                # *  2 - customized on (on_state won't be changed by watchdog)
                'is_on': int(self.__light_is_on(state, _os)),
                # Time to switch off light, in seconds. If negative - don't switch off
                'switch_off': -1,
                # Time to indicate that light would be switched off soon, in seconds
                'notify': -1,
                # Used to determine if the switch-off notification action was already fired
                'notified': False,
                # Used to distinguish actions by LightsControl from other light actions
                # (time in seconds when the last action by LightsControl was committed)
                'last_action_secs': seconds_now,
                # Used to determine if power saving should be activated
                'last_activity_secs': seconds_now,
            }
    for light in list(self.context.keys()):
        if light not in lights:
            del self.context[light]
    self._save_context()
Python
def _push_context(self):
    """ For debug purposes only - flushes context changes into an external variable """
    # NOTE: no selective context update since it's a full-fledged component now. Does a full dump
    self._save_context()
    return
Python
def _time_in_range(self, time_range, value=None):
    """ Ensures that the given time falls into the specified range. If value isn't specified, the current time is used """
    r = []
    vr = self._validate_time_range(time_range)
    if vr == _VALID_TIMERANGE_MULTI:
        return any(self._time_in_range(item, value) for item in time_range)

    r.append(_ChoppedTime(time_range[0]))
    r.append(_ChoppedTime(time_range[1]))

    if value is None:
        value = self._time_now()
    value = _ChoppedTime(value)

    if r[0].value <= r[1].value:
        if r[0].value <= value.value <= r[1].value:
            return True
        else:
            return False
    else:
        # Range wraps past midnight
        if value.value >= r[0].value or value.value <= r[1].value:
            return True
        else:
            return False
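The else-branch above covers ranges that wrap past midnight. Roughly (an illustration only, assuming _validate_time_range accepts a plain [start, end] pair of "HH:MM:SS" strings and lc is an instance of this class):

lc._time_in_range(["22:00:00", "06:00:00"], "23:30:00")  # True  (after the start)
lc._time_in_range(["22:00:00", "06:00:00"], "05:00:00")  # True  (before the end, past midnight)
lc._time_in_range(["22:00:00", "06:00:00"], "12:00:00")  # False (outside the wrapped range)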
Python
def _automation_is_active(self, name):
    """ Ensures that automation for specified entity is active """
    if name not in self.automation_map:
        return True
    if name not in self.context:
        return False
    return value_get(self._h, self.automation_map[name], STATE_ON) == STATE_ON
Python
def _off_state(self, light, time_now):
    """ Returns OFF state for specified light according to specified/current time """
    _os = {'state': STATE_OFF}
    if not self._automation_is_active(light):
        return _os
    if light not in self.context:
        return _os
    if light in self.off_state:
        states = self.off_state[light]
        for s in states:
            if 'when' in s:
                if self._time_in_range(s['when'], time_now):
                    if isinstance(s['state'], str) and s['state'] not in (STATE_ON, STATE_OFF):
                        s['state'] = value_get(self._h, s['state'], STATE_OFF)
                    if s['state'] != STATE_OFF:
                        _os = {'state': STATE_ON, 'brightness': 255}
                        if s['state'] != STATE_ON and isinstance(s['state'], int) and 0 <= s['state'] <= 255:
                            _os['brightness'] = s['state']
                    break
            else:
                value = value_get(self._h, s['sensor'])
                if value is not None and self._value_is_within_range(value, s['value']):
                    if isinstance(s['state'], str) and s['state'] not in (STATE_ON, STATE_OFF):
                        s['state'] = value_get(self._h, s['state'], STATE_OFF)
                    if s['state'] != STATE_OFF:
                        _os = {'state': STATE_ON, 'brightness': 255}
                        if s['state'] != STATE_ON and isinstance(s['state'], int) and 0 <= s['state'] <= 255:
                            _os['brightness'] = s['state']
                    break
    return _os
Python
def _on_state(self, light, time_now):
    """ Returns ON state for specified light according to specified/current time """
    _os = {'state': STATE_ON, 'brightness': 255}
    if not self._automation_is_active(light):
        return _os
    if light not in self.context:
        return _os
    if light in self.on_state:
        states = self.on_state[light]
        for s in states:
            if 'when' in s:
                if self._time_in_range(s['when'], time_now):
                    if isinstance(s['state'], str) and s['state'] not in (STATE_ON, STATE_OFF):
                        s['state'] = value_get(self._h, s['state'], STATE_ON)
                    if s['state'] != STATE_OFF:
                        if s['state'] != STATE_ON and isinstance(s['state'], int) and 0 <= s['state'] <= 255:
                            _os['brightness'] = s['state']
                    else:
                        _os = {'state': STATE_OFF}
                    break
            else:
                value = value_get(self._h, s['sensor'])
                if value is not None and self._value_is_within_range(value, s['value']):
                    if isinstance(s['state'], str) and s['state'] not in (STATE_ON, STATE_OFF):
                        s['state'] = value_get(self._h, s['state'], STATE_ON)
                    if s['state'] != STATE_OFF:
                        if s['state'] != STATE_ON and isinstance(s['state'], int) and 0 <= s['state'] <= 255:
                            _os['brightness'] = s['state']
                    else:
                        _os = {'state': STATE_OFF}
                    break
    return _os
Python
def on_light_change(self, light, seconds_now=None):
    """ Updates the current light state.
    Mainly used when the light was turned on/off via the GUI or by some other means, not by LightsControl
    """
    if self._inited is None:
        self._log("LightsControl is loading configuration")
        self.restart()
    if not self._inited:
        return
    if INFIELD_DEBUG:
        self._warning("on_light_change({}, {})".format(light, seconds_now))
    if light not in self.context:
        if INFIELD_DEBUG:
            self._warning("  entity {} not in context".format(light))
        return
    if seconds_now is None or seconds_now == '':
        seconds_now = self._time_to_seconds(self._time_now())
    else:
        seconds_now = float(seconds_now)
    if self.context[light]['last_action_secs'] + 2 < seconds_now:  # NOTE: 2 seconds margin for self's action
        ls = light_state(self._h, light)
        if ls is None:
            return
        if INFIELD_DEBUG:
            self._warning("  {} state is {}".format(light, ls))
        if ls['state'] == STATE_ON:
            if INFIELD_DEBUG:
                self._warning("  {} ON state is {}".format(light, self._on_state(light, self._time_now())))
            self.context[light]['is_on'] = [1, 2][ls != self._on_state(light, self._time_now())]
        else:
            ls = {'state': STATE_OFF}
            if INFIELD_DEBUG:
                self._warning("  {} OFF state is {}".format(light, self._off_state(light, self._time_now())))
            self.context[light]['is_on'] = [0, -1][ls != self._off_state(light, self._time_now())]
        self.context[light]['switch_off'] = -1
        self.context[light]['notify'] = -1
        self.context[light]['notified'] = False
        if self.context[light]['is_on'] > 0:
            self.context[light]['last_activity_secs'] = seconds_now
        self._push_context()
        if INFIELD_DEBUG:
            self._warning("  accepted external state change. now it's: {}".format(self.context[light]))
    else:
        if INFIELD_DEBUG:
            self._warning("  reject state change as it's close to self's recent action"
                          " ({} +2 >= {})".format(self.context[light]['last_action_secs'], seconds_now))
Python
def finish_populating_widget_popup(self, widget, popup):
    """ A right click menu is about to be shown. (IDA 7) """
    inject_callee_actions(widget, popup, idaapi.get_widget_type(widget))
    return 0
Python
def prior(kernel_size, bias_size):  # removed dtype=None, unused argument
    """Define the prior weight distribution as a Normal with mean=0 and stddev=1."""
    number = kernel_size + bias_size
    prior_model = keras.Sequential(
        [
            tfp.layers.DistributionLambda(
                lambda t: tfp.distributions.MultivariateNormalDiag(
                    loc=tf.zeros(number), scale_diag=tf.ones(number)
                )
            )
        ]
    )
    return prior_model
Python
def posterior(kernel_size, bias_size, dtype=None):
    """Define the variational posterior weight distribution as a multivariate Gaussian.
    Learnable parameters are means, variances, and covariances."""
    number = kernel_size + bias_size
    posterior_model = keras.Sequential(
        [
            tfp.layers.VariableLayer(
                tfp.layers.MultivariateNormalTriL.params_size(number), dtype=dtype
            ),
            tfp.layers.MultivariateNormalTriL(number),
        ]
    )
    return posterior_model
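The two factories above follow the (kernel_size, bias_size, dtype) contract used by tfp.layers.DenseVariational, except that prior() had its dtype argument removed. A hypothetical way to wire them into a variational layer (train_size and the input width are placeholders, not from the original code) is:

import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow import keras

# Hypothetical usage sketch; train_size and the feature width (8) are placeholders.
train_size = 1000
inputs = keras.Input(shape=(8,))
features = tfp.layers.DenseVariational(
    units=16,
    # DenseVariational calls its factories with (kernel_size, bias_size, dtype),
    # so the trimmed prior() is adapted with a lambda that drops dtype.
    make_prior_fn=lambda k, b, dtype=None: prior(k, b),
    make_posterior_fn=posterior,
    kl_weight=1 / train_size,  # weights the KL term relative to the dataset size
    activation="sigmoid",
)(inputs)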
Python
def create_model_outputs_prob(target_names, features):
    """Create a layer with mean and std for all outputs."""
    # Create a probabilistic output (Normal distribution), and use the `Dense` layer
    # to produce the parameters of the distribution.
    # We set units=2 to learn both the mean and the variance of the Normal distribution.
    outputs = []
    for target_name in target_names:
        distribution_params = layers.Dense(units=2, name=target_name + '_params')(features)
        outputs.append(tfp.layers.IndependentNormal(1, name=target_name)(distribution_params))
    return outputs
Python
def create_deterministic_nn(feature_names, target_names, hidden_units, name='DNN', out='D'):
    """Function for a deterministic neural network based on Nagrath."""
    inputs = create_model_inputs(feature_names)
    features = inputs  # layers.concatenate(list(inputs.values()))
    # Create hidden layers using the Dense layer.
    for units in hidden_units:
        features = layers.Dense(
            units=units,
            activation="sigmoid",
        )(features)
    if out == 'D':
        outputs = create_model_outputs_det(target_names, features)
    if out == 'P':
        outputs = create_model_outputs_prob(target_names, features)
    model = keras.Model(inputs=inputs, outputs=outputs, name=name)
    return model
Python
def preprocessing(arrays, standarize=False, bounds=dict(), skip=None):
    """Preprocess the data before doing any modeling.

    Parameters
    ----------
    arrays : list
        List of pd.DataFrames. The first element should be the train set.
    standarize : bool, default = False
        Standardize features by removing the mean and scaling to unit variance.
    bounds : dict, default = {}
        Dictionary specifying cutoff min and max values for variables.
    skip : list, default = None
        Variables to omit from the preprocessing.

    Returns
    -------
    preprocessed : list
        List of pd.DataFrames (followed by the fitted scaler if standarize is True).
    """
    n_arrays = len(arrays)
    if n_arrays == 0:
        raise ValueError("At least one array required as input")

    if standarize:
        scaler = StandardScaler().fit(arrays[0])

    preprocessed = []
    for i in arrays:
        # Cutoff or filter
        if len(bounds) > 0:
            for k, bon in bounds.items():
                i = i.loc[(i[k] >= bon[0]) & (i[k] <= bon[1])]
        # Scaling
        if standarize:
            preprocessed.append(pd.DataFrame(scaler.transform(i), index=i.index, columns=i.columns))
        else:
            preprocessed.append(i)
        if skip:
            for k in skip:
                preprocessed[-1][k] = i[k]

    if standarize:
        preprocessed.append(scaler)

    return preprocessed
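A hypothetical call with made-up column names, showing that the fitted scaler comes back as the last list element when standarize=True:

# Hypothetical usage: train_df/test_df and the column names are placeholders.
bounds = {"temperature": (250.0, 400.0)}   # drop rows outside this range
train_s, test_s, scaler = preprocessing(
    [train_df, test_df], standarize=True, bounds=bounds, skip=["valve_open"]
)
# 'valve_open' is copied back unscaled; all other columns are standardized with
# the scaler fitted on the (unfiltered) train set.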
Python
def save_model(parentdir, model, settings=None):
    """Save a full TensorFlow model and its settings.

    Parameters
    ----------
    parentdir : root directory
    model : TensorFlow model
    settings : dictionary with training settings

    Returns
    -------
    save_path
    """
    save_path = os.path.join(parentdir, 'surrogate_models', 'saved_models', model.name)
    model.save(save_path)
    if settings:
        with open(os.path.join(save_path, 'settings.pickle'), 'wb') as file:
            pickle.dump(settings, file)
    return save_path
Python
def load_model(model_dir):
    """Load a full TensorFlow model and its settings.

    Parameters
    ----------
    model_dir : directory of the saved model

    Returns
    -------
    model
    settings
    """
    model = tf.keras.models.load_model(model_dir)
    try:
        with open(os.path.join(model_dir, 'settings.pickle'), 'rb') as file:
            settings = pickle.load(file)
    except (OSError, pickle.UnpicklingError):  # missing or unreadable settings file
        settings = None
    return model, settings
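A hypothetical round trip using the two helpers (parentdir and the trained model are placeholders):

# Hypothetical usage: save a trained model plus its training settings, then restore both.
settings = {"hidden_units": [16, 16], "epochs": 200}
path = save_model(parentdir, model, settings)
restored_model, restored_settings = load_model(path)  # settings is None if the pickle is absent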
Python
def permission_grants_for_service_account(self, name):
    # type: (str) -> List[ServiceAccountPermissionGrant]
    """Return all permission grants for a service account.

    TODO(rra): Currently does not expand permission aliases because they are not expanded by
    the graph.
    """
    user_details = self.graph.get_user_details(name)
    permissions = []
    for permission_data in user_details["permissions"]:
        permission = ServiceAccountPermissionGrant(
            service_account=name,
            permission=permission_data["permission"],
            argument=permission_data["argument"],
            granted_on=datetime.utcfromtimestamp(permission_data["granted_on"]),
            is_alias=False,
            grant_id=None,
        )
        permissions.append(permission)
    return permissions
Python
def permission_grants_for_service_account(self, name):
    # type: (str) -> List[ServiceAccountPermissionGrant]
    """Return all permission grants for a service account.

    TODO(rra): Currently does not expand permission aliases.
    """
    grants = self.session.query(
        Permission.name,
        ServiceAccountPermissionMap.argument,
        ServiceAccountPermissionMap.granted_on,
        ServiceAccountPermissionMap.id,
    ).filter(
        User.username == name,
        User.enabled == True,
        ServiceAccount.user_id == User.id,
        Permission.id == ServiceAccountPermissionMap.permission_id,
        ServiceAccountPermissionMap.service_account_id == ServiceAccount.id,
    )
    return [
        ServiceAccountPermissionGrant(
            service_account=name,
            permission=g.name,
            argument=g.argument,
            granted_on=g.granted_on,
            is_alias=False,
            grant_id=g.id,
        )
        for g in grants.all()
    ]
Python
def permission_grants_for_user(self, name):
    # type: (str) -> List[GroupPermissionGrant]
    """Return all permission grants a user has from whatever source.

    TODO(rra): Currently does not expand permission aliases, and therefore doesn't match the
    graph behavior. Use with caution until that is fixed.
    """
    now = datetime.utcnow()
    user = User.get(self.session, name=name)
    if not user or user.role_user or user.is_service_account or not user.enabled:
        return []

    # Get the groups of which this user is a direct member.
    groups = (
        self.session.query(Group.id)
        .join(GroupEdge, Group.id == GroupEdge.group_id)
        .join(User, User.id == GroupEdge.member_pk)
        .filter(
            Group.enabled == True,
            User.id == user.id,
            GroupEdge.active == True,
            GroupEdge.member_type == OBJ_TYPES["User"],
            GroupEdge._role != GROUP_EDGE_ROLES.index("np-owner"),
            or_(GroupEdge.expiration > now, GroupEdge.expiration == None),
        )
        .distinct()
    )
    group_ids = [g.id for g in groups]

    # If the user was not a member of any group, we can return early.
    if not group_ids:
        return []

    # Now, get the parent groups of those groups and so forth until we run out of levels of the
    # tree. Use a set of seen group_ids to avoid querying the same group twice if a user is a
    # member of it via multiple paths.
    seen_group_ids = set(group_ids)
    while group_ids:
        parent_groups = (
            self.session.query(Group.id)
            .join(GroupEdge, Group.id == GroupEdge.group_id)
            .filter(
                GroupEdge.member_pk.in_(group_ids),
                Group.enabled == True,
                GroupEdge.active == True,
                GroupEdge.member_type == OBJ_TYPES["Group"],
                GroupEdge._role != GROUP_EDGE_ROLES.index("np-owner"),
                or_(GroupEdge.expiration > now, GroupEdge.expiration == None),
            )
            .distinct()
        )
        group_ids = [g.id for g in parent_groups if g.id not in seen_group_ids]
        seen_group_ids.update(group_ids)

    # Return the permission grants.
    group_permission_grants = (
        self.session.query(
            Group.groupname,
            Permission.name,
            PermissionMap.argument,
            PermissionMap.granted_on,
            PermissionMap.id,
        )
        .filter(
            Permission.id == PermissionMap.permission_id,
            PermissionMap.group_id.in_(seen_group_ids),
            Group.id == PermissionMap.group_id,
        )
        .all()
    )
    return [
        GroupPermissionGrant(
            group=g.groupname,
            permission=g.name,
            argument=g.argument,
            granted_on=g.granted_on,
            is_alias=False,
            grant_id=g.id,
        )
        for g in group_permission_grants
    ]
Python
def is_valid_service_account_name(self, name): # type: (str) -> Tuple[bool, Optional[str]] """Check if the given name is valid for use as a service account. Returns: Tuple whose first element is True or False indicating whether it is valid, and whose second element is None if valid and an error message if not. """ if len(name) > MAX_NAME_LENGTH: error = "{} is longer than {} characters".format(name, MAX_NAME_LENGTH) return (False, error) if not re.match("^{}$".format(SERVICE_ACCOUNT_VALIDATION), name): error = "{} is not a valid service account name (does not match {})".format( name, SERVICE_ACCOUNT_VALIDATION ) return (False, error) if name.split("@")[-1] != self.settings.service_account_email_domain: error = "All service accounts must end in @{}".format( self.settings.service_account_email_domain ) return (False, error) try: self.plugins.check_service_account_name(name) except PluginRejectedServiceAccountName as e: return (False, str(e)) return (True, None)
Python
def assert_controllers_are_auditors(group):
    # type: (Group) -> bool
    """Return whether all owners/np-owners/managers in a group (and below) are auditors

    This is used to ensure that all of the people who can control a group (owners, np-owners,
    managers) and all subgroups (all the way down the tree) have audit permissions.

    Raises:
        UserNotAuditor: If a user is found that violates the audit training policy, then this
            exception is raised.

    Returns:
        bool: True if the tree is completely controlled by auditors, else it will raise as above.
    """
    graph = Graph()
    checked = set()  # type: Set[str]
    queue = [group.name]
    while queue:
        cur_group = queue.pop()
        if cur_group in checked:
            continue
        details = graph.get_group_details(cur_group)
        for chk_user, info in iteritems(details["users"]):
            if chk_user in checked:
                continue
            # Only examine direct members of this group, because then the role is accurate.
            if info["distance"] == 1:
                if info["rolename"] == "member":
                    continue
                if user_is_auditor(chk_user):
                    checked.add(chk_user)
                else:
                    raise UserNotAuditor(
                        "User {} has role '{}' in the group {} but lacks the auditing "
                        "permission ('{}').".format(
                            chk_user, info["rolename"], cur_group, PERMISSION_AUDITOR
                        )
                    )
        # Now put subgroups into the queue to examine.
        for chk_group, info in iteritems(details["subgroups"]):
            if info["distance"] == 1:
                queue.append(chk_group)

    # If we didn't raise, we're valid.
    return True
Python
def assert_can_join(group, user_or_group, role="member"):
    # type: (Group, Union[Group, User], str) -> bool
    """Enforce audit rules on joining a group

    This applies the auditing rules to determine whether or not a given user can join the given
    group with the given role.

    Args:
        group (models.Group): The group to test against.
        user_or_group (models.User or models.Group): The user or group attempting to join.
        role (str): The role being tested.

    Raises:
        UserNotAuditor: If a user is found that violates the audit training policy, then this
            exception is raised.

    Returns:
        bool: True if the user should be allowed per policy, else it will raise as above.
    """
    # By definition, any user can join as a member to any group.
    if user_or_group.type == "User" and role == "member":
        return True

    # Else, we have to check if the group is audited. If not, anybody can join.
    graph = Graph()
    group_md = graph.get_group_details(group.name)
    if not group_md["audited"]:
        return True

    # Audited group. Easy case, let's see if we're checking a user. If so, the user must be
    # considered an auditor.
    if user_or_group.type == "User":
        if user_is_auditor(user_or_group.name):
            return True
        raise UserNotAuditor(
            "User {} lacks the auditing permission ('{}') so may only have the "
            "'member' role in this audited group.".format(user_or_group.name, PERMISSION_AUDITOR)
        )

    # No, this is a group-joining-group case. In this situation we must walk the entire group
    # subtree and ensure that all owners/np-owners/managers are considered auditors. This data
    # is contained in the group metadetails, which contains all eventual members.
    #
    # We have to fetch each group's details individually though to figure out what someone's role
    # is in that particular group.
    return assert_controllers_are_auditors(user_or_group)
Python
def group_has_pending_audit_members(session, group):
    # type: (Session, Group) -> bool
    """Check if a group still has memberships with "pending" audit status

    Arg(s):
        session: The SQL session
        group: The group

    Return:
        True if the group still has memberships with "pending" audit status
    """
    members_edge_ids = {member.edge_id for member in itervalues(group.my_members())}
    audit_members_statuses = session.query(AuditMember.status).filter(
        AuditMember.audit_id == group.audit_id,
        AuditMember.status == "pending",
        # only those members who have not left the group after the audit started
        AuditMember.edge_id.in_(members_edge_ids),
    )
    # Return a bool to match the declared return type rather than the raw count.
    return audit_members_statuses.count() > 0
Python
def write_error(self, status_code, **kwargs): # type: (int, **Any) -> None """Override for custom error page.""" message = kwargs.get("message", "Unknown error") if status_code >= 500 and status_code < 600: template = self.template_engine.get_template("errors/5xx.html") self.write( template.render({"is_active": self.is_active, "static_url": self.static_url}) ) else: template = self.template_engine.get_template("errors/generic.html") self.write( template.render( { "status_code": status_code, "message": message, "is_active": self.is_active, "trace_uuid": self.perf_trace_uuid, "static_url": self.static_url, } ) ) self.finish()
Python
def ensure_audit_security(perm_arg): # type: (Text) -> Callable[[Callable[..., None]], Callable[..., None]] """Decorator for web handler methods to ensure the current_user has the AUDIT_SECURITY permission with the specified argument. Args: perm_arg: the argument required for the audit permission. only 'public_keys' at this point. """ def _wrapper(f): # type: (Callable[..., None]) -> Callable[..., None] def _decorator(self, *args, **kwargs): # type: (GrouperHandler, *Any, **Any) -> None if not any( [ name == AUDIT_SECURITY and argument == perm_arg for name, argument, _, _ in user_permissions(self.session, self.current_user) ] ): return self.forbidden() f(self, *args, **kwargs) return wraps(f)(_decorator) return _wrapper
Python
def lambda_handler(event, context):
    # assume the token is valid until an exception proves otherwise.
    TokenValid = True

    # Authorization token is passed in via a custom http header
    # The header is configurable in the API Gateway admin interface
    token = event['authorizationToken']

    # Use the entire token as the Principal ID
    principalId = event['authorizationToken']

    # setup return policy settings
    tmp = event['methodArn'].split(':')
    apiGatewayArnTmp = tmp[5].split('/')
    awsAccountId = tmp[4]
    policy = AuthPolicy(principalId, awsAccountId)
    policy.restApiId = apiGatewayArnTmp[0]
    policy.region = tmp[3]
    policy.stage = apiGatewayArnTmp[1]

    # Decode the token using the per-customer secret downloaded from the
    # Approov admin portal
    try:
        tokenContents = jwt.decode(token, SECRET, algorithms=['HS256'])
    except jwt.ExpiredSignatureError:
        # Signature has expired, token is bad
        TokenValid = False
    except Exception:
        # Token could not be decoded; treat it as invalid
        TokenValid = False

    if TokenValid:
        # Retrieve the expiry time from the token
        if 'exp' in tokenContents:
            expiration = tokenContents['exp']
            # Convert to the appropriate format for the condition in the policy
            expirationTime = datetime.datetime.utcfromtimestamp(expiration).strftime('%Y-%m-%dT%H:%M:%SZ')
        else:
            TokenValid = False

    if TokenValid:
        # Token and claims are good, setup the access conditions.
        conditions = {
            "DateLessThanEquals": {
                "aws:CurrentTime": expirationTime
            }
        }
        # Allow all methods, restricted to those which match condition
        policy.allowMethodWithConditions(HttpVerb.ALL, '*', conditions)
    else:
        # if token invalid, deny access when token presented.
        # Deny all methods, restricted to those which match condition
        policy.denyMethod(HttpVerb.ALL, '*')

    # Finally, build the policy and return it
    return policy.build()
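A quick way to exercise this authorizer locally is to mint a short-lived token and wrap it in the event shape API Gateway sends. The snippet below is a hedged sketch: TEST_SECRET, the account id, and the methodArn are placeholders, and the module's SECRET must match the test secret for the allow branch to run.

# Hypothetical local test harness for the authorizer above; all identifiers here
# (TEST_SECRET, the ARN, the account id) are made-up examples.
import datetime
import jwt

TEST_SECRET = "replace-with-your-approov-secret"

def make_test_event():
    token = jwt.encode(
        {"exp": datetime.datetime.utcnow() + datetime.timedelta(minutes=5)},
        TEST_SECRET,
        algorithm="HS256",
    )
    return {
        "authorizationToken": token,
        "methodArn": "arn:aws:execute-api:eu-west-1:123456789012:abcdef123/prod/GET/hello",
    }

# lambda_handler(make_test_event(), context=None) should then return an allow policy,
# provided SECRET in this module equals TEST_SECRET.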
Python
def _addMethod(self, effect, verb, resource, conditions): '''Adds a method to the internal lists of allowed or denied methods. Each object in the internal list contains a resource ARN and a condition statement. The condition statement can be null.''' if verb != '*' and not hasattr(HttpVerb, verb): raise NameError('Invalid HTTP verb ' + verb + '. Allowed verbs in HttpVerb class') resourcePattern = re.compile(self.pathRegex) if not resourcePattern.match(resource): raise NameError('Invalid resource path: ' + resource + '. Path should match ' + self.pathRegex) if resource[:1] == '/': resource = resource[1:] resourceArn = ( 'arn:aws:execute-api:' + self.region + ':' + self.awsAccountId + ':' + self.restApiId + '/' + self.stage + '/' + verb + '/' + resource) if effect.lower() == 'allow': self.allowMethods.append({ 'resourceArn' : resourceArn, 'conditions' : conditions }) elif effect.lower() == 'deny': self.denyMethods.append({ 'resourceArn' : resourceArn, 'conditions' : conditions })
Python
def generate_moving_mnist(self, num_digits=2): ''' Get random trajectories for the digits and generate a video. ''' data = np.zeros((self.n_frames_total, self.image_size_, self.image_size_), dtype=np.float32) for n in range(num_digits): # Trajectory start_y, start_x = self.get_random_trajectory(self.n_frames_total) ind = random.randint(0, self.mnist.shape[0] - 1) digit_image = self.mnist[ind] for i in range(self.n_frames_total): top = start_y[i] left = start_x[i] bottom = top + self.digit_size_ right = left + self.digit_size_ # Draw digit data[i, top:bottom, left:right] = np.maximum(data[i, top:bottom, left:right], digit_image) data = data[..., np.newaxis] return data
Python
def knn(vec, vectors, k): """Return k-nearest neighbors of vec compared to each vector in vectors""" distances = [(idx, get_distance(vec, vecx)) for idx, vecx in enumerate(vectors)] return sorted(distances, key=lambda x: x[1])[:k]
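knn defers the distance metric to a get_distance helper that is not part of this snippet; the example below assumes a plain Euclidean distance purely for illustration.

# Minimal usage sketch; get_distance is a Euclidean stand-in, not the original helper.
import math

def get_distance(a, b):
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))

vectors = [(0.0, 0.0), (1.0, 1.0), (5.0, 5.0)]
print(knn((0.9, 1.1), vectors, k=2))
# -> [(1, 0.141...), (0, 1.421...)]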
Python
def regr_error_rate(x_train, y_train, x_test, y_test, k): """Return regression prediction error rate on given data sets with specified k""" error = 0.0 for x_test_i, y_test_i in zip(x_test, y_test): pred = regr_predict(x_test_i, x_train, y_train, k) error += abs(pred - y_test_i) / y_test_i error_rate = error / len(y_test) return error_rate
Python
def cls_error_rate(x_train, y_train, x_test, y_test, k): """Return classification prediction error rate on given data sets with specified k""" error = 0.0 for x_test_i, y_test_i in zip(x_test, y_test): pred = cls_predict(x_test_i, x_train, y_train, k) # Compare predicted and real results if pred != y_test_i: error += 1 error_rate = error / len(y_test) return error_rate
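Both error-rate helpers above lean on regr_predict and cls_predict, which are not shown in this snippet. The sketches below are assumptions about their likely shape (mean of the k nearest targets, and a majority vote over them), built on top of knn; they are not the original implementations.

# Assumed shapes for the missing predictors, for illustration only.
from collections import Counter

def regr_predict(x, x_train, y_train, k):
    neighbors = knn(x, x_train, k)
    return sum(y_train[idx] for idx, _ in neighbors) / k

def cls_predict(x, x_train, y_train, k):
    neighbors = knn(x, x_train, k)
    votes = Counter(y_train[idx] for idx, _ in neighbors)
    return votes.most_common(1)[0][0]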
Python
def remove_context(sequence_df: pd.DataFrame, tag="all", prob_remove=0.7):
    """
    Remove the previous word occurring before an entity
    Parameters
    ----------
    sequence_df : A dataframe containing a sequence
    tag: Only treat entities with this tag. If "all" (the default), we treat all tagged entities
    prob_remove: Probability of actually removing the context word before each entity
    Returns
    -------
    The sequence dataframe with some of the preceding context words removed
    """
    if tag != "all":
        annotated_rows = sequence_df[sequence_df["tag"].str.contains(tag)].index
    else:  # we treat all tags
        annotated_rows = sequence_df[sequence_df["tag"] != "O"].index

    non_annotated_rows = sequence_df[~sequence_df.index.isin(annotated_rows)].index

    to_modify_ids = [i - 1 for i in annotated_rows if random.random() < prob_remove]
    to_modify_ids = np.intersect1d(to_modify_ids, non_annotated_rows)  # only remove those non-entity words
    for i in to_modify_ids:
        sequence_df = sequence_df.drop(i)

    return sequence_df
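A small illustration of the behaviour. The "token" column name is an assumption (only the "tag" column is read by the function); with prob_remove=1.0 the word before the PER_NOM entity is always dropped.

import pandas as pd

seq = pd.DataFrame(
    {"token": ["Monsieur", "Dupont", "habite", "à", "Paris"],
     "tag": ["O", "B-PER_NOM", "O", "O", "B-LOC"]}
)
# Drops "Monsieur", the non-entity word preceding the PER_NOM entity.
print(remove_context(seq, tag="PER_NOM", prob_remove=1.0))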
Python
def remove_nested_entities(matched_entities): """ Find and remove matched entities that overlap with a larger one. I.e.: detect and remove entities contained in others. Ex. (Idris, 12, 17) is overlapped in (Faris Idris, 6, 17). :param matched_entities: :return: clean_matched_entities: without nested entities """ clean_matched_entities = [] len_matched_entities = len(matched_entities) for i in range(len_matched_entities): is_nested = False _, i_start_pos, i_end_pos = matched_entities[i] for j in range(len_matched_entities): _, j_start_pos, j_end_pos = matched_entities[j] if i_start_pos == j_start_pos and i_end_pos == j_end_pos: continue if i_start_pos >= j_start_pos and i_end_pos <= j_end_pos: # print(f"{matched_entities[i]} is nested :(") is_nested = True break if not is_nested: clean_matched_entities.append(matched_entities[i]) return clean_matched_entities
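Reusing the example from the docstring:

matches = [("Faris Idris", 6, 17), ("Idris", 12, 17)]
print(remove_nested_entities(matches))
# -> [('Faris Idris', 6, 17)]  ("Idris" is nested inside the larger span, so it is removed)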
Python
def text_xml_alignment(per_line_tagged_tokens, text_lines, accept_errors=False): """ Align text with xml annotations. Returns a dict {line_1: [{token:'', replacement:'', tipo:'', line:''}, {} ], line_2: [{token:'', replacement:'', tipo:'', line:''}, {} ], ...} with the correct line numbers as found in the corresponding txt file. :param per_line_tagged_tokens: :param text_lines: :return: """ found_index = -1 per_line_tagged_tokens_copy = {} for line_nb_xml, replacements in per_line_tagged_tokens.items(): line_tagged_tokens = [info["token"] for info in replacements] # We iterate over all the lines of the text file to find the line with the tagged tokens of this # replacement_position for line_index, line in enumerate(text_lines[found_index + 1:], start=found_index + 1): if not line: continue line_tagged_tokens_matched = [] for idx, tagged_token in enumerate(line_tagged_tokens): matched = list(re.finditer(r"\b{}(?!\w)".format(re.escape(tagged_token)), line)) if matched: line_tagged_tokens_matched.extend([(t.group(),) + t.span() for t in matched]) else: break if not line_tagged_tokens_matched: continue line_tagged_tokens_matched_sorted = list(sorted(set(line_tagged_tokens_matched), key=lambda x: x[1])) line_tagged_tokens_matched_sorted = remove_nested_entities(line_tagged_tokens_matched_sorted) # check that the matched tagged tokens are in the same order as the original tagged tokens if len(line_tagged_tokens) == len(line_tagged_tokens_matched_sorted): if not all(line_tagged_tokens[i] == line_tagged_tokens_matched_sorted[i][0] for i in range(len(line_tagged_tokens))): # We found the same number of tagged tokens but they are not the same :( Next line..." continue else: # We have the same number of tagged tokens and the same tagged tokens :)" found_index = line_index per_line_tagged_tokens_copy[found_index] = replacements break if len(per_line_tagged_tokens_copy) != len(per_line_tagged_tokens) and not accept_errors: return None return per_line_tagged_tokens_copy
Python
def find_reason_alignment_fail(per_line_tagged_entity: dict, text_lines: list): """ Determine the cause of the alignment fail. Until now there are 3 possible causes found: 1. There is a mysterious line 0 in the xml which seems to be an error. This is the most pervasive error. 2. The xml contains misspelled entities that are not found on the txt. 3. The xml contains entities that do not exist in the txt. (the xml comprehends a larger file) Error 2 and 3 are detected similarly, if we do not find an entity in the text but it is in the xml, there is one of these two errors. To specifically detect error 2, we would need to find some edit distance between all the entities and all the words of the text which seems unnecessary and quite expensive... Error 1 is simply detecting whether there is a key=0 in the per_per_line_tagged_entity dict. :param per_line_tagged_entity: :return:reason """ def remove_nested_xml_entities(per_line_entities): """ Find xml entities that overlap and remove them :param per_line_entities: {line_nb:[{token, replacement, tipo, position, line}, {}]} :return: has_nested_entities: bool telling whether there are nested entities or not :return: clean_matched_entities: without nested entities """ clean_per_line_entities = {} seen_nested = [] nested_entities = [] for line_nb, list_annotations in per_line_entities.items(): clean_matched_entities = [] len_list_annotations = len(list_annotations) for i in range(len_list_annotations): token_i = list_annotations[i]['token'] position_i = int(list_annotations[i]['position']) is_nested = False for j in range(len_list_annotations): token_j = list_annotations[j]['token'] position_j = int(list_annotations[j]['position']) if token_j == token_i and position_i == position_j: continue if token_i in token_j: if position_i >= position_j and position_i <= position_j + len(token_j): # print(f"{matched_entities[i]} is nested :(") is_nested = True nested_entities.append(list_annotations[i]) break seen_nested.append(is_nested) if not is_nested: clean_matched_entities.append(list_annotations[i]) clean_per_line_entities[line_nb] = clean_matched_entities return clean_per_line_entities, any(seen_nested), nested_entities text = " ".join(text_lines) reason = ("-1", "Error not contemplated. You should investigate!") all_entities = [token['token'] for list_tokens in per_line_tagged_entity.values() for token in list_tokens] if 0 in per_line_tagged_entity: reason = ("1", "There is a zero line in the XML file.") return reason all_entities_found = True for entity in list(set(all_entities)): if entity not in text: all_entities_found = False break if not all_entities_found: reason = ("2/3", f"Entity not found in TXT file. {entity} was not found.") return reason # check if we have entities not annotated in the XML. Count the number of occurrences of each entity and # compare it to the number of entities in the TXT file. count_entities = Counter(all_entities) for entity, count in count_entities.items(): found_in_text = len(re.findall(r"\b{}(?!\w)".format(re.escape(entity)), text)) if found_in_text != count: reason = ("4", f"Missing an instance of entity '{entity}' in the XML file. " f"A name/address was not properly pseudonymized.") return reason clean_per_line_entities, seen_nested, nested_entities = remove_nested_xml_entities( per_line_entities=per_line_tagged_entity) if seen_nested: reason = ("6", f"Nested entities found (we have two annotations for the same entity): {str(nested_entities)}") return reason return reason
Python
def remove_nested_xml_entities(per_line_entities): """ Find xml entities that overlap and remove them :param per_line_entities: {line_nb:[{token, replacement, tipo, position, line}, {}]} :return: has_nested_entities: bool telling whether there are nested entities or not :return: clean_matched_entities: without nested entities """ clean_per_line_entities = {} seen_nested = [] nested_entities = [] for line_nb, list_annotations in per_line_entities.items(): clean_matched_entities = [] len_list_annotations = len(list_annotations) for i in range(len_list_annotations): token_i = list_annotations[i]['token'] position_i = int(list_annotations[i]['position']) is_nested = False for j in range(len_list_annotations): token_j = list_annotations[j]['token'] position_j = int(list_annotations[j]['position']) if token_j == token_i and position_i == position_j: continue if token_i in token_j: if position_i >= position_j and position_i <= position_j + len(token_j): # print(f"{matched_entities[i]} is nested :(") is_nested = True nested_entities.append(list_annotations[i]) break seen_nested.append(is_nested) if not is_nested: clean_matched_entities.append(list_annotations[i]) clean_per_line_entities[line_nb] = clean_matched_entities return clean_per_line_entities, any(seen_nested), nested_entities
Python
def print_errors(results_df: pd.DataFrame, type_error=None, window="single", return_string=False):
    """
    Show the errors found in the read CoNLL file
    :param results_df: Input CoNLL file to test
    :param type_error: Dict containing the types of errors to show: ex.: {"true": "B-PER_NOM", "pred": "O"}. Show all the errors by default
    :param window: If "single", show the single misclassified token, if an int, show the previous and next n tokens
    :return_string: If True, print AND return a string with the results
    :return:
    """
    from io import StringIO
    import sys
    errors_string = StringIO()
    old_stdout = sys.stdout
    if return_string:
        sys.stdout = errors_string

    results_df = results_df.fillna("")
    results_df.index = range(1, len(results_df) + 1)
    if type_error:
        errors_idx = results_df[(results_df["true_tag"] == type_error["true"]) & (results_df["pred_tag"] == type_error["pred"])].index
    else:
        errors_idx = results_df[results_df["pred_tag"] != results_df["true_tag"]].index

    if window == "single":
        final_df = results_df.loc[errors_idx]
        print(final_df.to_string())
    elif isinstance(window, int):
        lower_bound, upper_bound = (-1, -1)
        for idx in errors_idx:
            if lower_bound < idx < upper_bound:
                continue
            lower_bound = max(0, idx - window)
            upper_bound = min(errors_idx.max(), idx + window)
            window_df = results_df.loc[lower_bound:upper_bound, :]
            print(f"Line {idx} of the CoNLL file:", end="\n\t")
            print(window_df, end="\n\n")

    if return_string:
        sys.stdout = old_stdout
        return errors_string.getvalue()
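A hedged usage sketch with a tiny results frame; only the true_tag/pred_tag columns are required by the function, the token column is illustrative.

import pandas as pd

results = pd.DataFrame(
    {"token": ["Jean", "Dupont", "habite"],
     "true_tag": ["B-PER_NOM", "I-PER_NOM", "O"],
     "pred_tag": ["B-PER_NOM", "O", "O"]}
)
# Shows the misclassified "Dupont" row with one token of context on each side.
print_errors(results, window=1)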
Python
def print_confusion_matrix(
    y_true: np.ndarray,
    y_pred: np.ndarray,
    labels: Optional[List] = None,
    hide_zeroes: bool = False,
    hide_diagonal: bool = False,
    hide_threshold: Optional[float] = None,
    return_string=False
):
    """Print a nicely formatted confusion matrix with labelled rows and columns.

    Predicted labels are in the top horizontal header, true labels on the vertical header.

    Args:
        y_true (np.ndarray): ground truth labels
        y_pred (np.ndarray): predicted labels
        labels (Optional[List], optional): list of all labels. If None, then all labels present in the data are
        displayed. Defaults to None.
        hide_zeroes (bool, optional): replace zero-values with an empty cell. Defaults to False.
        hide_diagonal (bool, optional): replace true positives (diagonal) with empty cells. Defaults to False.
        hide_threshold (Optional[float], optional): replace values below this threshold with empty cells. Set to None
        to display all values. Defaults to None.
        return_string (bool, optional): do not print directly and return a string with the confusion_matrix.
        Defaults to False.
    """
    cm_string = StringIO()
    old_stdout = sys.stdout
    if return_string:
        sys.stdout = cm_string

    if labels is None:
        labels = np.unique(np.concatenate((y_true, y_pred)))
    cm = confusion_matrix(y_true, y_pred, labels=labels)
    # find which fixed column width will be used for the matrix
    columnwidth = max(
        [len(str(x)) for x in labels] + [5]
    )  # 5 is the minimum column width, otherwise the longest class name
    empty_cell = ' ' * columnwidth

    # top-left cell of the table that indicates that top headers are predicted classes, left headers are true classes
    padding_fst_cell = (columnwidth - 3) // 2  # double-slash is int division
    fst_empty_cell = padding_fst_cell * ' ' + 't/p' + ' ' * (columnwidth - padding_fst_cell - 3)

    # Print header
    print('    ' + fst_empty_cell, end=' ')
    for label in labels:
        print(f'{label:{columnwidth}}', end=' ')  # right-aligned label padded with spaces to columnwidth
    print()  # newline
    # Print rows
    for i, label in enumerate(labels):
        print(f'    {label:{columnwidth}}', end=' ')  # right-aligned label padded with spaces to columnwidth
        for j in range(len(labels)):
            # cell value padded to columnwidth with spaces and displayed with 1 decimal
            cell = f'{cm[i, j]:{columnwidth}.1f}'
            if hide_zeroes:
                cell = cell if float(cm[i, j]) != 0 else empty_cell
            if hide_diagonal:
                cell = cell if i != j else empty_cell
            if hide_threshold:
                cell = cell if cm[i, j] > hide_threshold else empty_cell
            print(cell, end=' ')
        print()

    if return_string:
        sys.stdout = old_stdout
        return cm_string.getvalue()
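Example call with string labels; numpy, scikit-learn's confusion_matrix, StringIO, and sys are assumed to be imported at module level, as the function requires.

import numpy as np

y_true = np.array(["O", "B-PER", "O", "B-PER"])
y_pred = np.array(["O", "O", "O", "B-PER"])
print_confusion_matrix(y_true, y_pred)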
Python
def yaml_convert_to_list(self):
    """
    Function to load content from yaml and convert to list
    """
    with open(r"config/config.yaml") as f:
        config_data = yaml.load(f, Loader=yaml.FullLoader)  # avoid shadowing the built-in dict
        print(config_data)
        tags = config_data.get("tags")
    return tags
Python
def main():
    """
    Function to connect to polarion portal and extract the required data
    using queries and return the result for populating in the email report.
    """
    config = cf.Config()
    config.load()
    pol_obj = polarian_data()
    print("\n-----Extracting from Polarion-----")
    res = []
    data = pol_obj.extract_data(config)
    print("\n Printing type ", type(data))
    print("\nData :", data)
    res.append(data)
    print("\n---Exporting in sheets")
    sheets_obj = ets.export_data()
    row_count = sheets_obj.export_sheets(data, config)
    chart_obj = export_charts.Charts(config.file_name, config.GS_CREDENTIALS)
    if row_count == "3":
        chart_obj.create_charts(config.tags, row_count)
    else:
        chart_obj.update_charts(config.tags, row_count)
    print("\n-----Extracting component wise data from Polarion-----")
    component_data = pol_obj.extract_component_data(config)
    print("\n Printing type of component data", type(component_data))
    print("\nComponent data :", component_data)
    res.append(component_data)
    print("\n-----Extracting component wise total testcase from Polarion-----")
    component_total_tc = pol_obj.extract_component_total_tc(config)
    print("\nPrinting type of component total testcases", type(component_total_tc))
    print("\nComponent wise total testcases :", component_total_tc)
    res.append(component_total_tc)
    print("\nResults: ", res)
    return res
Python
def extract_data(self, config): """ Function to fetch tier wise data from polarion. """ data = {} for i in config.tags: data[str(i)] = [] for filter_keys in config.filter.keys(): for val in config.filter[filter_keys]["values"]: query = ( config.filter[filter_keys]["keys"] + ":" + val + " AND " + config.key + ":" + str(i) + " AND project.id:" + config.project_id + " AND " "NOT status" + ":" + "inactive" ) print(query) tc = self.TestCase.get_query_result_count(query) data[str(i)].append(tc) return data
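The loop above assembles a plain Lucene-style query per filter value and tag. For illustration, made-up config values produce a string like the one below; the field names are hypothetical, not taken from a real config.

# Illustration of the query string the loop builds, using made-up config values.
filter_key = "caseautomation.KEY"   # hypothetical config.filter[...]["keys"]
filter_value = "automated"          # hypothetical filter value
key, tag, project_id = "tags", "tier1", "EXAMPLEPROJ"

query = (
    filter_key + ":" + filter_value
    + " AND " + key + ":" + str(tag)
    + " AND project.id:" + project_id
    + " AND NOT status:inactive"
)
print(query)
# caseautomation.KEY:automated AND tags:tier1 AND project.id:EXAMPLEPROJ AND NOT status:inactive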
Python
def extract_component_total_tc(self, config): """ Function to fetch total testcase count per component from polarion """ component_total_tc = {} for component_filter_keys in config.component_filter.keys(): for val in config.component_filter[component_filter_keys]["values"]: component_total_tc[str(val)] = [] for i in config.tags: query = ( config.component_filter[component_filter_keys]["keys"] + ":" + val + " AND " + config.key + ":" + str(i) + " AND project.id:" + config.project_id + " AND " "status" + ":" + "approved" ) print(query) tc = self.TestCase.get_query_result_count(query) component_total_tc[str(val)].append(tc) return component_total_tc
Python
def extract_component_data(self, config): """ Function to fetch tier wise automated tc count for each component from polarion. """ component_data = {} for component_filter_keys in config.component_filter.keys(): for val in config.component_filter[component_filter_keys]["values"]: component_data[str(val)] = [] for i in config.tags: query = ( config.component_filter[component_filter_keys]["keys"] + ":" + val + " AND " + config.key + ":" + str(i) + " AND project.id:" + config.project_id + " AND caseautomation.KEY:automated" + " AND " "status" + ":" + "approved" ) print(query) tc = self.TestCase.get_query_result_count(query) component_data[str(val)].append(tc) return component_data
Python
def load(self): """ Function loads the configuration information. """ with open(r"config/config.yaml", "r") as file: yaml_config = yaml.safe_load(file) self.project_id = yaml_config["project_id"] self.file_name = yaml_config["file_name"] self.key = yaml_config["key"] self.tags = yaml_config["tags"] self.filter = yaml_config["filter"] self.GS_CREDENTIALS = yaml_config["GS_CREDENTIALS"] self.sender_user = yaml_config["sender_user"] self.recipient_user = yaml_config["recipient_user"] self.component_filter = yaml_config["component_filter"]
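For reference, a minimal, made-up config.yaml shape containing the keys this loader reads; real values (project ids, credential paths, filters) will differ per deployment.

import yaml

sample = """
project_id: EXAMPLEPROJ
file_name: automation-status
key: tags
tags: [tier1, tier2]
filter: {}
component_filter: {}
GS_CREDENTIALS: /path/to/service-account.json
sender_user: reports@example.com
recipient_user: [qe-list@example.com]
"""
print(yaml.safe_load(sample)["tags"])  # ['tier1', 'tier2']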
Python
def view(self): """ Function to display googlesheet creds. """ print(self.GS_CREDENTIALS)
Python
def send_email(gmail_user, recipients, subject, body):
    """
    Function to send email from sender to recipients with the subject
    and message passed.
    """
    sent_from = gmail_user
    msg = MIMEMultipart("mixed")
    msg["Subject"] = subject
    msg["From"] = gmail_user
    msg["To"] = ", ".join(recipients)

    # create html template for email body
    project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    print(project_dir)
    template_dir = os.path.join(project_dir, "polarion/html_template")
    jinja_env = Environment(
        extensions=[MarkdownExtension],
        loader=FileSystemLoader(template_dir),
        autoescape=select_autoescape(["html", "xml"]),
    )
    template = jinja_env.get_template("automation_status.html")
    automation_status = template.render(items=body[0])
    template = jinja_env.get_template("component_wise_data.html")
    # component_data = template.render(content=body[1])
    component_data = template.render(content=body)

    # Record the MIME types of both parts - text/plain and text/html.
    table1 = MIMEText(automation_status, "html")
    table2 = MIMEText(component_data, "html")

    # Attach parts into message container.
    # According to RFC 2046, the last part of a multipart message, in this case
    # the HTML message, is best and preferred.
    msg.attach(table1)
    msg.attach(table2)
    try:
        s = smtplib.SMTP("localhost")
        s.sendmail(sent_from, recipients, msg.as_string())
        s.quit()
        print("Email sent!")
    except Exception:
        print("Something went wrong... {}".format(sys.exc_info()[0]))
Python
def next_available_row(self, worksheet):
    """ Function calculates next available row in the gsheet """
    str_list = list(filter(None, worksheet.col_values(1)))
    return str(len(str_list) + 1)
Python
def export_sheets(self, data, config):
    """ Function to export the data fetched from polarion to gsheet. """
    gc = self.gspread.service_account(filename=config.GS_CREDENTIALS)
    try:
        sh = gc.open(config.file_name)
    except self.SpreadsheetNotFound:
        print("Spreadsheet does not exist or has not been shared with the client email id")
        return  # nothing to export without a spreadsheet handle
    ws = sh.get_worksheet(0)

    # First run: build the header rows (tier titles in row 1, hyperlinked filter labels in row 2)
    if self.next_available_row(ws) == "1":
        ws.update("A1", "Tier Classification")
        set_column_width(ws, "A", 150)
        start_c = "B"
        for keys in data.keys():
            end_c = chr(ord(start_c) + 2)
            cell_merge_name = start_c + str(1) + ":" + end_c + str(1)
            ws.merge_cells(cell_merge_name)
            print(start_c + str(1))
            ws.update(start_c + str(1), keys.capitalize())
            t_s = start_c
            for filter_keys in config.filter.keys():
                for val, label in zip(
                    config.filter[filter_keys]["values"],
                    config.filter[filter_keys]["labels"],
                ):
                    query = (
                        config.url
                        + r"/#/workitems?query="
                        + config.key
                        + "%3A"
                        + keys
                        + r"%20AND%20"
                        + config.filter[filter_keys]["keys"]
                        + r"%3A"
                        + val
                        + r"%20AND%20project.id%3A"
                        + config.project_id
                    )
                    val_insert = r'=HYPERLINK("' + query + r'","' + label + r'")'
                    ws.update_acell(t_s + str(2), val_insert)
                    t_s = chr(ord(t_s) + 1)
            start_c = chr(ord(start_c) + 3)
        strr = chr(ord("A") + (len(data.keys()) * 3))
        ws.format("A1:" + strr + str(2), self.format_cell())
        ws.update("A2", "Day")

    # Append one row of data per run, stamped with the current date/time
    row_count = self.next_available_row(ws)
    print(row_count)
    day = str(datetime.datetime.now()).split(".")[0]
    print(day)
    ws.update("A" + row_count, day)
    start_c = "B"
    for keys in data.keys():
        t_s = start_c
        for val in data[keys]:
            ws.update(t_s + row_count, val)
            t_s = chr(ord(t_s) + 1)
        start_c = chr(ord(start_c) + 3)
    strr = chr(ord("A") + (len(data.keys()) * 3))
    ws.format("A" + row_count + ":" + strr + row_count, self.format_cell())
    return row_count
Python
def handler(event: Dict[Any, Any], context: Any) -> None:
    """ Handles incoming DynamoDB stream events.

    :param event: Invocation event.
    :param context: Invocation context.

    :return: No return.
    """
    logger.info('Starting processing DynamoDB events.')

    use_embeddings = bool(SAGEMAKER_EMBEDDINGS_KEY and SAGEMAKER_ENDPOINT_NAME)
    if not use_embeddings and any([SAGEMAKER_EMBEDDINGS_KEY, SAGEMAKER_ENDPOINT_NAME]):
        raise OSError(
            f'In order to use sentence embedding, all of the following environment variables are required: '
            f'SAGEMAKER_ENDPOINT_NAME, SAGEMAKER_EMBEDDINGS_KEY. '
            f'Otherwise, provide none of the above.'
        )

    # Send data to elasticsearch using bulk API.
    succeeded, failed = bulk(
        es,
        dynamodb_to_es_generator(event, use_embeddings),
        stats_only=True,
        raise_on_error=False,
        raise_on_exception=False,
    )

    logger.info(f'Finished processing DynamoDB events. Succeeded: {succeeded}, failed: {failed}')
Python
def dynamodb_to_es_generator(
    event: Dict[Any, Any], use_embeddings: bool = False
) -> Generator[Dict[str, Any], None, None]:
    """ Converts events from DynamoDB streams into a format suitable for Elasticsearch's bulk API. """
    for record in event['Records']:
        try:
            if record['eventName'] == 'INSERT':
                item = DynamodbDecoder.decode_json(record['dynamodb']['NewImage'])
                if use_embeddings:
                    item['question_embedding'] = get_embeddings(item['question'])
                yield {
                    '_op_type': 'index',
                    '_index': ES_INDEX,
                    '_id': item[PRIMARY_KEY_FIELD],
                    '_source': item,
                }
            elif record['eventName'] == 'MODIFY':
                item = DynamodbDecoder.decode_json(record['dynamodb']['NewImage'])
                if use_embeddings:
                    item['question_embedding'] = get_embeddings(item['question'])
                yield {
                    '_op_type': 'index',
                    '_index': ES_INDEX,
                    '_id': item[PRIMARY_KEY_FIELD],
                    '_source': item,
                }
            elif record['eventName'] == 'REMOVE':
                item = DynamodbDecoder.decode_json(record['dynamodb']['Keys'])
                yield {
                    '_op_type': 'delete',
                    '_index': ES_INDEX,
                    '_id': item[PRIMARY_KEY_FIELD],
                }
        except Exception:
            logger.error(f'Failed to process record {record}.')
            # Don't hold up everything for a single error.
            continue
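For orientation, this is roughly the shape of stream event the generator consumes. The attribute names are hypothetical (they depend on the table; PRIMARY_KEY_FIELD is assumed to be 'id' here), and each yielded dict is one bulk action:

# Hypothetical DynamoDB stream event, for illustration only.
sample_event = {
    'Records': [
        {
            'eventName': 'INSERT',
            'dynamodb': {
                'NewImage': {
                    'id': {'S': '42'},
                    'question': {'S': 'How do I reset my password?'},
                }
            },
        }
    ]
}

# actions = list(dynamodb_to_es_generator(sample_event))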
Python
def build_url(base: str, arg: str = None):
    """Build client-side urls for grapes.js library."""
    if not arg:
        return base
    parts = list(
        filter(lambda part: part, base.split('/'))
    )
    parts.append(arg)
    url = "/".join(parts)
    if base.startswith('/'):
        return "/" + url
    return url
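A few hypothetical calls illustrate the joining behaviour (the paths are made up for the example):

assert build_url("/pages/", "home") == "/pages/home"
assert build_url("api/v1", "users") == "api/v1/users"
assert build_url("/pages/") == "/pages/"   # no arg: base returned unchanged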
Python
def save_video(self, targetdir):
    """Saves all your videos to targetdir """
    for page in self.doc.get_pages():
        if (page.annots):
            obj = self.doc.getobj(page.annots.objid)
            for i in obj:
                annotobj = i.resolve()
                try:
                    if (annotobj["Subtype"].name == 'RichMedia'):
                        linktype = "media"
                        data = annotobj["RichMediaContent"].resolve()
                        dataobj = data["Assets"].resolve()
                        fstream = dataobj["Names"][1].resolve()
                        filename = fstream["F"]
                        fdata = fstream['EF']['F'].resolve().get_data()
                        # stream data is bytes, so write in binary mode
                        with open(os.path.join(targetdir, filename), "wb") as f:
                            f.write(fdata)
                except Exception:
                    # annotation without the expected structure; skip it
                    pass
Python
def _rect(self, bbox):
    """ Changes a bounding box into something we can use with HTML
    (x, y, width, height measured from top left) """
    pgbox = self.pgbox
    pgwidth = round(abs(pgbox[0] - pgbox[2]))
    pgheight = round(abs(pgbox[1] - pgbox[3]))
    x = round(min(bbox[0], bbox[2]))
    y = pgheight - (round(max(bbox[1], bbox[3])))
    width = round(max(bbox[0], bbox[2]) - min(bbox[0], bbox[2]))
    height = round(max(bbox[1], bbox[3]) - min(bbox[1], bbox[3]))
    result = {"x": x, "y": y, "width": width, "height": height}
    return result
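To make the coordinate flip concrete, here is the same arithmetic on made-up numbers (a US-letter page box and an arbitrary annotation box; the values are illustrative, not from the source):

pgbox = (0, 0, 612, 792)      # page box, PDF origin at bottom-left
bbox = (100, 700, 200, 750)   # annotation box in PDF coordinates

pgheight = round(abs(pgbox[1] - pgbox[3]))                      # 792
x = round(min(bbox[0], bbox[2]))                                # 100
y = pgheight - round(max(bbox[1], bbox[3]))                     # 792 - 750 = 42, measured from the top
width = round(max(bbox[0], bbox[2]) - min(bbox[0], bbox[2]))    # 100
height = round(max(bbox[1], bbox[3]) - min(bbox[1], bbox[3]))   # 50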
Python
def _find_objid_pgnum(self, obj):
    """Given a page, return the page number """
    i = 0
    for page in self.doc.get_pages():
        i = i + 1
        if self.doc.getobj(page.pageid) == obj:
            return i
    return False
Python
def _intersects(self, layout, obj):
    """ Finds if the obj is contained within another object on the page """
    origbbox = obj.bbox
    for otherobj in layout:
        if obj != otherobj:
            otherbbox = otherobj.bbox
            # containment: all four edges of origbbox must fall inside otherbbox
            if (origbbox[0] >= otherbbox[0]) and (origbbox[1] >= otherbbox[1]) \
                    and (origbbox[2] <= otherbbox[2]) and (origbbox[3] <= otherbbox[3]):
                return otherbbox
    return origbbox
Python
def index():
    '''
    Renders the App index page.
    :return:
    '''
    db = mongo.db.arkep
    if request.method == 'GET':
        return render_template('index.html')
    elif request.method == 'POST':
        data = request.form.to_dict()
        db.insert(data)
        return json_util.dumps(data)
    else:
        return 'bad request'
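For the view above to accept both verbs it has to be registered with GET and POST. A minimal sketch, assuming a Flask app with the Flask-PyMongo extension (the Mongo URI is a placeholder, not from the source):

from flask import Flask
from flask_pymongo import PyMongo

app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/arkep"   # placeholder URI
mongo = PyMongo(app)

# register index() for both GET and POST
app.add_url_rule('/', 'index', index, methods=['GET', 'POST'])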
Python
def create_optimizer(self, t_total):
    """Create optimizer for training.

    Should be overwritten for special cases.

    :param t_total: (int) total number of training steps
    :return:
    """
    self.optimizer, self.scheduler = self._default_optimizer(t_total)
Python
def process_batch(self, batch):
    """Process data batch for model input

    Sends each tensor to the model device.
    Should be overwritten for special cases.

    :param batch: (list) The batch from the dataloader
    :return: batch: (list) The batch to be input into the model, with each tensor on the model device
    """
    return self._default_process_batch(batch)
Python
def train_step(self, inputs, labels):
    """Perform a training step.

    Can be overwritten for special cases.

    :param inputs: (list) Batch inputs
    :param labels: (torch.tensor) Gold labels for the task
    :return: loss: (Any) training loss
    """
    inputs += [labels]
    outputs = self.model(*inputs)
    loss = outputs[0]

    # mean() to average on multi-gpu parallel training
    if self.args.n_gpu > 1:
        loss = loss.mean()
    if self.args.gradient_accumulation_steps > 1:
        loss = loss / self.args.gradient_accumulation_steps

    self.optimizer.zero_grad()
    if self.args.fp16:
        with amp.scale_loss(loss, self.optimizer) as scaled_loss:
            scaled_loss.backward()
        torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.args.max_grad_norm)
    else:
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.max_grad_norm)
    return loss
Python
def eval_step(self, inputs, labels=None):
    """Perform an eval step.

    Can be overwritten for special cases.

    :param inputs: (list) Batch inputs
    :param labels: (torch.tensor) Gold labels for task, if provided
    :return: outputs: (Any) model outputs
    """
    if labels is not None:
        inputs += [labels]
    outputs = self.model(*inputs)
    return outputs
Python
def save_model(self):
    """Save a checkpoint in the output directory"""
    if os.path.isfile(self.model_checkpoint):
        return
    os.makedirs(self.model_checkpoint, exist_ok=True)

    # Only save the model itself if we are using distributed training
    model_to_save = self.model.module if hasattr(self.model, "module") else self.model
    state_dict = model_to_save.state_dict()
    torch.save(state_dict, f'{self.model_checkpoint}/pytorch_model.bin')

    # save config
    with open(f'{self.model_checkpoint}/config.pt', 'wb') as config_file:
        dill.dump(self.config, config_file)
    # save optimizer
    torch.save(self.optimizer.state_dict(), f'{self.model_checkpoint}/optimizer.pt')
    # save scheduler
    torch.save(self.scheduler.state_dict(), f'{self.model_checkpoint}/scheduler.pt')
Python
def additional_arg_parser(parser):
    """Custom parameters can be passed through the command line by creating
    a custom argument parsing function.

    :param parser: (argparse.ArgumentParser)
    :return: parser: (argparse.ArgumentParser)
    """
    parser.add_argument('--input_size', type=int, default=2)
    parser.add_argument('--hidden_size', type=int, default=10)
    parser.add_argument('--output_size', type=int, default=1)
    return parser
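The hook is wired in by handing the function itself to the setup helper shown further down. A rough sketch (the script name and flag values are placeholders):

# Hypothetical entry point wiring the custom parser into train_setup().
if __name__ == '__main__':
    args, logger = train_setup(additional_arg_parser=additional_arg_parser)
    logger.info(f'hidden_size={args.hidden_size}')
    # invoked e.g. as: python train.py --model mlp --do_train --hidden_size 32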
Python
def parse_input_arguments(additional_arg_parser):
    """Parses arguments passed from the command line.

    :param additional_arg_parser: (def) A custom argument parser created by the user that accepts
        additional arguments from the command line, outside the default arguments.
    :return: args: (argparse.Arguments) Command line arguments
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, default='', required=True,
                        help='Name of the model to be used. A model dictionary must be provided to DLTrainer '
                             'for looking up the model class.')
    parser.add_argument("--data_dir", type=str, default="data", help="The path to the data folder")
    parser.add_argument("--save_dir", type=str, default="save",
                        help="The path where the model and log will be saved.")
    parser.add_argument("--run_name", type=str, default='baseline', help="Name of your model training run.")
    parser.add_argument('--recompute-features', action='store_true',
                        help="Whether to recompute dataset features if they exist. "
                             "This argument can be used by your Dataset class.")
    parser.add_argument('--load_pretrained', action='store_true', help='Whether to load pretrained model')
    parser.add_argument('--pretrained_checkpoint', type=str, default='',
                        help="Directory of pretrained model. Required if load_pretrained is included.")
    parser.add_argument('--do_train', action='store_true', help="Whether to run training.")
    parser.add_argument('--do_eval', action='store_true', help="Whether to run evaluation.")
    parser.add_argument('--do_test', action='store_true', help="Whether to run test.")
    parser.add_argument('--no_eval_during_training', action='store_true',
                        help='Whether to block evaluation during training.')
    parser.add_argument('--load_optimizer', action='store_true',
                        help='Load saved optimizer from pretrained-checkpoint')
    parser.add_argument('--load_scheduler', action='store_true',
                        help='Load saved scheduler from pretrained-checkpoint')
    parser.add_argument('--train_batch_size', type=int, default=16,
                        help='Batch size for training, and evaluation if eval_batch_size=0')
    parser.add_argument('--eval_batch_size', type=int, default=0, help='Batch size for evaluation')
    parser.add_argument('--num_train_epochs', type=int, default=1)
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help='Number of updates steps to accumulate before performing backward & update steps.')
    parser.add_argument('--lr', type=float, default=5e-5, help='Initial learning rate for Adam.')
    parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay if we apply some')
    parser.add_argument('--adam_epsilon', type=float, default=1e-8, help='Epsilon for Adam optimizer')
    parser.add_argument('--max_grad_norm', type=float, default=1.0, help='Max gradient norm')
    parser.add_argument('--warmup_steps', type=int, default=0, help='Linear warmup over warmup_steps')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--eval_every', type=int, default=5000)
    parser.add_argument('--logging_steps', type=int, default=1000, help='Log every X update steps')
    parser.add_argument('--no_early_stopping', action='store_true',
                        help='Prevent trainer from stopping early when model performance converges on Dev set.')
    parser.add_argument('--early_stopping_steps', type=int, default=10,
                        help='Stop training early if model does not exceed --early_stopping_tol for X steps.')
    parser.add_argument('--early_stopping_tol', type=float, default=1e-5,
                        help='Stop training early if model does not exceed X for --early_stopping_steps steps.')
    parser.add_argument('--no_cuda', action='store_true', help='Avoid using CUDA when available')
    parser.add_argument('--fp16', action='store_true',
                        help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
    parser.add_argument('--fp16_opt_level', type=str, default='O1',  # apex AMP levels are O0-O3; '01' was a typo
                        help='For fp16: Apex AMP optimization level selected in options. '
                             'See details at https://nvidia.github.io/apex/amp.html')
    parser.add_argument('--local_rank', type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument('--node_index', type=int, default=-1, help='node index if multi-node_running')
    parser.add_argument('--gpu_per_node', type=int, default=-1, help='num of gpus per node')

    if additional_arg_parser is not None:
        parser = additional_arg_parser(parser)
    args = parser.parse_args()
    return args
Python
def train_setup(additional_arg_parser=None, args=None):
    """Parses the training arguments from the command line, sets up GPU training
    if applicable, and creates a logger for training/evaluation.

    :param additional_arg_parser: (def) A custom argument parser created by the user that accepts
        additional arguments from the command line, outside the default arguments.
    :return: args: (argparse.Arguments) Command line arguments
    :return: logger: (logging.Logger) Logger instance for logging events.
    """
    if args is None:
        args = parse_input_arguments(additional_arg_parser)
    if args.do_eval or args.do_test:
        args.load_pretrained = True
    if args.load_pretrained and args.pretrained_checkpoint == '':
        raise ValueError('Must provide --pretrained_checkpoint when using --load_pretrained')
    if args.eval_batch_size == 0:
        args.eval_batch_size = args.train_batch_size

    if args.load_pretrained:
        args.save_dir = "/".join(args.pretrained_checkpoint.split('/')[:-1])
    else:
        args.save_dir = get_save_dir(args.save_dir, args.run_name)
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    args.start_epoch = 0
    args.start_step = 0

    split_name = 'train' if args.do_train else 'validation' if args.do_eval else 'test'
    logger = get_logger(args.save_dir, 'log_train')
    logger.info("local_rank: %d, node_index: %d, gpu_per_node: %d"
                % (args.local_rank, args.node_index, args.gpu_per_node))

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.local_rank += args.node_index * args.gpu_per_node
        args.n_gpu = 1
    args.device = device

    logger.info("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s, world size: %s",
                args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16,
                torch.distributed.get_world_size() if args.local_rank != -1 else 1)

    set_seed(args)
    return args, logger
Python
def is_covered():
    '''
    Checks a commit coverage report generated by diff-cover, parsing it to see if any
    lines in the commit are not covered. Exits with -1 if uncovered lines are found.
    '''
    with open('commit_coverage.html', 'r+') as cov_xml:
        cov_map = mmap(cov_xml.fileno(), 0)
        missing = b'Missing</b>: '
        missing_loc = cov_map.find(missing)
        if missing_loc != -1:
            cov_map.seek(missing_loc + len(missing))
            if int(cov_map.read(2)) > 0:
                print('Some lines not covered in commit')
                exit(-1)
Python
def receive(self, sock, byte_num, timeout=None):
    ''' Read a specific amount of bytes from a given socket '''
    data = bytearray(byte_num)
    view = memoryview(data)
    total = 0
    if timeout:
        sock.settimeout(timeout)

    while view:
        # Get whatever the socket gives and put it inside the bytearray
        received = sock.recv_into(view)
        if received == 0:
            raise ConnectionRefusedError('Client connection interrupted - 0 returned by socket')
        view = view[received:]
        total += received

    if timeout:
        sock.settimeout(None)
    return data
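A helper like this is typically used with a length-prefixed protocol: read a fixed-size header first, then the payload it announces. A rough sketch, assuming `server` is an instance of the class defining receive() and `conn` is an accepted socket (both hypothetical, as is the little-endian 4-byte framing):

from struct import unpack

header = server.receive(conn, 4)                     # assumed 4-byte little-endian length prefix
(payload_len,) = unpack('<i', header)
payload = server.receive(conn, payload_len, timeout=10)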
Python
def generate_table_metadata(self, query):
    ''' SQLite does not supply metadata after select query, keeping it during create '''
    table_name = query[query.index('table') + 6 : query.index('(')].strip()
    names, types, is_null = zip(*((item.split()[0], item.split()[1], item.split()[-2])
                                  for item in query[query.index('(') + 1 : -1].split(',')))
    types = [col_type.split('(') for col_type in types]

    self.cols_meta = [
        ('true' if col_type[0] == 'text' else 'false',
         names[idx],
         'false' if is_null[idx] == 'not' else 'true',
         sql_to_sqream_type[col_type[0]][0],
         sql_to_sqream_type[col_type[0]][1] if col_type[0] != 'varchar' else int(col_type[1][:-1])
         ) for idx, col_type in enumerate(types)
    ]

    # Col sizes template, used for the response to "fetch"
    # text columns sizes need to be extracted deliberately from the binary buffer
    col_szs_tups = ((1 * (meta[2] == 'true'), 4 * (meta[0] == 'true'), meta[-1] if meta[-1] != 0 else -1)
                    for meta in self.cols_meta)
    col_szs = [num for tup in col_szs_tups for num in tup if num != 0]

    # {"isTrueVarChar":{is_tvc},"name":"{name}","nullable":{is_nul},"type":["{col_type}",{col_size},0]}
    col_meta_st = '{{"isTrueVarChar":{},"name":"{}","nullable":{},"type":["{}",{},0]}}'
    self.table_meta[table_name] = [','.join(col_meta_st.format(*col_meta) for col_meta in self.cols_meta),
                                   col_szs]
Python
def calculate_col_szs(self, table_name, rows_num, binary_data):
    ''' The size of most columns is known in advance, except text columns '''
    col_sizes = [size * rows_num for size in self.table_meta[table_name][1]]
    text_sizes_start = 0
    for idx in range(len(col_sizes) - 1):
        if col_sizes[idx + 1] < 0:
            # Next one up is an unknown size of a text data column, here cometh thy money
            col_sizes[idx + 1] = sum(unpack(f'{rows_num}i',
                                            binary_data[text_sizes_start : text_sizes_start + col_sizes[idx]]))
            # printdbg(f'text col size: {col_sizes[idx+1]}')
        text_sizes_start += col_sizes[idx]

    return ','.join(str(size) for size in col_sizes)
Python
def find_gdb_in_zip(zip_path):
    """Find Geodatabase name(s) in Zip file

    Args:
        - zip_path: path to Zip file

    Returns:
        str: name(s)
    """
    with ZipFile(zip_path) as zf:
        names = zf.namelist()

    # Find gdb file(s) (it's actually a folder, hence /)
    return [x for x in names if x.endswith('.gdb/')]
Python
def find_combined_layer(path):
    """Find name of layer with combined categories

    Args:
        - path: path to file. If the path is a zipfile, must be of format:
          ```
          zip:///path/to/zip/file.zip!gdb_file.gdb
          ```

    Returns:
        str: name of combined layer in Geodatabase file
    """
    layers = fiona.listlayers(path)
    match = 'Fee_Designation_Easement'
    matched_layers = [x for x in layers if match in x]
    assert len(matched_layers) == 1, '!=1 matched layer'
    return matched_layers[0]
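The two helpers are meant to be chained: locate the .gdb folder inside the archive, build the zip:// path that the docstring above describes, then look up the combined layer. A hedged sketch (the archive path and geodatabase name are placeholders):

zip_path = '/data/padus_combined.zip'                 # placeholder archive path
gdb_name = find_gdb_in_zip(zip_path)[0].rstrip('/')   # e.g. 'PADUS.gdb' (hypothetical)
gdb_path = f'zip://{zip_path}!{gdb_name}'             # matches the format in the docstring

layer_name = find_combined_layer(gdb_path)
# features could then be read with e.g. fiona.open(gdb_path, layer=layer_name)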
Python
def walk_up(bottom):
    """ mimic os.walk, but walk 'up' instead of down the directory tree """
    while True:
        bottom = os.path.realpath(bottom)
        yield bottom, os.listdir(bottom)

        new_path = os.path.realpath(os.path.join(bottom, '..'))
        if new_path == bottom:
            return
        bottom = new_path
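A typical use is searching upward for a marker file such as a project root. A small example (the marker file name is arbitrary):

import os

def find_upwards(start, filename='pyproject.toml'):
    """Return the first directory at or above `start` containing `filename`, else None."""
    for directory, entries in walk_up(start):
        if filename in entries:
            return directory
    return None

# find_upwards(os.getcwd())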
Python
def cli(ctx, dry_run):
    """Standardized project tool for running common tasks"""
    ctx.obj = Project.find()
    ctx.obj.dry_run = dry_run
    if ctx.invoked_subcommand is None:
        ctx.obj.info()
Python
def listN(indict, N=10, rand=False):
    """
    Print first N items from a dictionary.
    Can use 'rand=True' to look at a random selection of dictionary elements.
    """
    if rand:
        samp = random.sample(range(len(indict)), min([N, len(indict)]))
    else:
        samp = range(N)
    for i in samp:
        key = list(indict)[i]
        print(str(key) + ':' + str(indict[key]))
Python
def evalNQueens(individual):
    """Evaluation function for the n-queens problem.

    The problem is to determine a configuration of n queens on a nxn chessboard
    such that no queen can be taken by one another. In this version, each queen
    is assigned to one column, and only one queen can be on each line. The
    evaluation function therefore only counts the number of conflicts along the
    diagonals.
    """
    # Count the number of conflicts with other queens.
    # The conflicts can only be diagonal, count on each diagonal line
    left_diagonal = [0] * (2 * individual.size - 1)
    right_diagonal = [0] * (2 * individual.size - 1)

    # Sum the number of queens on each diagonal:
    for i in range(individual.size):
        left_diagonal[i + individual[i]] += 1
        right_diagonal[individual.size - 1 - i + individual[i]] += 1

    # Count the number of conflicts on each diagonal
    sum_ = 0
    for i in range(2 * individual.size - 1):
        if left_diagonal[i] > 1:
            sum_ += left_diagonal[i] - 1
        if right_diagonal[i] > 1:
            sum_ += right_diagonal[i] - 1
    return sum_
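A quick sanity check with NumPy arrays; the function only needs integer indexing and a `.size` attribute, so a plain ndarray works as a stand-in for the individual type used elsewhere:

import numpy as np

assert evalNQueens(np.array([0, 2, 4, 1, 3])) == 0   # valid 5-queens placement, no diagonal conflicts
assert evalNQueens(np.array([0, 1, 2, 3, 4])) == 4   # all queens on one diagonal: 5 queens -> 4 conflicts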
Python
def mu_comma_lambda_replacement(pop, offsprings):
    ''' Mu Comma Lambda replacement (mu, lambda)

    Args:
        pop (Population): original individuals
        offsprings (Population): offsprings that will be inserted into pop

    Returns:
        population from sorted offsprings
    '''
    # get offsprings sorted indexes
    fitness_values = [i.fitness.value for i in offsprings.individuals]
    sort_indexes = np.argsort(fitness_values)

    # replace population with sorted offsprings
    for i in range(pop.size):
        pop.individuals[i] = offsprings.individuals[sort_indexes[i]].clone()

    return pop
Python
def mu_plus_lambda_replacement(pop, offsprings):
    ''' Mu Plus Lambda replacement (mu + lambda)

    Args:
        pop (Population): original individuals
        offsprings (Population): offsprings that will be inserted into pop

    Returns:
        population from sorted original population plus offsprings
    '''
    # join individuals and get sorted indexes
    joint_pop = np.concatenate((pop.individuals, offsprings.individuals))
    fitness_values = [i.fitness.value for i in joint_pop]
    sort_indexes = np.argsort(fitness_values)

    # replace population with sorted joint population
    for i in range(pop.size):
        pop.individuals[i] = joint_pop[sort_indexes[i]].clone()

    return pop
Python
def uncorrelated_n_steps_mutation(ind, epsilon=1e-08):
    ''' Uncorrelated N Steps Mutation

    Args:
        ind (Individual): individual to be mutated. First half is the problem, second half sigma values
        epsilon (float): minimum value sigma can have

    Returns:
        mutated individual
    '''
    chromossome = ind.genotype
    n = int(chromossome.size / 2)
    tau1 = (1.0 / np.sqrt(2.0 * n)) * np.random.normal()
    tau2 = 1.0 / np.sqrt(2.0 * np.sqrt(n))

    parameters = np.array(chromossome[n:])
    sigmas = np.empty(n, dtype=chromossome.dtype)
    for i in range(sigmas.size):
        sigmas[i] = sigma_check(parameters[i] * np.exp(tau1 + tau2 * np.random.normal()), epsilon)

    values = np.array(chromossome[:n])
    offspring = np.empty(n, dtype=chromossome.dtype)
    for i in range(values.size):
        offspring[i] = values[i] + sigmas[i] * np.random.normal()

    ind.genotype = np.concatenate((offspring, sigmas))

    return ind
Python
def uncorrelated_n_steps_mutation_adaptive(ind, epsilon=1e-08):
    ''' Uncorrelated N Steps Mutation (adaptive variant)

    Args:
        ind (Individual): individual to be mutated; sigma values are stored in ind.parameters
        epsilon (float): minimum value sigma can have

    Returns:
        mutated individual
    '''
    chromossome = ind.genotype
    parameters = ind.parameters
    n = chromossome.size
    tau1 = (1.0 / np.sqrt(2.0 * n)) * np.random.normal()
    tau2 = 1.0 / np.sqrt(2.0 * np.sqrt(n))

    sigmas = np.empty(parameters.size, dtype=parameters.dtype)
    for i in range(sigmas.size):
        sigmas[i] = sigma_check(parameters[i] * np.exp(tau1 + tau2 * np.random.normal()), epsilon)

    offspring = np.empty(chromossome.size, dtype=chromossome.dtype)
    for i in range(chromossome.size):
        offspring[i] = chromossome[i] + sigmas[i] * np.random.normal()

    # assign the mutated values (the original assigned the unmutated chromossome and discarded offspring)
    ind.genotype = offspring
    ind.parameters = sigmas

    return ind
Python
def max_size_from_tree_max_depth(pset, tree_max_depth):
    ''' Max size from Tree Max Depth

    Args:
        pset (PrimitiveSet): set of functions, terminals and variables
        tree_max_depth (int): maximum tree depth allowed

    Returns:
        max total number of nodes possible given a tree depth and the largest arity in the function set
    '''
    # total number of nodes = (N^L) / (N-1)
    # where N is the number of nodes (arity) and L is the tree depth
    max_arity = max(pset.arity_cache.keys())
    return int(pow(max_arity, tree_max_depth) / (max_arity - 1))
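Plugging in small numbers makes the bound concrete (the arities and depths below are chosen purely for illustration):

# assuming the largest arity in the primitive set is 2 and tree_max_depth is 5:
assert int(pow(2, 5) / (2 - 1)) == 32
# with a largest arity of 3 and depth 4, integer truncation gives 81 / 2 -> 40
assert int(pow(3, 4) / (3 - 1)) == 40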
Python
def make_fitness_regression(pset, fn, num_fitness_cases, loss=mean_squared_error):
    ''' Make Fitness Regression (1 variable)

    Args:
        pset (PrimitiveSet): set of functions, terminals and variables
        fn (function): objective function
        num_fitness_cases (int): number of fitness cases (sample points)
        loss (function): loss function

    Returns:
        function that receives a GP solution and evaluates it for regression (1 variable)
    '''
    x_points = np.asarray([x for x in range(num_fitness_cases)])
    y_points = np.asarray([fn(x) for x in x_points])

    variable, _ = (list(pset.variables.values()))[0]

    def regression(solution):
        vars_inputs = {}
        x_evals = np.empty(num_fitness_cases)
        for i in range(num_fitness_cases):
            vars_inputs[variable] = x_points[i]
            x_evals[i] = pg.interpreter(pset, solution, run=True, vars_inputs=vars_inputs)
        return loss(x_evals, y_points)

    return regression
Python
def negative_tournament_selection(pop, size=3):
    ''' Negative Tournament Selection (minimization)

    Args:
        pop (Population): population to select from
        size (int): the tournament size

    Returns:
        cloned selected individual
    '''
    worst = pop.individuals[np.random.randint(pop.size)]
    for step in range(2, size):
        current = pop.individuals[np.random.randint(pop.size)]
        worst = current if current.fitness.value > worst.fitness.value else worst

    return worst.clone()