Dataset fields (name, type, observed value range):
code        string, lengths 66 - 870k
docstring   string, lengths 19 - 26.7k
func_name   string, lengths 1 - 138
language    string, 1 value
repo        string, lengths 7 - 68
path        string, lengths 5 - 324
url         string, lengths 46 - 389
license     string, 7 values
def set_config_local(self, path): """Set the local configuration via file path. This will override project defaults in the final configuration. If no local configuration is found on the argument path, a warning will be shown, and only default config is used. Arguments: path {str} -- Local config file location """ try: self.__config_local = dict(self.__read_yaml_file(path)) except FileNotFoundError: wrn = "Local config '{path}' not found, using project default" # Warning will show because it is in Exception block. warnings.warn(wrn.format(path=path)) self.__config_local = {}
Set the local configuration via file path. This will override project defaults in the final configuration. If no local configuration is found on the argument path, a warning will be shown, and only default config is used. Arguments: path {str} -- Local config file location
set_config_local
python
atc-project/atomic-threat-coverage
scripts/atcutils.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atcutils.py
Apache-2.0
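Editor's note on the record above: the override semantics amount to a dict merge in which local keys win. A minimal sketch, assuming the final config is a plain merge of the two dicts (the key names are real ATC config keys seen elsewhere in this file; the values are made up):

# Hypothetical values illustrating local-over-default config merging.
project_defaults = {
    "md_name_of_root_directory": "Atomic_Threat_Coverage",
    "confluence_viewpage_url": "https://confluence.example.com/pages/viewpage.action?pageId=",
}
config_local = {"md_name_of_root_directory": "ATC_local"}  # parsed from the local YAML
final_config = {**project_defaults, **config_local}        # local keys win
assert final_config["md_name_of_root_directory"] == "ATC_local"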
def __read_yaml_file(self, path): """Open the yaml file and load it into a variable. Return the parsed result""" with open(path) as f: yaml_fields = yaml.load_all(f.read(), Loader=yaml.FullLoader) buff_results = [x for x in yaml_fields] if len(buff_results) > 1: result = buff_results[0] result['additions'] = buff_results[1:] else: result = buff_results[0] return result
Open the yaml file and load it into a variable. Return the parsed result
__read_yaml_file
python
atc-project/atomic-threat-coverage
scripts/atcutils.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atcutils.py
Apache-2.0
def read_rule_file(path): """Open the file and read it into a variable. Return the text""" with open(path) as f: rule_text = f.read() return rule_text
Open the file and read it into a variable. Return the text
read_rule_file
python
atc-project/atomic-threat-coverage
scripts/atcutils.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atcutils.py
Apache-2.0
def read_yaml_file(path): """Open the yaml file and load it into a variable. Return the parsed result""" if path == 'config.yml': wrn = "Use 'load_config' or 'ATCConfig' instead for config" # Warning will not show, # unless captured by logging facility or python called with -Wd warnings.warn(message=wrn, category=DeprecationWarning) return ATCConfig(path).config with open(path) as f: yaml_fields = yaml.load_all(f.read().replace('&', '&amp;'), Loader=yaml.FullLoader) buff_results = [x for x in yaml_fields] if len(buff_results) > 1: result = buff_results[0] result['additions'] = buff_results[1:] else: result = buff_results[0] return result
Open the yaml file and load it into a variable. Return the parsed result
read_yaml_file
python
atc-project/atomic-threat-coverage
scripts/atcutils.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atcutils.py
Apache-2.0
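Editor's note on the record above: the "additions" convention is easiest to see on a multi-document YAML string. A self-contained sketch using PyYAML, mirroring the load_all logic in read_yaml_file:

import yaml

raw = """
title: Example rule
---
logsource:
  product: windows
"""
parts = list(yaml.load_all(raw, Loader=yaml.FullLoader))
result = parts[0]                    # first document is the main body
if len(parts) > 1:
    result['additions'] = parts[1:]  # every further document becomes an "addition"
print(result)
# {'title': 'Example rule', 'additions': [{'logsource': {'product': 'windows'}}]}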
def confluence_get_page_id(apipath, auth, space, title): """Get confluence page ID based on title and space""" headers = { "Accept": "application/json", "Content-Type": "application/json" } url = apipath + "content" space_page_url = url + '?spaceKey=' + space + '&title=' \ + title + '&expand=space' response = requests.request( "GET", space_page_url, headers=headers, auth=auth ) if response.status_code == 401: print("Unauthorized Response. Try to use a token instead of a password. " + "Follow the guideline for more info: \n" + "https://developer.atlassian.com/cloud/confluence/basic-auth-" + "for-rest-apis/#supplying-basic-auth-headers") exit() else: response = response.json() # Check if response contains proper information and return it if so if response.get('results'): if isinstance(response['results'], list): if response['results'][0].get('id'): return response['results'][0][u'id'] # If page not found return None
Get confluence page ID based on title and space
confluence_get_page_id
python
atc-project/atomic-threat-coverage
scripts/atcutils.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atcutils.py
Apache-2.0
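Editor's note on the record above: a hypothetical call to confluence_get_page_id; the endpoint and credentials below are placeholders, not from the source. The function issues a single Confluence REST "content" search:

from requests.auth import HTTPBasicAuth

# Placeholder endpoint/credentials — not from the source:
apipath = "https://example.atlassian.net/wiki/rest/api/"
auth = HTTPBasicAuth("user@example.com", "<api-token>")
page_id = confluence_get_page_id(apipath, auth, space="ATC", title="Logging Policies")
# Issues: GET {apipath}content?spaceKey=ATC&title=Logging Policies&expand=space
# The function does not URL-encode the title itself; the space in the title is
# left for the requests library to percent-encode.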
def sigma_lgsrc_fields_to_names(logsource_dict): """Get sigma logsource dict and rename keys/values to our model, so we can use it for Data Needed calculation""" if logsource_dict: sigma_keys = [*sigma_mapping] proper_logsource_dict = {} for key, val in logsource_dict.items(): if key in sigma_keys: if val in sigma_keys: # Translate both key and value proper_logsource_dict.update([ (sigma_mapping[key], sigma_mapping[val]) ]) else: # Translate only key proper_logsource_dict.update([ (sigma_mapping[key], val) ]) else: if val in sigma_keys: # Translate only value proper_logsource_dict.update([ (key, sigma_mapping[val]) ]) else: # Don't translate anything proper_logsource_dict.update([ (key, val) ]) return proper_logsource_dict else: return {}
Get sigma logsource dict and rename keys/values to our model, so we can use it for Data Needed calculation
sigma_lgsrc_fields_to_names
python
atc-project/atomic-threat-coverage
scripts/atcutils.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atcutils.py
Apache-2.0
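Editor's note on the record above: the mapping-driven renaming is easier to read against a concrete sigma_mapping. The table below is a hypothetical subset — the real one lives in the ATC scripts:

sigma_mapping = {  # hypothetical subset of the real mapping
    "product": "platform",
    "service": "channel",
    "sysmon": "Microsoft-Windows-Sysmon/Operational",
}
print(sigma_lgsrc_fields_to_names({"product": "windows", "service": "sysmon"}))
# {'platform': 'windows', 'channel': 'Microsoft-Windows-Sysmon/Operational'}
# 'product' is a known key -> renamed; 'windows' is unknown -> kept as-is.
# 'service' and 'sysmon' are both known -> key and value are translated.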
def check_for_event_ids_presence(detection_rule_obj): """Check whether this is an EventID-based detection rule""" if detection_rule_obj.get('detection'): for _field in detection_rule_obj['detection']: if _field in ["condition", "timeframe"]: continue for __field in detection_rule_obj['detection'][_field]: if isinstance(__field, str) or isinstance(__field, int): if __field == 'EventID': return True elif isinstance(__field, dict): for item in __field: if item == 'EventID': return True return False
Check whether this is an EventID-based detection rule
check_for_event_ids_presence
python
atc-project/atomic-threat-coverage
scripts/atcutils.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atcutils.py
Apache-2.0
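Editor's note on the record above: a usage sketch on a minimal Sigma-style detection section, following the function's own traversal (sections other than "condition"/"timeframe" are scanned for an 'EventID' field key):

rule = {"detection": {"selection": {"EventID": 4688, "CommandLine": "whoami"},
                      "condition": "selection"}}
print(check_for_event_ids_presence(rule))   # True  ('EventID' appears as a field key)

rule = {"detection": {"selection": {"CommandLine": "whoami"},
                      "condition": "selection"}}
print(check_for_event_ids_presence(rule))   # False (no 'EventID' anywhere)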
def check_for_enrichment_presence(detection_rule_obj): """Check whether the data for this Detection Rule requires any enrichments""" if detection_rule_obj.get('enrichment'): return True else: return False
Check whether the data for this Detection Rule requires any enrichments
check_for_enrichment_presence
python
atc-project/atomic-threat-coverage
scripts/atcutils.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atcutils.py
Apache-2.0
def get_logsource_of_the_document(detection_rule_obj): """Get the logsource for a specific document (addition)""" logsource = {} _temp_list = [] logsource_optional_fields = ['category', 'product', 'service'] if 'logsource' in detection_rule_obj: for val in logsource_optional_fields: if detection_rule_obj['logsource'].get(val): _temp_list.append((val, detection_rule_obj['logsource'].get(val))) logsource.update(_temp_list) else: return False return logsource
Get the logsource for a specific document (addition)
get_logsource_of_the_document
python
atc-project/atomic-threat-coverage
scripts/atcutils.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atcutils.py
Apache-2.0
def calculate_dn_for_eventid_based_dr( dn_list, logsource, event_ids, has_command_line): """Meaning of the arguments: dn_list - list of Data Needed objects (all dataneeded!) logsource - dictionary of logsource fields of Detection Rule PER document event_ids - list of event ids per selection logsource = { "product": "windows", "service": "sysmon" } event_ids = [4624, 4625] """ list_of_DN_matched_by_logsource = [] list_of_DN_matched_by_logsource_and_eventid = [] proper_logsource = ATCutils.sigma_lgsrc_fields_to_names(logsource) # find all Data Needed which matched by logsource section from # Detection Rule for dn in dn_list: y = dn x = proper_logsource if 'platform' in x and 'channel' in x: if x.get('platform') == y.get('platform') and x.get( 'channel') == y.get('channel'): list_of_DN_matched_by_logsource.append(dn) else: if x.get('platform') == y.get('platform'): list_of_DN_matched_by_logsource.append(dn) # find all Data Needed which matched by logsource section from # Detection Rule AND EventID for dn in list_of_DN_matched_by_logsource: try: eventID_from_title = str(int(dn['title'].split("_")[2])) except ValueError: eventID_from_title = "None" if has_command_line == True and dn['title'] == \ "DN_0001_4688_windows_process_creation": continue if isinstance(event_ids, list): for eid in event_ids: if eventID_from_title == str(eid): list_of_DN_matched_by_logsource_and_eventid\ .append(dn) elif eventID_from_title == str(event_ids): list_of_DN_matched_by_logsource_and_eventid.append(dn) y = list_of_DN_matched_by_logsource_and_eventid return [x['title'] for x in y if x.get('title')]
Meaning of the arguments: dn_list - list of Data Needed objects (all dataneeded!) logsource - dictionary of logsource fields of Detection Rule PER document event_ids - list of event ids per selection logsource = { "product": "windows", "service": "sysmon" } event_ids = [4624, 4625]
calculate_dn_for_eventid_based_dr
python
atc-project/atomic-threat-coverage
scripts/atcutils.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atcutils.py
Apache-2.0
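Editor's note on the record above: a detail worth calling out is that the candidate EventID comes from the Data Needed title itself, by position. A sketch of that naming convention (the title is the code's own 4688 special case):

title = "DN_0001_4688_windows_process_creation"
event_id_from_title = str(int(title.split("_")[2]))   # "4688"
# Titles whose third underscore-separated token is not an integer raise
# ValueError and are treated as having no EventID ("None" in the code above).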
def calculate_dn_for_non_eventid_based_dr( dn_list, detection_fields, logsource): """Meaning of the arguments: dn_list - list of Data Needed objects (all dataneeded!) detection_fields - dictionary of fields from detection section of Detection Rule logsource - dictionary of logsource fields of Detection Rule detection_fields = { "CommandLine": 4738, "EventID": 1234 } logsource = { "product": "windows", "service": "sysmon" } """ list_of_DN_matched_by_fields = [] list_of_DN_matched_by_fields_and_logsource = [] proper_logsource = ATCutils.sigma_lgsrc_fields_to_names(logsource) for dn in dn_list: # Will create a list of keys from Detection Rule fields dictionary list_of_DR_fields = [*detection_fields] list_of_DN_fields = dn['fields'] amount_of_fields_in_DR = len(list_of_DR_fields) amount_of_intersections_betw_DR_and_DN_fields = len( set(list_of_DR_fields).intersection(list(set(list_of_DN_fields) ))) if amount_of_intersections_betw_DR_and_DN_fields \ == amount_of_fields_in_DR: # if they are equal, do.. list_of_DN_matched_by_fields.append(dn) for matched_dn in list_of_DN_matched_by_fields: y = matched_dn x = proper_logsource if x.get('category') == "process_creation": # should take care about unix events in future: todo if x.get('platform') == y.get('platform') and "process_creation" \ in y.get('title'): list_of_DN_matched_by_fields_and_logsource.append(matched_dn) elif 'platform' in x and 'channel' in x: if x.get('platform') == y.get('platform') and x.get( 'channel') == y.get('channel'): list_of_DN_matched_by_fields_and_logsource.append(matched_dn) else: if x.get('platform') == y.get('platform'): list_of_DN_matched_by_fields_and_logsource.append(matched_dn) y = list_of_DN_matched_by_fields_and_logsource return [x['title'] for x in y if x.get('title')]
Meaning of the arguments: dn_list - list of Data Needed objects (all dataneeded!) detection_fields - dictionary of fields from detection section of Detection Rule logsource - dictionary of logsource fields of Detection Rule detection_fields = { "CommandLine": 4738, "EventID": 1234 } logsource = { "product": "windows", "service": "sysmon" }
calculate_dn_for_non_eventid_based_dr
python
atc-project/atomic-threat-coverage
scripts/atcutils.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atcutils.py
Apache-2.0
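Editor's note on the record above: the first matching pass is a subset test — a Data Needed entry qualifies when its fields cover every field of the Detection Rule. An equivalent minimal sketch:

dr_fields = ["CommandLine", "User"]                          # from the rule's detection section
dn_fields = ["CommandLine", "User", "Image", "ParentImage"]  # from a Data Needed object
covers = len(set(dr_fields) & set(dn_fields)) == len(dr_fields)
print(covers)  # True — every DR field is present in the DN field list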
def write_file(path, content, options="w+"): """Simple method for writing content to some file""" with open(path, options) as file: file.write(content) return True
Simple method for writing content to some file
write_file
python
atc-project/atomic-threat-coverage
scripts/atcutils.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atcutils.py
Apache-2.0
def get_rules(self): """ Returns list of detection rules for customer """ dr_list_per_customer = [rules_by_title.get(dr_title)[0] for dr_title in self.detection_rules] return dr_list_per_customer
Returns list of detection rules for customer
get_rules
python
atc-project/atomic-threat-coverage
scripts/customer.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/customer.py
Apache-2.0
def get_usecases(self): """ Returns list of use cases for customer """ uc_list_per_customer = [usecases_by_title.get(uc_title)[0] for uc_title in self.use_cases] return uc_list_per_customer
Returns list of use cases for customer
get_usecases
python
atc-project/atomic-threat-coverage
scripts/customer.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/customer.py
Apache-2.0
def render_template(self, template_type): """Render template with data in it template_type: - "markdown" - "confluence" """ if template_type not in ["markdown", "confluence"]: raise Exception( "Bad template_type. Available values: " + "[\"markdown\", \"confluence\"]") self.cu_fields.update( {'description': self.description.strip()} ) # Transform variables to arrays if not provided correctly in yaml if isinstance(self.data_needed, str): self.cu_fields.update({'dataneeded': [self.data_needed]}) if isinstance(self.logging_policies, str): self.cu_fields.update({'loggingpolicy': [self.logging_policies]}) detectionrule_with_path = [] for title in self.detection_rules: if title is not None: name = rules_by_title.get(title)[1] else: name = '' dr = (title, name) detectionrule_with_path.append(dr) self.cu_fields.update({'detectionrule': detectionrule_with_path}) usecase_with_path = [] if self.use_cases is not None: for title in self.use_cases: if title is not None: name = usecases_by_title.get(title)[1] else: name = '' uc = (title, name) usecase_with_path.append(uc) self.cu_fields.update({'usecase': usecase_with_path}) # Get proper template if template_type == "markdown": template = env\ .get_template('markdown_customer_template.md.j2') elif template_type == "confluence": template = env.get_template( 'confluence_customer_template.html.j2') self.cu_fields.update( {'confluence_viewpage_url': ATCconfig.get('confluence_viewpage_url')}) if not self.logging_policies: self.logging_policies = ["None", ] logging_policies_with_id = [] for lp in self.logging_policies: if lp != "None" and self.apipath and self.auth and self.space: logging_policies_id = str(ATCutils.confluence_get_page_id( self.apipath, self.auth, self.space, lp)) else: logging_policies_id = "" lp = (lp, logging_policies_id) logging_policies_with_id.append(lp) self.cu_fields.update({'loggingpolicy': logging_policies_with_id}) if not self.data_needed: self.data_needed = ["None", ] data_needed_with_id = [] for dn in self.data_needed: if dn != "None" and self.apipath and self.auth and self.space: data_needed_id = str(ATCutils.confluence_get_page_id( self.apipath, self.auth, self.space, dn)) else: data_needed_id = "" dn = (dn, data_needed_id) data_needed_with_id.append(dn) self.cu_fields.update({'data_needed': data_needed_with_id}) usecases_with_id = [] if self.use_cases is not None: for uc in self.use_cases: if uc != "None" and self.apipath and self.auth and self.space: usecase_id = str(ATCutils.confluence_get_page_id( self.apipath, self.auth, self.space, uc)) else: usecase_id = "" uc = (uc, usecase_id) usecases_with_id.append(uc) self.cu_fields.update({'usecase': usecases_with_id}) detection_rules_with_id = [] for dn in self.detection_rules: if dn != "None" and self.apipath and self.auth and self.space: detection_rules_id = str(ATCutils.confluence_get_page_id( self.apipath, self.auth, self.space, dn)) else: detection_rules_id = "" dn = (dn, detection_rules_id) detection_rules_with_id.append(dn) self.cu_fields.update({'detectionrule': detection_rules_with_id}) self.content = template.render(self.cu_fields) return True
Render template with data in it template_type: - "markdown" - "confluence"
render_template
python
atc-project/atomic-threat-coverage
scripts/customer.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/customer.py
Apache-2.0
def save_markdown_file(self, atc_dir=ATCconfig.get('md_name_of_root_directory')): """Write content (md template filled with data) to a file""" base = os.path.basename(self.yaml_file) title = os.path.splitext(base)[0] file_path = atc_dir + self.parent_title + "/" + \ title + ".md" return ATCutils.write_file(file_path, self.content)
Write content (md template filled with data) to a file
save_markdown_file
python
atc-project/atomic-threat-coverage
scripts/customer.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/customer.py
Apache-2.0
def render_template(self, template_type): """Render template with data in it template_type: - "markdown" - "confluence" """ if template_type not in ["markdown", "confluence"]: raise Exception( "Bad template_type. Available values: " + "[\"markdown\", \"confluence\"]") # Get proper template if template_type == "markdown": template = env.get_template( 'markdown_alert_template.md.j2') # Read raw sigma rule sigma_rule = ATCutils.read_rule_file(self.yaml_file) # Put raw sigma rule into fields var self.fields.update({'sigma_rule': sigma_rule}) # Define which queries we want from Sigma #queries = ["es-qs", "xpack-watcher", "graylog", "splunk", "logpoint", "grep", "fieldlist"] queries = ATCconfig.get('detection_queries').split(",") # dict to store query key + query values det_queries = {} # Convert sigma rule into queries (for instance, graylog query) for query in queries: # prepare command to execute from shell # (yes, we know) if query == "powershell": cmd = ATCconfig.get('sigmac_path') + " -t " + query + \ " --config " + ATCconfig.get('powershell_sigma_config') + \ " --ignore-backend-errors " + self.yaml_file elif query == "es-qs": cmd = ATCconfig.get('sigmac_path') + " -t " + query + \ " --config " + ATCconfig.get('es-qs_sigma_config') + \ " --ignore-backend-errors " + self.yaml_file elif query == "xpack-watcher": cmd = ATCconfig.get('sigmac_path') + " -t " + query + \ " --config " + ATCconfig.get('xpack-watcher_sigma_config') + \ " --ignore-backend-errors " + self.yaml_file elif query == "splunk": cmd = ATCconfig.get('sigmac_path') + " -t " + query + \ " --config " + ATCconfig.get('splunk_sigma_config') + \ " --ignore-backend-errors " + self.yaml_file elif query == "logpoint": cmd = ATCconfig.get('sigmac_path') + " -t " + query + \ " --config " + ATCconfig.get('logpoint_sigma_config') + \ " --ignore-backend-errors " + self.yaml_file else: cmd = ATCconfig.get('sigmac_path') + ' --shoot-yourself-in-the-foot -t "' + \ query + '" --ignore-backend-errors "' + self.yaml_file + '"' #query + " --ignore-backend-errors " + self.yaml_file + \ #" 2> /dev/null" #p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) query2 = subprocess.getoutput(cmd) # Wait for the process to terminate. Get the return code # p_status = p.wait() #p.wait() """ Had to remove '-' due to problems with Jinja2 variable naming, e.g. es-qs throws error 'no es variable' """ det_queries[query] = str(query2)#[2:-3] # Update detection rules self.fields.update({"det_queries": det_queries}) self.fields.update({"queries": queries}) # Data Needed data_needed = ATCutils.main_dn_calculatoin_func(self.yaml_file) # if there is only 1 element in the list, print it as a string, # without quotes # if isinstance(data_needed, list) and len(data_needed) == 1: # [data_needed] = data_needed # print("%s || Dataneeded: \n%s\n" % # (self.fields.get("title"), data_needed)) self.fields.update({'data_needed': sorted(data_needed)}) # Enrichments enrichments = self.fields.get("enrichment") if isinstance(enrichments, str): enrichments = [enrichments] self.fields.update({'enrichment': enrichments}) # MITRE ATT&CK Tactics and Techniques tactic = [] tactic_re = re.compile(r'attack\.\w\D+$') technique = [] technique_re = re.compile(r'(?:attack\.t\d{4}$|attack\.t\d{4}\.\d{3}$)') # AM!TT Tactics and Techniques amitt_tactic = [] amitt_tactic_re = re.compile(r'amitt\.\w\D+$') amitt_technique = [] amitt_technique_re = re.compile(r'amitt\.t\d{1,5}$') other_tags = [] if self.fields.get('tags'): for tag in self.fields.get('tags'): if tactic_re.match(tag): if ta_mapping.get(tag): tactic.append(ta_mapping.get(tag)) else: other_tags.append(tag) elif amitt_tactic_re.match(tag): if amitt_tactic_mapping.get(tag): amitt_tactic.append(amitt_tactic_mapping.get(tag)) else: other_tags.append(tag) elif technique_re.match(tag): te = tag.upper()[7:] technique.append((te_mapping.get(te), te)) elif amitt_technique_re.match(tag): te = tag.upper()[6:] amitt_technique.append((amitt_technique_mapping.get(te), te)) else: other_tags.append(tag) if len(tactic): self.fields.update({'tactics': tactic}) if len(technique): self.fields.update({'techniques': technique}) if len(amitt_tactic): self.fields.update({'amitt_tactics': amitt_tactic}) if len(amitt_technique): self.fields.update({'amitt_techniques': amitt_technique}) if len(other_tags): self.fields.update({'other_tags': other_tags}) triggers = [] for trigger in technique: if trigger == "None": continue trigger_name, trigger_id = trigger # Check if a directory for a technique exists in atomic red team repo if os.path.isdir(ATCconfig.get('triggers_directory') + '/' + trigger_id): triggers.append(trigger) else: print(trigger_id + ": No atomics trigger for this technique") """ triggers.append( trigger + ": No atomics trigger for this technique" ) """ self.fields.update( {'description': self.fields.get('description').strip()}) self.fields.update({'triggers': triggers}) elif template_type == "confluence": template = env.get_template( 'confluence_alert_template.html.j2') self.fields.update( {'confluence_viewpage_url': ATCconfig.get('confluence_viewpage_url')}) sigma_rule = ATCutils.read_rule_file(self.yaml_file) self.fields.update({'sigma_rule': sigma_rule}) #outputs = ["es-qs", "xpack-watcher", "graylog"] queries = ATCconfig.get('detection_queries').split(",") # dict to store query key + query values det_queries = {} for output in queries: if output == "powershell": cmd = ATCconfig.get('sigmac_path') + " -t " + output + \ " --config " + ATCconfig.get('powershell_sigma_config') + \ " --ignore-backend-errors " + self.yaml_file elif output == "es-qs": cmd = ATCconfig.get('sigmac_path') + " -t " + output + \ " --config " + ATCconfig.get('es-qs_sigma_config') + \ " --ignore-backend-errors " + self.yaml_file elif output == "xpack-watcher": cmd = ATCconfig.get('sigmac_path') + " -t " + output + \ " --config " + ATCconfig.get('xpack-watcher_sigma_config') + \ " --ignore-backend-errors " + self.yaml_file elif output == "splunk": cmd = ATCconfig.get('sigmac_path') + " -t " + output + \ " --config " + ATCconfig.get('splunk_sigma_config') + \ " --ignore-backend-errors " + self.yaml_file elif output == "logpoint": cmd = ATCconfig.get('sigmac_path') + " -t " + output + \ " --config " + ATCconfig.get('logpoint_sigma_config') + \ " --ignore-backend-errors " + self.yaml_file else: cmd = ATCconfig.get('sigmac_path') + ' --shoot-yourself-in-the-foot -t "' + \ output + '" --ignore-backend-errors "' + self.yaml_file + '"' p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) (query, err) = p.communicate() # Wait for the process to terminate. Get the return code ## # p_status = p.wait() p.wait() # have to remove '-' due to problems with # Jinja2 variable naming, e.g. es-qs throws error # 'no es variable' #self.fields.update({output.replace("-", ""): str(query)[2:-3]}) det_queries[output] = str(query)[2:-3].replace("\\n", "\n") # Update detection rules self.fields.update({"det_queries": det_queries}) self.fields.update({"queries": queries}) # Data Needed data_needed = ATCutils.main_dn_calculatoin_func(self.yaml_file) data_needed_with_id = [] for data in sorted(data_needed): data_needed_id = str(ATCutils.confluence_get_page_id( self.apipath, self.auth, self.space, data)) data = (data, data_needed_id) data_needed_with_id.append(data) self.fields.update({'data_needed': data_needed_with_id}) # Enrichments enrichments = self.fields.get("enrichment") enrichments_with_page_id = [] if isinstance(enrichments, str): enrichments = [enrichments] if enrichments: for enrichment_name in enrichments: enrichment_page_id = str(ATCutils.confluence_get_page_id( self.apipath, self.auth, self.space, enrichment_name)) enrichment_data = (enrichment_name, enrichment_page_id) enrichments_with_page_id.append(enrichment_data) self.fields.update({'enrichment': enrichments_with_page_id}) # MITRE ATT&CK Tactics and Techniques tactic = [] tactic_re = re.compile(r'attack\.\w\D+$') technique = [] technique_re = re.compile(r'attack\.t\d{1,5}(\.\d{3})?$') # AM!TT Tactics and Techniques amitt_tactic = [] amitt_tactic_re = re.compile(r'amitt\.\w\D+$') amitt_technique = [] amitt_technique_re = re.compile(r'amitt\.t\d{1,5}$') other_tags = [] if self.fields.get('tags'): for tag in self.fields.get('tags'): if tactic_re.match(tag): if ta_mapping.get(tag): tactic.append(ta_mapping.get(tag)) else: other_tags.append(tag) elif amitt_tactic_re.match(tag): if amitt_tactic_mapping.get(tag): amitt_tactic.append(amitt_tactic_mapping.get(tag)) else: other_tags.append(tag) elif technique_re.match(tag): te = tag.upper()[7:] technique.append((te_mapping.get(te), te)) elif amitt_technique_re.match(tag): te = tag.upper()[6:] amitt_technique.append((amitt_technique_mapping.get(te), te)) else: other_tags.append(tag) if len(tactic): self.fields.update({'tactics': tactic}) if len(technique): self.fields.update({'techniques': technique}) if len(amitt_tactic): self.fields.update({'amitt_tactics': amitt_tactic}) if len(amitt_technique): self.fields.update({'amitt_techniques': amitt_technique}) if len(other_tags): self.fields.update({'other_tags': other_tags}) triggers = [] for trigger_name, trigger_id in technique: if trigger_id == "None": continue try: page_name = trigger_id + ": " + trigger_name trigger_page_id = str(ATCutils.confluence_get_page_id( self.apipath, self.auth, self.space, page_name)) trigger = (trigger_name, trigger_id, trigger_page_id) triggers.append(trigger) except FileNotFoundError: print(trigger_id + ": No atomics trigger for this technique") self.fields.update({'triggers': triggers}) self.content = template.render(self.fields) # Need to convert ampersand into HTML-safe format # Otherwise confluence throws an error # self.content = self.content.replace("&", "&amp;") # Done in the template itself return True
Render template with data in it template_type: - "markdown" - "confluence"
render_template
python
atc-project/atomic-threat-coverage
scripts/detectionrule.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/detectionrule.py
Apache-2.0
def save_markdown_file(self, atc_dir=ATCconfig.get('md_name_of_root_directory') + '/'): """Write content (md template filled with data) to a file""" base = os.path.basename(self.yaml_file) title = os.path.splitext(base)[0] file_path = atc_dir + self.parent_title + "/" + \ title + ".md" # Should return True return ATCutils.write_file(file_path, self.content)
Write content (md template filled with data) to a file
save_markdown_file
python
atc-project/atomic-threat-coverage
scripts/detectionrule.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/detectionrule.py
Apache-2.0
def save_markdown_file(self, atc_dir=ATCconfig.get('md_name_of_root_directory')): """Write content (md template filled with data) to a file""" base = os.path.basename(self.yaml_file) title = os.path.splitext(base)[0] file_path = atc_dir + self.parent_title + "/" + \ title + ".md" return ATCutils.write_file(file_path, self.content)
Write content (md template filled with data) to a file
save_markdown_file
python
atc-project/atomic-threat-coverage
scripts/hardeningpolicy.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/hardeningpolicy.py
Apache-2.0
def save_markdown_file(self, atc_dir=ATCconfig.get('md_name_of_root_directory')): """Write content (md template filled with data) to a file""" base = os.path.basename(self.yaml_file) title = os.path.splitext(base)[0] file_path = atc_dir + self.parent_title + "/" + \ title + ".md" return ATCutils.write_file(file_path, self.content)
Write content (md template filled with data) to a file
save_markdown_file
python
atc-project/atomic-threat-coverage
scripts/mitigationpolicy.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/mitigationpolicy.py
Apache-2.0
def save_markdown_file(self, atc_dir=ATCconfig.get('md_name_of_root_directory')): """Write content (md template filled with data) to a file""" base = os.path.basename(self.yaml_file) title = os.path.splitext(base)[0] file_path = atc_dir + self.parent_title + "/" + \ title + ".md" return ATCutils.write_file(file_path, self.content)
Write content (md template filled with data) to a file
save_markdown_file
python
atc-project/atomic-threat-coverage
scripts/mitigationsystem.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/mitigationsystem.py
Apache-2.0
def save_markdown_file(self, atc_dir=ATCconfig.get('md_name_of_root_directory')): """Write content (md template filled with data) to a file""" base = os.path.basename(self.yaml_file) title = os.path.splitext(base)[0] file_path = atc_dir + self.parent_title + "/" + \ title + ".md" return ATCutils.write_file(file_path, self.content)
Write content (md template filled with data) to a file
save_markdown_file
python
atc-project/atomic-threat-coverage
scripts/triggers.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/triggers.py
Apache-2.0
def get_rules(self): """ Returns list of detection rules for usecase """ dr_list_per_usecase = [rules_by_title.get(dr_title)[0] for dr_title in self.detection_rules] return dr_list_per_usecase
Returns list of detection rules for usecase
get_rules
python
atc-project/atomic-threat-coverage
scripts/usecases.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/usecases.py
Apache-2.0
def render_template(self, template_type): """Render template with data in it template_type: - "markdown" - "confluence" """ if template_type not in ["markdown", "confluence"]: raise Exception( "Bad template_type. Available values: " + "[\"markdown\", \"confluence\"]") self.uc_fields.update( {'description': self.description.strip()} ) # Transform variables to arrays if not provided correctly in yaml if isinstance(self.data_needed, str): self.uc_fields.update({'dataneeded': [self.data_needed]}) if isinstance(self.logging_policies, str): self.uc_fields.update({'loggingpolicy': [self.logging_policies]}) detectionrule_with_path = [] for title in self.detection_rules: if title is not None: name = rules_by_title.get(title)[1] else: name = '' dr = (title, name) detectionrule_with_path.append(dr) self.uc_fields.update({'detectionrule': detectionrule_with_path}) # Get proper template if template_type == "markdown": template = env\ .get_template('markdown_usecase_template.md.j2') elif template_type == "confluence": template = env.get_template( 'confluence_usecase_template.html.j2') self.uc_fields.update( {'confluence_viewpage_url': ATCconfig.get('confluence_viewpage_url')}) if not self.logging_policies: self.logging_policies = ["None", ] logging_policies_with_id = [] for lp in self.logging_policies: if lp != "None" and self.apipath and self.auth and self.space: logging_policies_id = str(ATCutils.confluence_get_page_id( self.apipath, self.auth, self.space, lp)) else: logging_policies_id = "" lp = (lp, logging_policies_id) logging_policies_with_id.append(lp) self.uc_fields.update({'loggingpolicy': logging_policies_with_id}) if not self.data_needed: self.data_needed = ["None", ] data_needed_with_id = [] for dn in self.data_needed: if dn != "None" and self.apipath and self.auth and self.space: data_needed_id = str(ATCutils.confluence_get_page_id( self.apipath, self.auth, self.space, dn)) else: data_needed_id = "" dn = (dn, data_needed_id) data_needed_with_id.append(dn) self.uc_fields.update({'data_needed': data_needed_with_id}) detection_rules_with_id = [] for dn in self.detection_rules: if dn != "None" and self.apipath and self.auth and self.space: detection_rules_id = str(ATCutils.confluence_get_page_id( self.apipath, self.auth, self.space, dn)) else: detection_rules_id = "" dn = (dn, detection_rules_id) detection_rules_with_id.append(dn) self.uc_fields.update({'detectionrule': detection_rules_with_id}) self.content = template.render(self.uc_fields) return True
Render template with data in it template_type: - "markdown" - "confluence"
render_template
python
atc-project/atomic-threat-coverage
scripts/usecases.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/usecases.py
Apache-2.0
def save_markdown_file(self, atc_dir=ATCconfig.get('md_name_of_root_directory')): """Write content (md template filled with data) to a file""" base = os.path.basename(self.yaml_file) title = os.path.splitext(base)[0] file_path = atc_dir + self.parent_title + "/" + \ title + ".md" return ATCutils.write_file(file_path, self.content)
Write content (md template filled with data) to a file
save_markdown_file
python
atc-project/atomic-threat-coverage
scripts/usecases.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/usecases.py
Apache-2.0
def __init__(self, id, field, enabled=None): """field - field name which should be averaged""" super().__init__( id=id, enabled=enabled, type="avg", schema="metric", params={ "field": field } )
field - field name which should be averaged
__init__
python
atc-project/atomic-threat-coverage
scripts/atc_visualizations/aggs.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atc_visualizations/aggs.py
Apache-2.0
def __init__(self, id, field, aggregate_with, size, sort_order, sort_field, enabled=None): """aggregate_with - can be average, max, min or sum size - integer sort_order - can be asc or desc """ super().__init__( id=id, enabled=enabled, params={ "aggregate": aggregate_with, "field": field, "size": size, "sortField": sort_field, "sortOrder": sort_order }, schema="metric", type="top_hits" )
aggregate_with - can be average, max, min or sum size - integer sort_order - can be asc or desc
__init__
python
atc-project/atomic-threat-coverage
scripts/atc_visualizations/aggs.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atc_visualizations/aggs.py
Apache-2.0
def validate(self): # TODO: Write custom validate (validate every required field) """ sort_order - either `asc` or `desc` size - int positive number aggregate_with - ["average", "concat", "min", "max", "sum"] """ return super().validate()
sort_order - either `asc` or `desc` size - int positive number aggregate_with - ["average", "concat", "min", "max", "sum"]
validate
python
atc-project/atomic-threat-coverage
scripts/atc_visualizations/aggs.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atc_visualizations/aggs.py
Apache-2.0
def search_id_of_title_by_type(cls, search_type, search_title): """Returns an ID (string) of an object searched using object title search_type - string in ["index-pattern", "search"] search_title - string """ search_type = search_type.lower() if search_type not in ["index-pattern", "search"]: raise Exception("Search type (%s) not supported" % search_type) if cls.check_kibana_vars(): result_dict = {} total_pages = int() current_page = 1 suffix = "api/saved_objects/_find?" + \ "type=%s&fields=title&fields=id" % search_type r = requests.get(cls.kibana_url + suffix) if r.json().get("total"): total_pages = r.json().get("total") while current_page <= total_pages: if r.json().get("saved_objects"): for item in r.json().get("saved_objects"): if item.get("attributes"): result_dict[item.get("attributes").get("title")] =\ item.get('id') if search_title in result_dict.keys(): return result_dict[search_title] else: current_page += 1 r = requests.get( cls.kibana_url + suffix + "&page=%s" % current_page ) del(result_dict) return None
Returns an ID (string) of an object searched using object title search_type - string in ["index-pattern", "search"] search_title - string
search_id_of_title_by_type
python
atc-project/atomic-threat-coverage
scripts/atc_visualizations/base.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atc_visualizations/base.py
Apache-2.0
def get_all(self): """Get all saved objects and put them into visualization/dashboards""" r = self.es.search( index='.kibana*', doc_type='', body={'query': {'match_all': {}}}, size=self.search_limit_size, ) for obj in r['hits']['hits']: if obj.get('_source'): if obj['_source'].get('type'): _type = obj['_source']['type'] if _type == 'visualization': self.visualizations.append(Visualizations(obj)) if _type == 'dashboard': self.dashboards.append(Dashboards(obj))
Get all saved objects and put them into visualization/dashboards
get_all
python
atc-project/atomic-threat-coverage
scripts/atc_visualizations/kibana_api.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atc_visualizations/kibana_api.py
Apache-2.0
def json_export_gui(self, return_dict=False, uuid_=None): """visState has to be a string with escaped doublequotes""" if self.validate(): # self.updated_at = datetime.datetime.today().isoformat() + "Z" # TODO: Find proper way to do below line :)) tmp_dictionary = literal_eval(str(self.__dict__)) if uuid_: tmp_dictionary["_id"] = uuid_ else: tmp_dictionary["_id"] = str(uuid.uuid4()) tmp_dictionary["_type"] = tmp_dictionary.pop("type") tmp_dictionary["visualization"]["visState"] = json.dumps( tmp_dictionary["visualization"]["visState"] ) tmp_dictionary.pop("metric_id", None) tmp_dictionary.pop("updated_at", None) tmp_dictionary.pop("_meta_data_set", None) kbsvd = tmp_dictionary["visualization"]["kibanaSavedObjectMeta"] kbsvd["searchSourceJSON"] = json.dumps( tmp_dictionary.get("visualization") .get("kibanaSavedObjectMeta") .get("searchSourceJSON") ) tmp_dictionary["_source"] = tmp_dictionary.pop("visualization") if return_dict: return tmp_dictionary else: return json.dumps(tmp_dictionary) else: raise Exception("Data validation failed")
visState has to be a string with escaped doublequotes
json_export_gui
python
atc-project/atomic-threat-coverage
scripts/atc_visualizations/visualisation.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atc_visualizations/visualisation.py
Apache-2.0
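Editor's note on the record above: the literal_eval(str(self.__dict__)) trick (flagged by the code's own TODO) is a roundabout deep copy and breaks on values whose repr is not a Python literal. A hedged sketch of the conventional alternative, with a stand-in class:

import copy

class Obj:  # stand-in for the visualization object; not from the source
    def __init__(self):
        self.type = "visualization"
        self.visualization = {"visState": {"title": "t"}}

o = Obj()
tmp = copy.deepcopy(o.__dict__)              # independent copy of the attribute dict
tmp["visualization"]["visState"] = "mutated"
assert o.visualization["visState"] == {"title": "t"}   # original is untouched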
def json_export_api(self, return_dict=False, uuid_=None): """visState has to be a string with escaped doublequotes""" if self.validate(): # self.updated_at = datetime.datetime.today().isoformat() + "Z" # TODO: Find proper way to do below line :)) tmp_dictionary = literal_eval(str(self.__dict__)) if uuid_: tmp_dictionary["id"] = uuid_ else: tmp_dictionary["id"] = str(uuid.uuid4()) tmp_dictionary["type"] = tmp_dictionary.pop("type") tmp_dictionary["visualization"]["visState"] = json.dumps( tmp_dictionary["visualization"]["visState"] ) tmp_dictionary.pop("metric_id", None) tmp_dictionary.pop("updated_at", None) tmp_dictionary.pop("_meta_data_set", None) kbsvd = tmp_dictionary["visualization"]["kibanaSavedObjectMeta"] kbsvd["searchSourceJSON"] = json.dumps( tmp_dictionary.get("visualization") .get("kibanaSavedObjectMeta") .get("searchSourceJSON") ) tmp_dictionary["attributes"] = tmp_dictionary.pop("visualization") tmp_dictionary["version"] = 1 if return_dict: return tmp_dictionary else: return json.dumps(tmp_dictionary) else: raise Exception("Data validation failed")
visState has to be a string with escaped doublequotes
json_export_api
python
atc-project/atomic-threat-coverage
scripts/atc_visualizations/visualisation.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atc_visualizations/visualisation.py
Apache-2.0
def set_saved_search(self, saved_search_name=None, saved_search_id=None): """Provide ID if you know it and don't want to engage kibana""" if not saved_search_name and not saved_search_id: raise Exception( "What's the point of running this method without arguments?" ) _id = "" if saved_search_id: _id = saved_search_id else: if not self.check_kibana_vars(): raise Exception( "Cannot search for an ID if no access to Kibana!" ) _id = self.search_id_of_title_by_type( search_type="search", search_title=saved_search_name ) self.visualization.savedSearchId = _id self.visualization.kibanaSavedObjectMeta["searchSourceJSON"]\ .pop("index", None) self._meta_data_set = True
Provide ID if you know it and don't want to engage kibana
set_saved_search
python
atc-project/atomic-threat-coverage
scripts/atc_visualizations/visualisation.py
https://github.com/atc-project/atomic-threat-coverage/blob/master/scripts/atc_visualizations/visualisation.py
Apache-2.0
def validate(opts, model, loader, device, metrics, ret_samples_ids=None): """Do validation and return specified samples""" metrics.reset() ret_samples = [] if opts.save_val_results: if not os.path.exists('results'): os.mkdir('results') denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) img_id = 0 with torch.no_grad(): for i, (images, labels) in tqdm(enumerate(loader)): images = images.to(device, dtype=torch.float32) labels = labels.to(device, dtype=torch.long) outputs = model(images) preds = outputs.detach().max(dim=1)[1].cpu().numpy() targets = labels.cpu().numpy() metrics.update(targets, preds) if ret_samples_ids is not None and i in ret_samples_ids: # get vis samples ret_samples.append( (images[0].detach().cpu().numpy(), targets[0], preds[0])) if opts.save_val_results: for i in range(len(images)): image = images[i].detach().cpu().numpy() target = targets[i] pred = preds[i] image = (denorm(image) * 255).transpose(1, 2, 0).astype(np.uint8) target = loader.dataset.decode_target(target).astype(np.uint8) pred = loader.dataset.decode_target(pred).astype(np.uint8) Image.fromarray(image).save('results/%d_image.png' % img_id) Image.fromarray(target).save('results/%d_target.png' % img_id) Image.fromarray(pred).save('results/%d_pred.png' % img_id) fig = plt.figure() plt.imshow(image) plt.axis('off') plt.imshow(pred, alpha=0.7) ax = plt.gca() ax.xaxis.set_major_locator(matplotlib.ticker.NullLocator()) ax.yaxis.set_major_locator(matplotlib.ticker.NullLocator()) plt.savefig('results/%d_overlay.png' % img_id, bbox_inches='tight', pad_inches=0) plt.close() img_id += 1 score = metrics.get_results() return score, ret_samples
Do validation and return specified samples
validate
python
VainF/DeepLabV3Plus-Pytorch
main.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/main.py
MIT
def __getitem__(self, index): """ Args: index (int): Index Returns: tuple: (image, target) where target is a tuple of all target types if target_type is a list with more than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation. """ image = Image.open(self.images[index]).convert('RGB') target = Image.open(self.targets[index]) if self.transform: image, target = self.transform(image, target) target = self.encode_target(target) return image, target
Args: index (int): Index Returns: tuple: (image, target) where target is a tuple of all target types if target_type is a list with more than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation.
__getitem__
python
VainF/DeepLabV3Plus-Pytorch
datasets/cityscapes.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/datasets/cityscapes.py
MIT
def download_url(url, root, filename=None, md5=None): """Download a file from a url and place it in root. Args: url (str): URL to download file from root (str): Directory to place downloaded file in filename (str): Name to save the file under. If None, use the basename of the URL md5 (str): MD5 checksum of the download. If None, do not check """ from six.moves import urllib root = os.path.expanduser(root) if not filename: filename = os.path.basename(url) fpath = os.path.join(root, filename) makedir_exist_ok(root) # downloads file if os.path.isfile(fpath) and check_integrity(fpath, md5): print('Using downloaded and verified file: ' + fpath) else: try: print('Downloading ' + url + ' to ' + fpath) urllib.request.urlretrieve( url, fpath, reporthook=gen_bar_updater(tqdm(unit='B', unit_scale=True)) ) except OSError: if url[:5] == 'https': url = url.replace('https:', 'http:') print('Failed download. Trying https -> http instead.' ' Downloading ' + url + ' to ' + fpath) urllib.request.urlretrieve( url, fpath, reporthook=gen_bar_updater(tqdm(unit='B', unit_scale=True)) )
Download a file from a url and place it in root. Args: url (str): URL to download file from root (str): Directory to place downloaded file in filename (str): Name to save the file under. If None, use the basename of the URL md5 (str): MD5 checksum of the download. If None, do not check
download_url
python
VainF/DeepLabV3Plus-Pytorch
datasets/utils.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/datasets/utils.py
MIT
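Editor's note on the record above: a hypothetical call to download_url (URL and directory are placeholders); the https-to-http retry only fires when urlretrieve raises OSError:

download_url(
    "https://example.com/files/archive.tar.gz",  # placeholder URL
    root="~/datasets",
    filename=None,   # falls back to the URL basename: archive.tar.gz
    md5=None,        # skip checksum verification
)
# If the file already exists and passes check_integrity, the download is skipped.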
def list_dir(root, prefix=False): """List all directories at a given root Args: root (str): Path to directory whose folders need to be listed prefix (bool, optional): If true, prepends the path to each result, otherwise only returns the name of the directories found """ root = os.path.expanduser(root) directories = list( filter( lambda p: os.path.isdir(os.path.join(root, p)), os.listdir(root) ) ) if prefix is True: directories = [os.path.join(root, d) for d in directories] return directories
List all directories at a given root Args: root (str): Path to directory whose folders need to be listed prefix (bool, optional): If true, prepends the path to each result, otherwise only returns the name of the directories found
list_dir
python
VainF/DeepLabV3Plus-Pytorch
datasets/utils.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/datasets/utils.py
MIT
def list_files(root, suffix, prefix=False): """List all files ending with a suffix at a given root Args: root (str): Path to directory whose folders need to be listed suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png'). It uses the Python "str.endswith" method and is passed directly prefix (bool, optional): If true, prepends the path to each result, otherwise only returns the name of the files found """ root = os.path.expanduser(root) files = list( filter( lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix), os.listdir(root) ) ) if prefix is True: files = [os.path.join(root, d) for d in files] return files
List all files ending with a suffix at a given root Args: root (str): Path to directory whose folders need to be listed suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png'). It uses the Python "str.endswith" method and is passed directly prefix (bool, optional): If true, prepends the path to each result, otherwise only returns the name of the files found
list_files
python
VainF/DeepLabV3Plus-Pytorch
datasets/utils.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/datasets/utils.py
MIT
def __getitem__(self, index): """ Args: index (int): Index Returns: tuple: (image, target) where target is the image segmentation. """ img = Image.open(self.images[index]).convert('RGB') target = Image.open(self.masks[index]) if self.transform is not None: img, target = self.transform(img, target) return img, target
Args: index (int): Index Returns: tuple: (image, target) where target is the image segmentation.
__getitem__
python
VainF/DeepLabV3Plus-Pytorch
datasets/voc.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/datasets/voc.py
MIT
def get_results(self): """Returns accuracy score evaluation result. - overall accuracy - mean accuracy - mean IU - fwavacc """ hist = self.confusion_matrix acc = np.diag(hist).sum() / hist.sum() acc_cls = np.diag(hist) / hist.sum(axis=1) acc_cls = np.nanmean(acc_cls) iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)) mean_iu = np.nanmean(iu) freq = hist.sum(axis=1) / hist.sum() fwavacc = (freq[freq > 0] * iu[freq > 0]).sum() cls_iu = dict(zip(range(self.n_classes), iu)) return { "Overall Acc": acc, "Mean Acc": acc_cls, "FreqW Acc": fwavacc, "Mean IoU": mean_iu, "Class IoU": cls_iu, }
Returns accuracy score evaluation result. - overall accuracy - mean accuracy - mean IU - fwavacc
get_results
python
VainF/DeepLabV3Plus-Pytorch
metrics/stream_metrics.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/metrics/stream_metrics.py
MIT
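Editor's note on the record above: the IoU arithmetic, worked on a tiny 2-class confusion matrix (rows = ground truth, columns = prediction):

import numpy as np

hist = np.array([[50, 10],
                 [ 5, 35]])
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
# class 0: 50 / (60 + 55 - 50) = 50/65 ≈ 0.769
# class 1: 35 / (40 + 45 - 35) = 35/50 = 0.700
print(np.nanmean(iu))   # Mean IoU ≈ 0.735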
def _make_divisible(v, divisor, min_value=None): """ This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py :param v: :param divisor: :param min_value: :return: """ if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_v < 0.9 * v: new_v += divisor return new_v
This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py :param v: :param divisor: :param min_value: :return:
_make_divisible
python
VainF/DeepLabV3Plus-Pytorch
network/backbone/mobilenetv2.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/network/backbone/mobilenetv2.py
MIT
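Editor's note on the record above: worked values for _make_divisible (divisor 8 is the repo's round_nearest default), including a case where the 10% guard bumps the result up by one divisor:

print(_make_divisible(32 * 0.75, 8))   # 24  — rounds to the nearest multiple of 8
print(_make_divisible(33, 8))          # 32  — 33 rounds down to 32, still within 10%
print(_make_divisible(72, 64))         # 128 — 64 would undershoot 72 by more than 10%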
def __init__(self, num_classes=1000, output_stride=8, width_mult=1.0, inverted_residual_setting=None, round_nearest=8): """ MobileNet V2 main class Args: num_classes (int): Number of classes width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount inverted_residual_setting: Network structure round_nearest (int): Round the number of channels in each layer to be a multiple of this number Set to 1 to turn off rounding """ super(MobileNetV2, self).__init__() block = InvertedResidual input_channel = 32 last_channel = 1280 self.output_stride = output_stride current_stride = 1 if inverted_residual_setting is None: inverted_residual_setting = [ # t, c, n, s [1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1], ] # only check the first element, assuming user knows t,c,n,s are required if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4: raise ValueError("inverted_residual_setting should be non-empty " "or a 4-element list, got {}".format(inverted_residual_setting)) # building first layer input_channel = _make_divisible(input_channel * width_mult, round_nearest) self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest) features = [ConvBNReLU(3, input_channel, stride=2)] current_stride *= 2 dilation=1 previous_dilation = 1 # building inverted residual blocks for t, c, n, s in inverted_residual_setting: output_channel = _make_divisible(c * width_mult, round_nearest) previous_dilation = dilation if current_stride == output_stride: stride = 1 dilation *= s else: stride = s current_stride *= s output_channel = int(c * width_mult) for i in range(n): if i==0: features.append(block(input_channel, output_channel, stride, previous_dilation, expand_ratio=t)) else: features.append(block(input_channel, output_channel, 1, dilation, expand_ratio=t)) input_channel = output_channel # building last several layers features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1)) # make it nn.Sequential self.features = nn.Sequential(*features) # building classifier self.classifier = nn.Sequential( nn.Dropout(0.2), nn.Linear(self.last_channel, num_classes), ) # weight initialization for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.BatchNorm2d): nn.init.ones_(m.weight) nn.init.zeros_(m.bias) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.zeros_(m.bias)
MobileNet V2 main class Args: num_classes (int): Number of classes width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount inverted_residual_setting: Network structure round_nearest (int): Round the number of channels in each layer to be a multiple of this number Set to 1 to turn off rounding
__init__
python
VainF/DeepLabV3Plus-Pytorch
network/backbone/mobilenetv2.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/network/backbone/mobilenetv2.py
MIT
def mobilenet_v2(pretrained=False, progress=True, **kwargs): """ Constructs a MobileNetV2 architecture from `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ model = MobileNetV2(**kwargs) if pretrained: state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'], progress=progress) model.load_state_dict(state_dict) return model
Constructs a MobileNetV2 architecture from `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
mobilenet_v2
python
VainF/DeepLabV3Plus-Pytorch
network/backbone/mobilenetv2.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/network/backbone/mobilenetv2.py
MIT
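Editor's note on the record above: typical construction; output_stride is this repo's extra kwarg, threaded through to the dilated blocks in the __init__ shown earlier:

model = mobilenet_v2(pretrained=False, output_stride=16, num_classes=1000)
# pretrained=True fetches the torchvision ImageNet weights via
# load_state_dict_from_url(model_urls['mobilenet_v2']).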
def resnext50_32x4d(pretrained=False, progress=True, **kwargs): r"""ResNeXt-50 32x4d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ kwargs['groups'] = 32 kwargs['width_per_group'] = 4 return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
ResNeXt-50 32x4d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
resnext50_32x4d
python
VainF/DeepLabV3Plus-Pytorch
network/backbone/resnet.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/network/backbone/resnet.py
MIT
def resnext101_32x8d(pretrained=False, progress=True, **kwargs): r"""ResNeXt-101 32x8d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ kwargs['groups'] = 32 kwargs['width_per_group'] = 8 return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
ResNeXt-101 32x8d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
resnext101_32x8d
python
VainF/DeepLabV3Plus-Pytorch
network/backbone/resnet.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/network/backbone/resnet.py
MIT
def wide_resnet50_2(pretrained=False, progress=True, **kwargs): r"""Wide ResNet-50-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_ The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ kwargs['width_per_group'] = 64 * 2 return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
Wide ResNet-50-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_ The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
wide_resnet50_2
python
VainF/DeepLabV3Plus-Pytorch
network/backbone/resnet.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/network/backbone/resnet.py
MIT
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice as large in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. the last block in ResNet-50 has 2048-512-2048
    channels, while in Wide ResNet-50-2 it has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
Wide ResNet-101-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_ The model is the same as ResNet except for the bottleneck number of channels which is twice as large in every block. The number of channels in outer 1x1 convolutions is the same, e.g. the last block in ResNet-50 has 2048-512-2048 channels, while in Wide ResNet-50-2 it has 2048-1024-2048. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
wide_resnet101_2
python
VainF/DeepLabV3Plus-Pytorch
network/backbone/resnet.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/network/backbone/resnet.py
MIT
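For readers skimming these records, a minimal usage sketch of the wide-ResNet constructors above. It assumes the module is importable as network.backbone.resnet (taken from the path field of these records); the output shape depends on how this repository configures the ResNet head, so the comments are indicative only.

import torch
from network.backbone.resnet import wide_resnet50_2

model = wide_resnet50_2(pretrained=False)  # width_per_group = 128 doubles the bottleneck channels
x = torch.randn(1, 3, 224, 224)            # dummy ImageNet-sized input
out = model(x)                             # forward pass; output shape depends on the configured head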
def __call__(self, img, lbl):
    """
    Args:
        img (PIL Image): Image to be flipped.
        lbl (PIL Image): Label to be flipped.

    Returns:
        PIL Image: Randomly flipped image.
        PIL Image: Randomly flipped label.
    """
    if random.random() < self.p:
        return F.hflip(img), F.hflip(lbl)
    return img, lbl
Args: img (PIL Image): Image to be flipped. lbl (PIL Image): Label to be flipped. Returns: PIL Image: Randomly flipped image. PIL Image: Randomly flipped label.
__call__
python
VainF/DeepLabV3Plus-Pytorch
utils/ext_transforms.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/utils/ext_transforms.py
MIT
def __call__(self, img, lbl): """ Args: img (PIL Image): Image to be scaled. lbl (PIL Image): Label to be scaled. Returns: PIL Image: Rescaled image. PIL Image: Rescaled label. """ assert img.size == lbl.size scale = random.uniform(self.scale_range[0], self.scale_range[1]) target_size = ( int(img.size[1]*scale), int(img.size[0]*scale) ) return F.resize(img, target_size, self.interpolation), F.resize(lbl, target_size, Image.NEAREST)
Args: img (PIL Image): Image to be scaled. lbl (PIL Image): Label to be scaled. Returns: PIL Image: Rescaled image. PIL Image: Rescaled label.
__call__
python
VainF/DeepLabV3Plus-Pytorch
utils/ext_transforms.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/utils/ext_transforms.py
MIT
def __call__(self, img, lbl): """ Args: img (PIL Image): Image to be scaled. lbl (PIL Image): Label to be scaled. Returns: PIL Image: Rescaled image. PIL Image: Rescaled label. """ assert img.size == lbl.size target_size = ( int(img.size[1]*self.scale), int(img.size[0]*self.scale) ) # (H, W) return F.resize(img, target_size, self.interpolation), F.resize(lbl, target_size, Image.NEAREST)
Args: img (PIL Image): Image to be scaled. lbl (PIL Image): Label to be scaled. Returns: PIL Image: Rescaled image. PIL Image: Rescaled label.
__call__
python
VainF/DeepLabV3Plus-Pytorch
utils/ext_transforms.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/utils/ext_transforms.py
MIT
def __call__(self, img, lbl):
    """
    Args:
        img (PIL Image): Image to be rotated.
        lbl (PIL Image): Label to be rotated.

    Returns:
        PIL Image: Rotated image.
        PIL Image: Rotated label.
    """
    angle = self.get_params(self.degrees)
    return F.rotate(img, angle, self.resample, self.expand, self.center), F.rotate(lbl, angle, self.resample, self.expand, self.center)
Args: img (PIL Image): Image to be rotated. lbl (PIL Image): Label to be rotated. Returns: PIL Image: Rotated image. PIL Image: Rotated label.
__call__
python
VainF/DeepLabV3Plus-Pytorch
utils/ext_transforms.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/utils/ext_transforms.py
MIT
def __call__(self, img, lbl):
    """
    Args:
        img (PIL Image): Image to be flipped.
        lbl (PIL Image): Label to be flipped.

    Returns:
        PIL Image: Randomly flipped image.
        PIL Image: Randomly flipped label.
    """
    if random.random() < self.p:
        return F.hflip(img), F.hflip(lbl)
    return img, lbl
Args: img (PIL Image): Image to be flipped. lbl (PIL Image): Label to be flipped. Returns: PIL Image: Randomly flipped image. PIL Image: Randomly flipped label.
__call__
python
VainF/DeepLabV3Plus-Pytorch
utils/ext_transforms.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/utils/ext_transforms.py
MIT
def __call__(self, img, lbl): """ Args: img (PIL Image): Image to be flipped. lbl (PIL Image): Label to be flipped. Returns: PIL Image: Randomly flipped image. PIL Image: Randomly flipped label. """ if random.random() < self.p: return F.vflip(img), F.vflip(lbl) return img, lbl
Args: img (PIL Image): Image to be flipped. lbl (PIL Image): Label to be flipped. Returns: PIL Image: Randomly flipped image. PIL Image: Randomly flipped label.
__call__
python
VainF/DeepLabV3Plus-Pytorch
utils/ext_transforms.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/utils/ext_transforms.py
MIT
def __call__(self, pic, lbl): """ Note that labels will not be normalized to [0, 1]. Args: pic (PIL Image or numpy.ndarray): Image to be converted to tensor. lbl (PIL Image or numpy.ndarray): Label to be converted to tensor. Returns: Tensor: Converted image and label """ if self.normalize: return F.to_tensor(pic), torch.from_numpy( np.array( lbl, dtype=self.target_type) ) else: return torch.from_numpy( np.array( pic, dtype=np.float32).transpose(2, 0, 1) ), torch.from_numpy( np.array( lbl, dtype=self.target_type) )
Note that labels will not be normalized to [0, 1]. Args: pic (PIL Image or numpy.ndarray): Image to be converted to tensor. lbl (PIL Image or numpy.ndarray): Label to be converted to tensor. Returns: Tensor: Converted image and label
__call__
python
VainF/DeepLabV3Plus-Pytorch
utils/ext_transforms.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/utils/ext_transforms.py
MIT
def get_params(img, output_size): """Get parameters for ``crop`` for a random crop. Args: img (PIL Image): Image to be cropped. output_size (tuple): Expected output size of the crop. Returns: tuple: params (i, j, h, w) to be passed to ``crop`` for random crop. """ w, h = img.size th, tw = output_size if w == tw and h == th: return 0, 0, h, w i = random.randint(0, h - th) j = random.randint(0, w - tw) return i, j, th, tw
Get parameters for ``crop`` for a random crop. Args: img (PIL Image): Image to be cropped. output_size (tuple): Expected output size of the crop. Returns: tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
get_params
python
VainF/DeepLabV3Plus-Pytorch
utils/ext_transforms.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/utils/ext_transforms.py
MIT
def __call__(self, img, lbl): """ Args: img (PIL Image): Image to be cropped. lbl (PIL Image): Label to be cropped. Returns: PIL Image: Cropped image. PIL Image: Cropped label. """ assert img.size == lbl.size, 'size of img and lbl should be the same. %s, %s'%(img.size, lbl.size) if self.padding > 0: img = F.pad(img, self.padding) lbl = F.pad(lbl, self.padding) # pad the width if needed if self.pad_if_needed and img.size[0] < self.size[1]: img = F.pad(img, padding=int((1 + self.size[1] - img.size[0]) / 2)) lbl = F.pad(lbl, padding=int((1 + self.size[1] - lbl.size[0]) / 2)) # pad the height if needed if self.pad_if_needed and img.size[1] < self.size[0]: img = F.pad(img, padding=int((1 + self.size[0] - img.size[1]) / 2)) lbl = F.pad(lbl, padding=int((1 + self.size[0] - lbl.size[1]) / 2)) i, j, h, w = self.get_params(img, self.size) return F.crop(img, i, j, h, w), F.crop(lbl, i, j, h, w)
Args: img (PIL Image): Image to be cropped. lbl (PIL Image): Label to be cropped. Returns: PIL Image: Cropped image. PIL Image: Cropped label.
__call__
python
VainF/DeepLabV3Plus-Pytorch
utils/ext_transforms.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/utils/ext_transforms.py
MIT
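To make the paired-crop logic easier to follow outside the transform class, a standalone sketch using only PIL and torchvision.transforms.functional (both already used by the records above); the helper name is hypothetical.

import random
from PIL import Image
import torchvision.transforms.functional as F

def paired_random_crop(img, lbl, th, tw):
    # Sample a single (i, j) offset, then apply the same crop to image and
    # label, mirroring get_params + F.crop above.
    w, h = img.size
    i = random.randint(0, h - th)
    j = random.randint(0, w - tw)
    return F.crop(img, i, j, th, tw), F.crop(lbl, i, j, th, tw)

img = Image.new('RGB', (512, 512))
lbl = Image.new('L', (512, 512))
img_c, lbl_c = paired_random_crop(img, lbl, 256, 256)
assert img_c.size == lbl_c.size == (256, 256)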
def get_params(brightness, contrast, saturation, hue):
    """Get a randomized transform to be applied on image.

    Arguments are the same as those of __init__.

    Returns:
        Transform which randomly adjusts brightness, contrast and
        saturation in a random order.
    """
    transforms = []
    if brightness is not None:
        brightness_factor = random.uniform(brightness[0], brightness[1])
        transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
    if contrast is not None:
        contrast_factor = random.uniform(contrast[0], contrast[1])
        transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
    if saturation is not None:
        saturation_factor = random.uniform(saturation[0], saturation[1])
        transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
    if hue is not None:
        hue_factor = random.uniform(hue[0], hue[1])
        transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
    random.shuffle(transforms)
    transform = Compose(transforms)
    return transform
Get a randomized transform to be applied on image. Arguments are the same as those of __init__. Returns: Transform which randomly adjusts brightness, contrast and saturation in a random order.
get_params
python
VainF/DeepLabV3Plus-Pytorch
utils/ext_transforms.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/utils/ext_transforms.py
MIT
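A reduced sketch of the same sampling idea, jittering brightness only and calling torchvision.transforms.functional directly; the Lambda and Compose wrappers in the record above are assumed to come from the surrounding transforms module, so they are omitted here.

import random
from PIL import Image
import torchvision.transforms.functional as F

def jitter_brightness(img, low=0.8, high=1.2):
    # One factor is drawn per call, exactly as get_params draws one
    # factor per colour property.
    factor = random.uniform(low, high)
    return F.adjust_brightness(img, factor)

img = Image.new('RGB', (64, 64), color=(128, 128, 128))
out = jitter_brightness(img)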
def __call__(self, img, lbl): """ Args: img (PIL Image): Input image. Returns: PIL Image: Color jittered image. """ transform = self.get_params(self.brightness, self.contrast, self.saturation, self.hue) return transform(img), lbl
Args: img (PIL Image): Input image. Returns: PIL Image: Color jittered image.
__call__
python
VainF/DeepLabV3Plus-Pytorch
utils/ext_transforms.py
https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/utils/ext_transforms.py
MIT
def test_geosearch(self): """Test parsing a Wikipedia location request result.""" self.assertEqual( wikipedia.geosearch(Decimal('40.67693'), Decimal('117.23193')), mock_data['data']["great_wall_of_china.geo_seach"] )
Test parsing a Wikipedia location request result.
test_geosearch
python
goldsmith/Wikipedia
tests/geosearch_test.py
https://github.com/goldsmith/Wikipedia/blob/master/tests/geosearch_test.py
MIT
def test_geosearch_with_radius(self): """Test parsing a Wikipedia location request result.""" self.assertEqual(wikipedia.geosearch( Decimal('40.67693'), Decimal('117.23193'), radius=10000), mock_data['data']["great_wall_of_china.geo_seach_with_radius"] )
Test parsing a Wikipedia location request result.
test_geosearch_with_radius
python
goldsmith/Wikipedia
tests/geosearch_test.py
https://github.com/goldsmith/Wikipedia/blob/master/tests/geosearch_test.py
MIT
def test_geosearch_with_existing_title(self): """Test parsing a Wikipedia location request result.""" self.assertEqual(wikipedia.geosearch( Decimal('40.67693'), Decimal('117.23193'), title='Great Wall of China'), mock_data['data']["great_wall_of_china.geo_seach_with_existing_article_name"] )
Test parsing a Wikipedia location request result.
test_geosearch_with_existing_title
python
goldsmith/Wikipedia
tests/geosearch_test.py
https://github.com/goldsmith/Wikipedia/blob/master/tests/geosearch_test.py
MIT
def test_missing(self):
    """Test that page raises a PageError for a nonexistent page."""
    # Callicarpa?
    purpleberry = lambda: wikipedia.page("purpleberry", auto_suggest=False)
    self.assertRaises(wikipedia.PageError, purpleberry)
Test that page raises a PageError for a nonexistent page.
test_missing
python
goldsmith/Wikipedia
tests/page_test.py
https://github.com/goldsmith/Wikipedia/blob/master/tests/page_test.py
MIT
def test_redirect_true(self):
    """Test that a page successfully redirects a query."""
    # no error should be raised if redirect is True
    mp = wikipedia.page("Menlo Park, New Jersey")

    self.assertEqual(mp.title, "Edison, New Jersey")
    self.assertEqual(mp.url, "http://en.wikipedia.org/wiki/Edison,_New_Jersey")
Test that a page successfully redirects a query.
test_redirect_true
python
goldsmith/Wikipedia
tests/page_test.py
https://github.com/goldsmith/Wikipedia/blob/master/tests/page_test.py
MIT
def test_redirect_no_normalization(self): """Test that a page with redirects but no normalization query loads correctly""" the_party = wikipedia.page("Communist Party", auto_suggest=False) self.assertIsInstance(the_party, wikipedia.WikipediaPage) self.assertEqual(the_party.title, "Communist party")
Test that a page with redirects but no normalization query loads correctly
test_redirect_no_normalization
python
goldsmith/Wikipedia
tests/page_test.py
https://github.com/goldsmith/Wikipedia/blob/master/tests/page_test.py
MIT
def test_redirect_with_normalization(self): """Test that a page redirect with a normalized query loads correctly""" the_party = wikipedia.page("communist Party", auto_suggest=False) self.assertIsInstance(the_party, wikipedia.WikipediaPage) self.assertEqual(the_party.title, "Communist party")
Test that a page redirect with a normalized query loads correctly
test_redirect_with_normalization
python
goldsmith/Wikipedia
tests/page_test.py
https://github.com/goldsmith/Wikipedia/blob/master/tests/page_test.py
MIT
def test_redirect_normalization(self): """Test that a page redirect loads correctly with or without a query normalization""" capital_party = wikipedia.page("Communist Party", auto_suggest=False) lower_party = wikipedia.page("communist Party", auto_suggest=False) self.assertIsInstance(capital_party, wikipedia.WikipediaPage) self.assertIsInstance(lower_party, wikipedia.WikipediaPage) self.assertEqual(capital_party.title, "Communist party") self.assertEqual(capital_party, lower_party)
Test that a page redirect loads correctly with or without a query normalization
test_redirect_normalization
python
goldsmith/Wikipedia
tests/page_test.py
https://github.com/goldsmith/Wikipedia/blob/master/tests/page_test.py
MIT
def test_disambiguate(self): """Test that page raises an error when a disambiguation page is reached.""" try: ram = wikipedia.page("Dodge Ram (disambiguation)", auto_suggest=False, redirect=False) error_raised = False except wikipedia.DisambiguationError as e: error_raised = True options = e.options self.assertTrue(error_raised) self.assertEqual(options, [u'Dodge Ramcharger', u'Dodge Ram Van', u'Dodge Mini Ram', u'Dodge Caravan C/V', u'Dodge Caravan C/V', u'Ram C/V', u'Dodge Ram 50', u'Dodge D-Series', u'Dodge Rampage', u'Ram (brand)'])
Test that page raises an error when a disambiguation page is reached.
test_disambiguate
python
goldsmith/Wikipedia
tests/page_test.py
https://github.com/goldsmith/Wikipedia/blob/master/tests/page_test.py
MIT
def test_suggestion(self): """Test getting a suggestion as well as search results.""" search, suggestion = wikipedia.search("hallelulejah", suggestion=True) self.assertEqual(search, []) self.assertEqual(suggestion, u'hallelujah')
Test getting a suggestion as well as search results.
test_suggestion
python
goldsmith/Wikipedia
tests/search_test.py
https://github.com/goldsmith/Wikipedia/blob/master/tests/search_test.py
MIT
def test_suggestion_none(self): """Test getting a suggestion when there is no suggestion.""" search, suggestion = wikipedia.search("qmxjsudek", suggestion=True) self.assertEqual(search, []) self.assertEqual(suggestion, None)
Test getting a suggestion when there is no suggestion.
test_suggestion_none
python
goldsmith/Wikipedia
tests/search_test.py
https://github.com/goldsmith/Wikipedia/blob/master/tests/search_test.py
MIT
def set_lang(prefix): ''' Change the language of the API being requested. Set `prefix` to one of the two letter prefixes found on the `list of all Wikipedias <http://meta.wikimedia.org/wiki/List_of_Wikipedias>`_. After setting the language, the cache for ``search``, ``suggest``, and ``summary`` will be cleared. .. note:: Make sure you search for page titles in the language that you have set. ''' global API_URL API_URL = 'http://' + prefix.lower() + '.wikipedia.org/w/api.php' for cached_func in (search, suggest, summary): cached_func.clear_cache()
Change the language of the API being requested. Set `prefix` to one of the two letter prefixes found on the `list of all Wikipedias <http://meta.wikimedia.org/wiki/List_of_Wikipedias>`_. After setting the language, the cache for ``search``, ``suggest``, and ``summary`` will be cleared. .. note:: Make sure you search for page titles in the language that you have set.
set_lang
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
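Usage is a single call before any query; a short sketch (the article title is illustrative).

import wikipedia

wikipedia.set_lang("fr")                        # switches API_URL to fr.wikipedia.org
print(wikipedia.summary("Paris", sentences=1))  # search in the language you just set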
def set_user_agent(user_agent_string): ''' Set the User-Agent string to be used for all requests. Arguments: * user_agent_string - (string) a string specifying the User-Agent header ''' global USER_AGENT USER_AGENT = user_agent_string
Set the User-Agent string to be used for all requests. Arguments: * user_agent_string - (string) a string specifying the User-Agent header
set_user_agent
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
def search(query, results=10, suggestion=False):
    '''
    Do a Wikipedia search for `query`.

    Keyword arguments:

    * results - the maximum number of results returned
    * suggestion - if True, return results and suggestion (if any) in a tuple
    '''

    search_params = {
        'list': 'search',
        'srprop': '',
        'srlimit': results,
        'limit': results,
        'srsearch': query
    }
    if suggestion:
        search_params['srinfo'] = 'suggestion'

    raw_results = _wiki_request(search_params)

    if 'error' in raw_results:
        if raw_results['error']['info'] in ('HTTP request timed out.', 'Pool queue is full'):
            raise HTTPTimeoutError(query)
        else:
            raise WikipediaException(raw_results['error']['info'])

    search_results = (d['title'] for d in raw_results['query']['search'])

    if suggestion:
        if raw_results['query'].get('searchinfo'):
            return list(search_results), raw_results['query']['searchinfo']['suggestion']
        else:
            return list(search_results), None

    return list(search_results)
Do a Wikipedia search for `query`. Keyword arguments: * results - the maximum number of results returned * suggestion - if True, return results and suggestion (if any) in a tuple
search
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
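A small usage sketch of search, including the tuple form with a suggestion; the misspelled query mirrors the suggestion test earlier in these records.

import wikipedia

titles = wikipedia.search("segmentation", results=5)  # list of page titles
results, suggestion = wikipedia.search("hallelulejah", suggestion=True)
# results may be empty; suggestion is a corrected query string or None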
def geosearch(latitude, longitude, title=None, results=10, radius=1000):
    '''
    Do a Wikipedia geosearch for `latitude` and `longitude`
    using the HTTP API described in http://www.mediawiki.org/wiki/Extension:GeoData

    Arguments:

    * latitude (float or decimal.Decimal)
    * longitude (float or decimal.Decimal)

    Keyword arguments:

    * title - The title of an article to search for
    * results - the maximum number of results returned
    * radius - Search radius in meters. The value must be between 10 and 10000
    '''

    search_params = {
        'list': 'geosearch',
        'gsradius': radius,
        'gscoord': '{0}|{1}'.format(latitude, longitude),
        'gslimit': results
    }
    if title:
        search_params['titles'] = title

    raw_results = _wiki_request(search_params)

    if 'error' in raw_results:
        if raw_results['error']['info'] in ('HTTP request timed out.', 'Pool queue is full'):
            raise HTTPTimeoutError('{0}|{1}'.format(latitude, longitude))
        else:
            raise WikipediaException(raw_results['error']['info'])

    search_pages = raw_results['query'].get('pages', None)
    if search_pages:
        search_results = (v['title'] for k, v in search_pages.items() if k != '-1')
    else:
        search_results = (d['title'] for d in raw_results['query']['geosearch'])

    return list(search_results)
Do a Wikipedia geosearch for `latitude` and `longitude` using the HTTP API described in http://www.mediawiki.org/wiki/Extension:GeoData Arguments: * latitude (float or decimal.Decimal) * longitude (float or decimal.Decimal) Keyword arguments: * title - The title of an article to search for * results - the maximum number of results returned * radius - Search radius in meters. The value must be between 10 and 10000
geosearch
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
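Mirroring the geosearch tests earlier in these records, a usage sketch with the same coordinates.

from decimal import Decimal
import wikipedia

nearby = wikipedia.geosearch(Decimal('40.67693'), Decimal('117.23193'), radius=10000)
# returns a list of article titles near the given point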
def suggest(query): ''' Get a Wikipedia search suggestion for `query`. Returns a string or None if no suggestion was found. ''' search_params = { 'list': 'search', 'srinfo': 'suggestion', 'srprop': '', } search_params['srsearch'] = query raw_result = _wiki_request(search_params) if raw_result['query'].get('searchinfo'): return raw_result['query']['searchinfo']['suggestion'] return None
Get a Wikipedia search suggestion for `query`. Returns a string or None if no suggestion was found.
suggest
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
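suggest is the thin counterpart of search's suggestion mode; a one-line sketch using the query from the tests above.

import wikipedia

print(wikipedia.suggest("hallelulejah"))  # 'hallelujah', or None if nothing is suggested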
def random(pages=1): ''' Get a list of random Wikipedia article titles. .. note:: Random only gets articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages. Keyword arguments: * pages - the number of random pages returned (max of 10) ''' #http://en.wikipedia.org/w/api.php?action=query&list=random&rnlimit=5000&format=jsonfm query_params = { 'list': 'random', 'rnnamespace': 0, 'rnlimit': pages, } request = _wiki_request(query_params) titles = [page['title'] for page in request['query']['random']] if len(titles) == 1: return titles[0] return titles
Get a list of random Wikipedia article titles. .. note:: Random only gets articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages. Keyword arguments: * pages - the number of random pages returned (max of 10)
random
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
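Note that the return type depends on pages; a quick sketch.

import wikipedia

title = wikipedia.random()          # a single title string when pages == 1
titles = wikipedia.random(pages=3)  # a list of titles otherwise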
def summary(title, sentences=0, chars=0, auto_suggest=True, redirect=True): ''' Plain text summary of the page. .. note:: This is a convenience wrapper - auto_suggest and redirect are enabled by default Keyword arguments: * sentences - if set, return the first `sentences` sentences (can be no greater than 10). * chars - if set, return only the first `chars` characters (actual text returned may be slightly longer). * auto_suggest - let Wikipedia find a valid page title for the query * redirect - allow redirection without raising RedirectError ''' # use auto_suggest and redirect to get the correct article # also, use page's error checking to raise DisambiguationError if necessary page_info = page(title, auto_suggest=auto_suggest, redirect=redirect) title = page_info.title pageid = page_info.pageid query_params = { 'prop': 'extracts', 'explaintext': '', 'titles': title } if sentences: query_params['exsentences'] = sentences elif chars: query_params['exchars'] = chars else: query_params['exintro'] = '' request = _wiki_request(query_params) summary = request['query']['pages'][pageid]['extract'] return summary
Plain text summary of the page. .. note:: This is a convenience wrapper - auto_suggest and redirect are enabled by default Keyword arguments: * sentences - if set, return the first `sentences` sentences (can be no greater than 10). * chars - if set, return only the first `chars` characters (actual text returned may be slightly longer). * auto_suggest - let Wikipedia find a valid page title for the query * redirect - allow redirection without raising RedirectError
summary
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
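Because summary resolves the title through page first, DisambiguationError and PageError can surface here as well; a hedged sketch (the fallback choice is arbitrary).

import wikipedia

try:
    text = wikipedia.summary("Python (programming language)", sentences=2)
except wikipedia.DisambiguationError as e:
    text = wikipedia.summary(e.options[0], sentences=2)  # pick one of the offered pages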
def page(title=None, pageid=None, auto_suggest=True, redirect=True, preload=False): ''' Get a WikipediaPage object for the page with title `title` or the pageid `pageid` (mutually exclusive). Keyword arguments: * title - the title of the page to load * pageid - the numeric pageid of the page to load * auto_suggest - let Wikipedia find a valid page title for the query * redirect - allow redirection without raising RedirectError * preload - load content, summary, images, references, and links during initialization ''' if title is not None: if auto_suggest: results, suggestion = search(title, results=1, suggestion=True) try: title = suggestion or results[0] except IndexError: # if there is no suggestion or search results, the page doesn't exist raise PageError(title) return WikipediaPage(title, redirect=redirect, preload=preload) elif pageid is not None: return WikipediaPage(pageid=pageid, preload=preload) else: raise ValueError("Either a title or a pageid must be specified")
Get a WikipediaPage object for the page with title `title` or the pageid `pageid` (mutually exclusive). Keyword arguments: * title - the title of the page to load * pageid - the numeric pageid of the page to load * auto_suggest - let Wikipedia find a valid page title for the query * redirect - allow redirection without raising RedirectError * preload - load content, summary, images, references, and links during initialization
page
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
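A usage sketch covering both lookup modes; title and pageid are mutually exclusive, and the titles come from the redirect tests earlier in these records.

import wikipedia

p = wikipedia.page("Communist Party", auto_suggest=False)  # title lookup
q = wikipedia.page(pageid=p.pageid)                        # numeric-id lookup of the same page
assert p.title == q.title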
def __load(self, redirect=True, preload=False): ''' Load basic information from Wikipedia. Confirm that page exists and is not a disambiguation/redirect. Does not need to be called manually, should be called automatically during __init__. ''' query_params = { 'prop': 'info|pageprops', 'inprop': 'url', 'ppprop': 'disambiguation', 'redirects': '', } if not getattr(self, 'pageid', None): query_params['titles'] = self.title else: query_params['pageids'] = self.pageid request = _wiki_request(query_params) query = request['query'] pageid = list(query['pages'].keys())[0] page = query['pages'][pageid] # missing is present if the page is missing if 'missing' in page: if hasattr(self, 'title'): raise PageError(self.title) else: raise PageError(pageid=self.pageid) # same thing for redirect, except it shows up in query instead of page for # whatever silly reason elif 'redirects' in query: if redirect: redirects = query['redirects'][0] if 'normalized' in query: normalized = query['normalized'][0] assert normalized['from'] == self.title, ODD_ERROR_MESSAGE from_title = normalized['to'] else: from_title = self.title assert redirects['from'] == from_title, ODD_ERROR_MESSAGE # change the title and reload the whole object self.__init__(redirects['to'], redirect=redirect, preload=preload) else: raise RedirectError(getattr(self, 'title', page['title'])) # since we only asked for disambiguation in ppprop, # if a pageprop is returned, # then the page must be a disambiguation page elif 'pageprops' in page: query_params = { 'prop': 'revisions', 'rvprop': 'content', 'rvparse': '', 'rvlimit': 1 } if hasattr(self, 'pageid'): query_params['pageids'] = self.pageid else: query_params['titles'] = self.title request = _wiki_request(query_params) html = request['query']['pages'][pageid]['revisions'][0]['*'] lis = BeautifulSoup(html, 'html.parser').find_all('li') filtered_lis = [li for li in lis if not 'tocsection' in ''.join(li.get('class', []))] may_refer_to = [li.a.get_text() for li in filtered_lis if li.a] raise DisambiguationError(getattr(self, 'title', page['title']), may_refer_to) else: self.pageid = pageid self.title = page['title'] self.url = page['fullurl']
Load basic information from Wikipedia. Confirm that page exists and is not a disambiguation/redirect. Does not need to be called manually, should be called automatically during __init__.
__load
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
def __continued_query(self, query_params): ''' Based on https://www.mediawiki.org/wiki/API:Query#Continuing_queries ''' query_params.update(self.__title_query_param) last_continue = {} prop = query_params.get('prop', None) while True: params = query_params.copy() params.update(last_continue) request = _wiki_request(params) if 'query' not in request: break pages = request['query']['pages'] if 'generator' in query_params: for datum in pages.values(): # in python 3.3+: "yield from pages.values()" yield datum else: for datum in pages[self.pageid][prop]: yield datum if 'continue' not in request: break last_continue = request['continue']
Based on https://www.mediawiki.org/wiki/API:Query#Continuing_queries
__continued_query
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
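The continuation protocol itself is plain MediaWiki; a standalone sketch with requests against the public endpoint (endpoint and page title are illustrative, not taken from this module).

import requests

API = 'https://en.wikipedia.org/w/api.php'
params = {'action': 'query', 'format': 'json', 'prop': 'links', 'pllimit': 'max',
          'titles': 'Python (programming language)'}
last_continue = {}
links = []
while True:
    req = dict(params, **last_continue)
    data = requests.get(API, params=req).json()
    for page in data.get('query', {}).get('pages', {}).values():
        links.extend(l['title'] for l in page.get('links', []))
    if 'continue' not in data:        # no more batches to fetch
        break
    last_continue = data['continue']  # opaque cursor handed back by the API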
def html(self): ''' Get full page HTML. .. warning:: This can get pretty slow on long pages. ''' if not getattr(self, '_html', False): query_params = { 'prop': 'revisions', 'rvprop': 'content', 'rvlimit': 1, 'rvparse': '', 'titles': self.title } request = _wiki_request(query_params) self._html = request['query']['pages'][self.pageid]['revisions'][0]['*'] return self._html
Get full page HTML. .. warning:: This can get pretty slow on long pages.
html
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
def content(self): ''' Plain text content of the page, excluding images, tables, and other data. ''' if not getattr(self, '_content', False): query_params = { 'prop': 'extracts|revisions', 'explaintext': '', 'rvprop': 'ids' } if not getattr(self, 'title', None) is None: query_params['titles'] = self.title else: query_params['pageids'] = self.pageid request = _wiki_request(query_params) self._content = request['query']['pages'][self.pageid]['extract'] self._revision_id = request['query']['pages'][self.pageid]['revisions'][0]['revid'] self._parent_id = request['query']['pages'][self.pageid]['revisions'][0]['parentid'] return self._content
Plain text content of the page, excluding images, tables, and other data.
content
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
def revision_id(self):
    '''
    Revision ID of the page.

    The revision ID is a number that uniquely identifies the current
    version of the page. It can be used to create the permalink or for
    other direct API calls. See `Help:Page history
    <http://en.wikipedia.org/wiki/Wikipedia:Revision>`_ for more
    information.
    '''
    if not getattr(self, '_revision_id', False):
        # fetch the content (side effect is loading the revision id)
        self.content

    return self._revision_id
Revision ID of the page. The revision ID is a number that uniquely identifies the current version of the page. It can be used to create the permalink or for other direct API calls. See `Help:Page history <http://en.wikipedia.org/wiki/Wikipedia:Revision>`_ for more information.
revision_id
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
def parent_id(self):
    '''
    Revision ID of the parent version of the current revision of this
    page. See ``revision_id`` for more information.
    '''
    if not getattr(self, '_parent_id', False):
        # fetch the content (side effect is loading the parent id)
        self.content

    return self._parent_id
Revision ID of the parent version of the current revision of this page. See ``revision_id`` for more information.
parent_id
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
def summary(self): ''' Plain text summary of the page. ''' if not getattr(self, '_summary', False): query_params = { 'prop': 'extracts', 'explaintext': '', 'exintro': '', } if not getattr(self, 'title', None) is None: query_params['titles'] = self.title else: query_params['pageids'] = self.pageid request = _wiki_request(query_params) self._summary = request['query']['pages'][self.pageid]['extract'] return self._summary
Plain text summary of the page.
summary
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
def images(self): ''' List of URLs of images on the page. ''' if not getattr(self, '_images', False): self._images = [ page['imageinfo'][0]['url'] for page in self.__continued_query({ 'generator': 'images', 'gimlimit': 'max', 'prop': 'imageinfo', 'iiprop': 'url', }) if 'imageinfo' in page ] return self._images
List of URLs of images on the page.
images
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
def coordinates(self):
    '''
    Tuple of Decimals in the form of (lat, lon) or None
    '''
    if not getattr(self, '_coordinates', False):
        query_params = {
            'prop': 'coordinates',
            'colimit': 'max',
            'titles': self.title,
        }

        request = _wiki_request(query_params)

        # pages without coordinates return no 'coordinates' key, so guard the lookup
        coordinates = request.get('query', {}).get('pages', {}).get(self.pageid, {}).get('coordinates')
        if coordinates:
            self._coordinates = (Decimal(coordinates[0]['lat']), Decimal(coordinates[0]['lon']))
        else:
            self._coordinates = None

    return self._coordinates
Tuple of Decimals in the form of (lat, lon) or None
coordinates
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
def references(self): ''' List of URLs of external links on a page. May include external links within page that aren't technically cited anywhere. ''' if not getattr(self, '_references', False): def add_protocol(url): return url if url.startswith('http') else 'http:' + url self._references = [ add_protocol(link['*']) for link in self.__continued_query({ 'prop': 'extlinks', 'ellimit': 'max' }) ] return self._references
List of URLs of external links on a page. May include external links within page that aren't technically cited anywhere.
references
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
def links(self): ''' List of titles of Wikipedia page links on a page. .. note:: Only includes articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages. ''' if not getattr(self, '_links', False): self._links = [ link['title'] for link in self.__continued_query({ 'prop': 'links', 'plnamespace': 0, 'pllimit': 'max' }) ] return self._links
List of titles of Wikipedia page links on a page. .. note:: Only includes articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages.
links
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
def sections(self): ''' List of section titles from the table of contents on the page. ''' if not getattr(self, '_sections', False): query_params = { 'action': 'parse', 'prop': 'sections', } if not getattr(self, 'title', None) is None: query_params["page"] = self.title request = _wiki_request(query_params) self._sections = [section['line'] for section in request['parse']['sections']] return self._sections
List of section titles from the table of contents on the page.
sections
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
def section(self, section_title): ''' Get the plain text content of a section from `self.sections`. Returns None if `section_title` isn't found, otherwise returns a whitespace stripped string. This is a convenience method that wraps self.content. .. warning:: Calling `section` on a section that has subheadings will NOT return the full text of all of the subsections. It only gets the text between `section_title` and the next subheading, which is often empty. ''' section = u"== {} ==".format(section_title) try: index = self.content.index(section) + len(section) except ValueError: return None try: next_index = self.content.index("==", index) except ValueError: next_index = len(self.content) return self.content[index:next_index].lstrip("=").strip()
Get the plain text content of a section from `self.sections`. Returns None if `section_title` isn't found, otherwise returns a whitespace stripped string. This is a convenience method that wraps self.content. .. warning:: Calling `section` on a section that has subheadings will NOT return the full text of all of the subsections. It only gets the text between `section_title` and the next subheading, which is often empty.
section
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
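Per the warning above, section returns only the text up to the next heading; a sketch pairing it with the sections property (the page title is illustrative).

import wikipedia

p = wikipedia.page("New York City", auto_suggest=False)
for heading in p.sections:
    body = p.section(heading)
    if body:  # empty for headings that only contain subsections
        print(heading, len(body))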
def languages():
    '''
    List all the currently supported language prefixes (usually ISO language code).

    Can be passed to `set_lang` to change the Mediawiki that `wikipedia` requests
    results from.

    Returns: dict of <prefix>: <local_lang_name> pairs. To get just a list of prefixes,
    use `wikipedia.languages().keys()`.
    '''
    response = _wiki_request({
        'meta': 'siteinfo',
        'siprop': 'languages'
    })

    languages = response['query']['languages']

    return {
        lang['code']: lang['*']
        for lang in languages
    }
List all the currently supported language prefixes (usually ISO language code). Can be passed to `set_lang` to change the Mediawiki that `wikipedia` requests results from. Returns: dict of <prefix>: <local_lang_name> pairs. To get just a list of prefixes, use `wikipedia.languages().keys()`.
languages
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
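languages pairs naturally with set_lang; a sketch.

import wikipedia

langs = wikipedia.languages()  # e.g. {'en': 'English', 'fr': 'français', ...}
if 'de' in langs:
    wikipedia.set_lang('de')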
def _wiki_request(params):
    '''
    Make a request to the Wikipedia API using the given search parameters.
    Returns a parsed dict of the JSON response.
    '''
    global RATE_LIMIT_LAST_CALL
    global USER_AGENT

    params['format'] = 'json'
    if 'action' not in params:
        params['action'] = 'query'

    headers = {
        'User-Agent': USER_AGENT
    }

    if RATE_LIMIT and RATE_LIMIT_LAST_CALL and \
        RATE_LIMIT_LAST_CALL + RATE_LIMIT_MIN_WAIT > datetime.now():

        # it hasn't been long enough since the last API call
        # so wait until we're in the clear to make the request
        wait_time = (RATE_LIMIT_LAST_CALL + RATE_LIMIT_MIN_WAIT) - datetime.now()
        # sleep the full fractional interval; int() would truncate sub-second waits to zero
        time.sleep(wait_time.total_seconds())

    r = requests.get(API_URL, params=params, headers=headers)

    if RATE_LIMIT:
        RATE_LIMIT_LAST_CALL = datetime.now()

    return r.json()
Make a request to the Wikipedia API using the given search parameters. Returns a parsed dict of the JSON response.
_wiki_request
python
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/master/wikipedia/wikipedia.py
MIT
def __init__(self, guid, text, label=None):
    """Constructs an InputExample.

    Args:
        guid: Unique id for the example.
        text: string. The untokenized text of the sequence.
        label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
    """
    self.guid = guid
    self.text = text
    self.label = label
Constructs an InputExample. Args: guid: Unique id for the example. text: string. The untokenized text of the sequence. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples.
__init__
python
ProHiryu/bert-chinese-ner
BERT_NER.py
https://github.com/ProHiryu/bert-chinese-ner/blob/master/BERT_NER.py
MIT
def precision(labels, predictions, num_classes, pos_indices=None, weights=None, average='micro'): """Multi-class precision metric for Tensorflow Parameters ---------- labels : Tensor of tf.int32 or tf.int64 The true labels predictions : Tensor of tf.int32 or tf.int64 The predictions, same shape as labels num_classes : int The number of classes pos_indices : list of int, optional The indices of the positive classes, default is all weights : Tensor of tf.int32, optional Mask, must be of compatible shape with labels average : str, optional 'micro': counts the total number of true positives, false positives, and false negatives for the classes in `pos_indices` and infer the metric from it. 'macro': will compute the metric separately for each class in `pos_indices` and average. Will not account for class imbalance. 'weighted': will compute the metric separately for each class in `pos_indices` and perform a weighted average by the total number of true labels for each class. Returns ------- tuple of (scalar float Tensor, update_op) """ cm, op = _streaming_confusion_matrix( labels, predictions, num_classes, weights) pr, _, _ = metrics_from_confusion_matrix( cm, pos_indices, average=average) op, _, _ = metrics_from_confusion_matrix( op, pos_indices, average=average) return (pr, op)
Multi-class precision metric for Tensorflow Parameters ---------- labels : Tensor of tf.int32 or tf.int64 The true labels predictions : Tensor of tf.int32 or tf.int64 The predictions, same shape as labels num_classes : int The number of classes pos_indices : list of int, optional The indices of the positive classes, default is all weights : Tensor of tf.int32, optional Mask, must be of compatible shape with labels average : str, optional 'micro': counts the total number of true positives, false positives, and false negatives for the classes in `pos_indices` and infer the metric from it. 'macro': will compute the metric separately for each class in `pos_indices` and average. Will not account for class imbalance. 'weighted': will compute the metric separately for each class in `pos_indices` and perform a weighted average by the total number of true labels for each class. Returns ------- tuple of (scalar float Tensor, update_op)
precision
python
ProHiryu/bert-chinese-ner
tf_metrics.py
https://github.com/ProHiryu/bert-chinese-ner/blob/master/tf_metrics.py
MIT
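These metrics follow the TF1 (value, update_op) convention and keep their streaming confusion matrix in local variables; a minimal TF1-style sketch, assuming the module is importable as tf_metrics (per the path field) and using toy tensors.

import tensorflow as tf  # TF1.x API, consistent with tf.to_float in these records
from tf_metrics import precision

labels = tf.constant([0, 1, 2, 2, 1])
preds = tf.constant([0, 1, 1, 2, 1])
pr, pr_op = precision(labels, preds, num_classes=3, average='macro')

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # the streaming confusion matrix lives here
    sess.run(pr_op)                             # accumulate one batch
    print(sess.run(pr))                         # read the metric value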
def recall(labels, predictions, num_classes, pos_indices=None, weights=None, average='micro'): """Multi-class recall metric for Tensorflow Parameters ---------- labels : Tensor of tf.int32 or tf.int64 The true labels predictions : Tensor of tf.int32 or tf.int64 The predictions, same shape as labels num_classes : int The number of classes pos_indices : list of int, optional The indices of the positive classes, default is all weights : Tensor of tf.int32, optional Mask, must be of compatible shape with labels average : str, optional 'micro': counts the total number of true positives, false positives, and false negatives for the classes in `pos_indices` and infer the metric from it. 'macro': will compute the metric separately for each class in `pos_indices` and average. Will not account for class imbalance. 'weighted': will compute the metric separately for each class in `pos_indices` and perform a weighted average by the total number of true labels for each class. Returns ------- tuple of (scalar float Tensor, update_op) """ cm, op = _streaming_confusion_matrix( labels, predictions, num_classes, weights) _, re, _ = metrics_from_confusion_matrix( cm, pos_indices, average=average) _, op, _ = metrics_from_confusion_matrix( op, pos_indices, average=average) return (re, op)
Multi-class recall metric for Tensorflow Parameters ---------- labels : Tensor of tf.int32 or tf.int64 The true labels predictions : Tensor of tf.int32 or tf.int64 The predictions, same shape as labels num_classes : int The number of classes pos_indices : list of int, optional The indices of the positive classes, default is all weights : Tensor of tf.int32, optional Mask, must be of compatible shape with labels average : str, optional 'micro': counts the total number of true positives, false positives, and false negatives for the classes in `pos_indices` and infer the metric from it. 'macro': will compute the metric separately for each class in `pos_indices` and average. Will not account for class imbalance. 'weighted': will compute the metric separately for each class in `pos_indices` and perform a weighted average by the total number of true labels for each class. Returns ------- tuple of (scalar float Tensor, update_op)
recall
python
ProHiryu/bert-chinese-ner
tf_metrics.py
https://github.com/ProHiryu/bert-chinese-ner/blob/master/tf_metrics.py
MIT
def fbeta(labels, predictions, num_classes, pos_indices=None, weights=None, average='micro', beta=1): """Multi-class fbeta metric for Tensorflow Parameters ---------- labels : Tensor of tf.int32 or tf.int64 The true labels predictions : Tensor of tf.int32 or tf.int64 The predictions, same shape as labels num_classes : int The number of classes pos_indices : list of int, optional The indices of the positive classes, default is all weights : Tensor of tf.int32, optional Mask, must be of compatible shape with labels average : str, optional 'micro': counts the total number of true positives, false positives, and false negatives for the classes in `pos_indices` and infer the metric from it. 'macro': will compute the metric separately for each class in `pos_indices` and average. Will not account for class imbalance. 'weighted': will compute the metric separately for each class in `pos_indices` and perform a weighted average by the total number of true labels for each class. beta : int, optional Weight of precision in harmonic mean Returns ------- tuple of (scalar float Tensor, update_op) """ cm, op = _streaming_confusion_matrix( labels, predictions, num_classes, weights) _, _, fbeta = metrics_from_confusion_matrix( cm, pos_indices, average=average, beta=beta) _, _, op = metrics_from_confusion_matrix( op, pos_indices, average=average, beta=beta) return (fbeta, op)
Multi-class fbeta metric for Tensorflow Parameters ---------- labels : Tensor of tf.int32 or tf.int64 The true labels predictions : Tensor of tf.int32 or tf.int64 The predictions, same shape as labels num_classes : int The number of classes pos_indices : list of int, optional The indices of the positive classes, default is all weights : Tensor of tf.int32, optional Mask, must be of compatible shape with labels average : str, optional 'micro': counts the total number of true positives, false positives, and false negatives for the classes in `pos_indices` and infer the metric from it. 'macro': will compute the metric separately for each class in `pos_indices` and average. Will not account for class imbalance. 'weighted': will compute the metric separately for each class in `pos_indices` and perform a weighted average by the total number of true labels for each class. beta : int, optional Weight of precision in harmonic mean Returns ------- tuple of (scalar float Tensor, update_op)
fbeta
python
ProHiryu/bert-chinese-ner
tf_metrics.py
https://github.com/ProHiryu/bert-chinese-ner/blob/master/tf_metrics.py
MIT
def safe_div(numerator, denominator): """Safe division, return 0 if denominator is 0""" numerator, denominator = tf.to_float(numerator), tf.to_float(denominator) zeros = tf.zeros_like(numerator, dtype=numerator.dtype) denominator_is_zero = tf.equal(denominator, zeros) return tf.where(denominator_is_zero, zeros, numerator / denominator)
Safe division, return 0 if denominator is 0
safe_div
python
ProHiryu/bert-chinese-ner
tf_metrics.py
https://github.com/ProHiryu/bert-chinese-ner/blob/master/tf_metrics.py
MIT
def pr_re_fbeta(cm, pos_indices, beta=1): """Uses a confusion matrix to compute precision, recall and fbeta""" num_classes = cm.shape[0] neg_indices = [i for i in range(num_classes) if i not in pos_indices] cm_mask = np.ones([num_classes, num_classes]) cm_mask[neg_indices, neg_indices] = 0 diag_sum = tf.reduce_sum(tf.diag_part(cm * cm_mask)) cm_mask = np.ones([num_classes, num_classes]) cm_mask[:, neg_indices] = 0 tot_pred = tf.reduce_sum(cm * cm_mask) cm_mask = np.ones([num_classes, num_classes]) cm_mask[neg_indices, :] = 0 tot_gold = tf.reduce_sum(cm * cm_mask) pr = safe_div(diag_sum, tot_pred) re = safe_div(diag_sum, tot_gold) fbeta = safe_div((1. + beta**2) * pr * re, beta**2 * pr + re) return pr, re, fbeta
Uses a confusion matrix to compute precision, recall and fbeta
pr_re_fbeta
python
ProHiryu/bert-chinese-ner
tf_metrics.py
https://github.com/ProHiryu/bert-chinese-ner/blob/master/tf_metrics.py
MIT
def metrics_from_confusion_matrix(cm, pos_indices=None, average='micro', beta=1): """Precision, Recall and F1 from the confusion matrix Parameters ---------- cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes) The streaming confusion matrix. pos_indices : list of int, optional The indices of the positive classes beta : int, optional Weight of precision in harmonic mean average : str, optional 'micro', 'macro' or 'weighted' """ num_classes = cm.shape[0] if pos_indices is None: pos_indices = [i for i in range(num_classes)] if average == 'micro': return pr_re_fbeta(cm, pos_indices, beta) elif average in {'macro', 'weighted'}: precisions, recalls, fbetas, n_golds = [], [], [], [] for idx in pos_indices: pr, re, fbeta = pr_re_fbeta(cm, [idx], beta) precisions.append(pr) recalls.append(re) fbetas.append(fbeta) cm_mask = np.zeros([num_classes, num_classes]) cm_mask[idx, :] = 1 n_golds.append(tf.to_float(tf.reduce_sum(cm * cm_mask))) if average == 'macro': pr = tf.reduce_mean(precisions) re = tf.reduce_mean(recalls) fbeta = tf.reduce_mean(fbetas) return pr, re, fbeta if average == 'weighted': n_gold = tf.reduce_sum(n_golds) pr_sum = sum(p * n for p, n in zip(precisions, n_golds)) pr = safe_div(pr_sum, n_gold) re_sum = sum(r * n for r, n in zip(recalls, n_golds)) re = safe_div(re_sum, n_gold) fbeta_sum = sum(f * n for f, n in zip(fbetas, n_golds)) fbeta = safe_div(fbeta_sum, n_gold) return pr, re, fbeta else: raise NotImplementedError()
Precision, Recall and F1 from the confusion matrix Parameters ---------- cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes) The streaming confusion matrix. pos_indices : list of int, optional The indices of the positive classes beta : int, optional Weight of precision in harmonic mean average : str, optional 'micro', 'macro' or 'weighted'
metrics_from_confusion_matrix
python
ProHiryu/bert-chinese-ner
tf_metrics.py
https://github.com/ProHiryu/bert-chinese-ner/blob/master/tf_metrics.py
MIT
def run_external_command(command: List[str], print_output: bool = True) -> str:
    """Wrapper to simplify calling external programs"""
    process = subprocess.Popen(command, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output, _ = process.communicate()
    ret = process.wait()
    if (output and print_output) or ret != 0:
        print(output)
    if ret != 0:
        raise RuntimeError("Command returned non-zero exit code %s!" % ret)
    return output
Wrapper to simplify calling external programs
run_external_command
python
Syncplay/syncplay
ci/macos_app_arch_check.py
https://github.com/Syncplay/syncplay/blob/master/ci/macos_app_arch_check.py
Apache-2.0
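A usage sketch, assuming run_external_command from the record above is in scope; the command here is illustrative rather than taken from the script.

# Runs a command, prints nothing on success, and raises on a non-zero exit code.
out = run_external_command(["echo", "hello"], print_output=False)
assert out.strip() == "hello"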