Columns:
body: stringlengths 26 to 98.2k
body_hash: int64, -9,222,864,604,528,158,000 to 9,221,803,474B
docstring: stringlengths 1 to 16.8k
path: stringlengths 5 to 230
name: stringlengths 1 to 96
repository_name: stringlengths 7 to 89
lang: stringclasses, 1 value
body_without_docstring: stringlengths 20 to 98.2k
@abstractmethod def set(self, U): 'Load new data into existing plot objects.' pass
4,344,444,513,218,146,300
Load new data into existing plot objects.
src/pymor/discretizers/builtin/gui/matplotlib.py
set
TreeerT/pymor
python
@abstractmethod def set(self, U): pass
@abstractmethod def animate(self, u): 'Load new data into existing plot objects.' pass
-6,992,738,916,723,379,000
Load new data into existing plot objects.
src/pymor/discretizers/builtin/gui/matplotlib.py
animate
TreeerT/pymor
python
@abstractmethod def animate(self, u): pass
def read_worldbank(iso3166alpha3): ' Fetches and tidies all ~1500 World Bank indicators\n for a given ISO 3166 alpha 3 code.\n\n For a particular alpha 3 code, this function fetches the entire ZIP\n file for that particular country for all World Bank indicators in a\n wide format where years are columns. The dataframe is changed into a\n narrow format so that year becomes a single column with each row\n representing a different year for a single indicator.\n\n Args:\n iso3166alpha3: ISO 3166 alpha 3 for a country, as a string.\n\n Returns:\n A tidied pandas dataframe with all indicator codes for a particular\n country in the format of (country, indicator, year, value).\n\n Notes:\n Takes approximately 10 seconds to download and\n tidy one country in a Jupyter notebook.\n ' country_zip = (('http://api.worldbank.org/v2/en/country/' + iso3166alpha3) + '?downloadformat=csv') r = requests.get(country_zip) filebytes = io.BytesIO(r.content) myzipfile = zipfile.ZipFile(filebytes) file_to_open = None for file in myzipfile.namelist(): if file.startswith('API'): file_to_open = file break assert (file_to_open is not None), ('Failed to find data for' + iso3166alpha3) df = None line_match = re.compile('\\"([^\\"]*)\\"') for line in myzipfile.open(file_to_open).readlines(): cols = line_match.findall(line.decode('utf-8')) if (len(cols) > 2): if (df is None): df = pd.DataFrame(columns=cols) else: df = df.append(pd.DataFrame([cols], columns=df.columns), ignore_index=True) df = df.rename(columns=WORLDBANK_COL_REMAP) df = df.set_index(['CountryName', 'CountryCode', 'IndicatorName', 'IndicatorCode']) df = df.stack() df.index = df.index.rename('year', level=4) df.name = 'Value' df = df.reset_index() df['Value'] = pd.to_numeric(df['Value']) df = df.dropna() return df
-7,063,378,916,210,146,000
Fetches and tidies all ~1500 World Bank indicators for a given ISO 3166 alpha 3 code. For a particular alpha 3 code, this function fetches the entire ZIP file for that particular country for all World Bank indicators in a wide format where years are columns. The dataframe is changed into a narrow format so that year becomes a single column with each row representing a different year for a single indicator. Args: iso3166alpha3: ISO 3166 alpha 3 for a country, as a string. Returns: A tidied pandas dataframe with all indicator codes for a particular country in the format of (country, indicator, year, value). Notes: Takes approximately 10 seconds to download and tidy one country in a Jupyter notebook.
scripts/world_bank/worldbank.py
read_worldbank
IanCostello/data
python
def read_worldbank(iso3166alpha3): ' Fetches and tidies all ~1500 World Bank indicators\n for a given ISO 3166 alpha 3 code.\n\n For a particular alpha 3 code, this function fetches the entire ZIP\n file for that particular country for all World Bank indicators in a\n wide format where years are columns. The dataframe is changed into a\n narrow format so that year becomes a single column with each row\n representing a different year for a single indicator.\n\n Args:\n iso3166alpha3: ISO 3166 alpha 3 for a country, as a string.\n\n Returns:\n A tidied pandas dataframe with all indicator codes for a particular\n country in the format of (country, indicator, year, value).\n\n Notes:\n Takes approximately 10 seconds to download and\n tidy one country in a Jupyter notebook.\n ' country_zip = (('http://api.worldbank.org/v2/en/country/' + iso3166alpha3) + '?downloadformat=csv') r = requests.get(country_zip) filebytes = io.BytesIO(r.content) myzipfile = zipfile.ZipFile(filebytes) file_to_open = None for file in myzipfile.namelist(): if file.startswith('API'): file_to_open = file break assert (file_to_open is not None), ('Failed to find data for' + iso3166alpha3) df = None line_match = re.compile('\\"([^\\"]*)\\"') for line in myzipfile.open(file_to_open).readlines(): cols = line_match.findall(line.decode('utf-8')) if (len(cols) > 2): if (df is None): df = pd.DataFrame(columns=cols) else: df = df.append(pd.DataFrame([cols], columns=df.columns), ignore_index=True) df = df.rename(columns=WORLDBANK_COL_REMAP) df = df.set_index(['CountryName', 'CountryCode', 'IndicatorName', 'IndicatorCode']) df = df.stack() df.index = df.index.rename('year', level=4) df.name = 'Value' df = df.reset_index() df['Value'] = pd.to_numeric(df['Value']) df = df.dropna() return df
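As a quick orientation to the row above: read_worldbank downloads the country ZIP, picks the 'API*' CSV inside it, and reshapes the wide year columns into a long frame. A minimal driver sketch follows; the country code and the commented column list are illustrative assumptions, not values taken from the source.

import io, re, zipfile          # modules read_worldbank() relies on
import pandas as pd
import requests

tidy = read_worldbank('USA')    # 'USA' is only an example ISO 3166 alpha-3 code
# After reset_index() the frame is long: one row per (indicator, year) pair,
# with columns along the lines of CountryName, CountryCode, IndicatorName,
# IndicatorCode, year and Value (exact names depend on WORLDBANK_COL_REMAP).
print(tidy.head())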
def build_stat_vars_from_indicator_list(row): ' Generates World Bank StatVar for a row in the indicators dataframe. ' def row_to_constraints(row): ' Helper to generate list of constraints. ' constraints_text = '' next_constraint = 1 while ((f'p{next_constraint}' in row) and (not pd.isna(row[f'p{next_constraint}']))): variable = row[f'p{next_constraint}'] constraint = row[f'v{next_constraint}'] constraints_text += f'''{variable}: dcs:{constraint} ''' next_constraint += 1 return constraints_text new_stat_var = TEMPLATE_STAT_VAR.replace('{INDICATOR}', row['IndicatorCode'].replace('.', '_')).replace('{NAME}', row['IndicatorName']).replace('{DESCRIPTION}', row['SourceNote']).replace('{measuredProperty}', row['measuredProp']).replace('{CONSTRAINTS}', row_to_constraints(row)) for optional_col in ['populationType', 'statType', 'measurementDenominator']: if (not pd.isna(row[optional_col])): new_stat_var = new_stat_var.replace(f'{{{optional_col}}}', row[optional_col]) else: new_stat_var = new_stat_var.replace(f'''{optional_col}: dcs:{{{optional_col}}} ''', '') return new_stat_var
-7,121,890,781,742,843,000
Generates World Bank StatVar for a row in the indicators dataframe.
scripts/world_bank/worldbank.py
build_stat_vars_from_indicator_list
IanCostello/data
python
def build_stat_vars_from_indicator_list(row): ' ' def row_to_constraints(row): ' Helper to generate list of constraints. ' constraints_text = '' next_constraint = 1 while ((f'p{next_constraint}' in row) and (not pd.isna(row[f'p{next_constraint}']))): variable = row[f'p{next_constraint}'] constraint = row[f'v{next_constraint}'] constraints_text += f'{variable}: dcs:{constraint} ' next_constraint += 1 return constraints_text new_stat_var = TEMPLATE_STAT_VAR.replace('{INDICATOR}', row['IndicatorCode'].replace('.', '_')).replace('{NAME}', row['IndicatorName']).replace('{DESCRIPTION}', row['SourceNote']).replace('{measuredProperty}', row['measuredProp']).replace('{CONSTRAINTS}', row_to_constraints(row)) for optional_col in ['populationType', 'statType', 'measurementDenominator']: if (not pd.isna(row[optional_col])): new_stat_var = new_stat_var.replace(f'{{{optional_col}}}', row[optional_col]) else: new_stat_var = new_stat_var.replace(f'{optional_col}: dcs:{{{optional_col}}} ', '') return new_stat_var
def group_stat_vars_by_observation_properties(indicator_codes): ' Groups stat vars by their observation schemas.\n\n Groups Stat Vars by their inclusion of StatVar Observation\n properties like measurementMethod or Unit.\n The current template MCF schema does not support optional values in the\n CSV so we must place these stat vars into\n different template MCFs and CSVs.\n\n Args:\n indicator_codes: List of World Bank indicator codes with\n their Data Commons mappings, as a pandas dataframe.\n\n Returns:\n Array of tuples for each statistical variable grouping.\n 1) template MCF, as a string.\n 2) columns to include in exported csv, as a list of strings.\n 3) indicator codes in this grouping, as a list of strings.\n ' properties_of_stat_var_observation = ['measurementMethod', 'scalingFactor', 'sourceScalingFactor', 'unit'] tmcfs_for_stat_vars = [] null_status = indicator_codes.notna() for permutation in list(itertools.product([False, True], repeat=len(properties_of_stat_var_observation))): codes_that_match = null_status.copy() base_template_mcf = TEMPLATE_TMCF cols_to_include_in_csv = ['IndicatorCode'] for (include_col, column) in zip(permutation, properties_of_stat_var_observation): codes_that_match = codes_that_match.query(f'{column} == {include_col}') if include_col: base_template_mcf += f'''{column}: C:WorldBank->{column} ''' cols_to_include_in_csv.append(f'{column}') tmcfs_for_stat_vars.append((base_template_mcf, cols_to_include_in_csv, list(indicator_codes.loc[codes_that_match.index]['IndicatorCode']))) return tmcfs_for_stat_vars
1,955,805,183,199,638,800
Groups stat vars by their observation schemas. Groups Stat Vars by their inclusion of StatVar Observation properties like measurementMethod or Unit. The current template MCF schema does not support optional values in the CSV so we must place these stat vars into different template MCFs and CSVs. Args: indicator_codes: List of World Bank indicator codes with their Data Commons mappings, as a pandas dataframe. Returns: Array of tuples for each statistical variable grouping. 1) template MCF, as a string. 2) columns to include in exported csv, as a list of strings. 3) indicator codes in this grouping, as a list of strings.
scripts/world_bank/worldbank.py
group_stat_vars_by_observation_properties
IanCostello/data
python
def group_stat_vars_by_observation_properties(indicator_codes): ' Groups stat vars by their observation schemas.\n\n Groups Stat Vars by their inclusion of StatVar Observation\n properties like measurementMethod or Unit.\n The current template MCF schema does not support optional values in the\n CSV so we must place these stat vars into\n different template MCFs and CSVs.\n\n Args:\n indicator_codes: List of World Bank indicator codes with\n their Data Commons mappings, as a pandas dataframe.\n\n Returns:\n Array of tuples for each statistical variable grouping.\n 1) template MCF, as a string.\n 2) columns to include in exported csv, as a list of strings.\n 3) indicator codes in this grouping, as a list of strings.\n ' properties_of_stat_var_observation = ['measurementMethod', 'scalingFactor', 'sourceScalingFactor', 'unit'] tmcfs_for_stat_vars = [] null_status = indicator_codes.notna() for permutation in list(itertools.product([False, True], repeat=len(properties_of_stat_var_observation))): codes_that_match = null_status.copy() base_template_mcf = TEMPLATE_TMCF cols_to_include_in_csv = ['IndicatorCode'] for (include_col, column) in zip(permutation, properties_of_stat_var_observation): codes_that_match = codes_that_match.query(f'{column} == {include_col}') if include_col: base_template_mcf += f'{column}: C:WorldBank->{column} ' cols_to_include_in_csv.append(f'{column}') tmcfs_for_stat_vars.append((base_template_mcf, cols_to_include_in_csv, list(indicator_codes.loc[codes_that_match.index]['IndicatorCode']))) return tmcfs_for_stat_vars
def download_indicator_data(worldbank_countries, indicator_codes): ' Downloads World Bank country data for all countries and\n indicators provided.\n\n Retains only the unique indicator codes provided.\n\n Args:\n worldbank_countries: Dataframe with ISO 3166 alpha 3 code for each\n country.\n indicator_code: Dataframe with INDICATOR_CODES to include.\n\n Returns:\n worldbank_dataframe: A tidied pandas dataframe where each row has\n the format (indicator code, ISO 3166 alpha 3, year, value)\n for all countries and all indicators provided.\n ' worldbank_dataframe = pd.DataFrame() indicators_to_keep = list(indicator_codes['IndicatorCode'].unique()) for (index, country_code) in enumerate(worldbank_countries['ISO3166Alpha3']): print(f'Downloading {country_code}') country_df = read_worldbank(country_code) country_df = country_df[country_df['IndicatorCode'].isin(indicators_to_keep)] country_df['ISO3166Alpha3'] = country_code worldbank_dataframe = worldbank_dataframe.append(country_df) worldbank_dataframe['StatisticalVariable'] = worldbank_dataframe['IndicatorCode'].apply((lambda code: f"WorldBank/{code.replace('.', '_')}")) return worldbank_dataframe.rename({'year': 'Year'}, axis=1)
-1,911,059,532,361,748,700
Downloads World Bank country data for all countries and indicators provided. Retains only the unique indicator codes provided. Args: worldbank_countries: Dataframe with ISO 3166 alpha 3 code for each country. indicator_code: Dataframe with INDICATOR_CODES to include. Returns: worldbank_dataframe: A tidied pandas dataframe where each row has the format (indicator code, ISO 3166 alpha 3, year, value) for all countries and all indicators provided.
scripts/world_bank/worldbank.py
download_indicator_data
IanCostello/data
python
def download_indicator_data(worldbank_countries, indicator_codes): ' Downloads World Bank country data for all countries and\n indicators provided.\n\n Retains only the unique indicator codes provided.\n\n Args:\n worldbank_countries: Dataframe with ISO 3166 alpha 3 code for each\n country.\n indicator_code: Dataframe with INDICATOR_CODES to include.\n\n Returns:\n worldbank_dataframe: A tidied pandas dataframe where each row has\n the format (indicator code, ISO 3166 alpha 3, year, value)\n for all countries and all indicators provided.\n ' worldbank_dataframe = pd.DataFrame() indicators_to_keep = list(indicator_codes['IndicatorCode'].unique()) for (index, country_code) in enumerate(worldbank_countries['ISO3166Alpha3']): print(f'Downloading {country_code}') country_df = read_worldbank(country_code) country_df = country_df[country_df['IndicatorCode'].isin(indicators_to_keep)] country_df['ISO3166Alpha3'] = country_code worldbank_dataframe = worldbank_dataframe.append(country_df) worldbank_dataframe['StatisticalVariable'] = worldbank_dataframe['IndicatorCode'].apply((lambda code: f"WorldBank/{code.replace('.', '_')}")) return worldbank_dataframe.rename({'year': 'Year'}, axis=1)
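A hedged sketch of how download_indicator_data might be driven; the two tiny dataframes are stand-ins for the real country and indicator lists.

import pandas as pd

# Stand-in inputs; the real script builds these from its own configuration data.
countries = pd.DataFrame({'ISO3166Alpha3': ['USA', 'FRA']})
indicators = pd.DataFrame({'IndicatorCode': ['SP.POP.TOTL']})

frame = download_indicator_data(countries, indicators)
# One row per (country, indicator, year), with StatisticalVariable like
# "WorldBank/SP_POP_TOTL" derived from the indicator code.
print(frame[['StatisticalVariable', 'ISO3166Alpha3', 'Year', 'Value']].head())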
def output_csv_and_tmcf_by_grouping(worldbank_dataframe, tmcfs_for_stat_vars, indicator_codes): ' Outputs TMCFs and CSVs for each grouping of stat vars.\n\n Args:\n worldbank_dataframe: Dataframe containing all indicators for all\n countries.\n tmcfs_for_stat_vars: Array of tuples of template MCF,\n columns on stat var observations,\n indicator codes for that template.\n indicator_codes -> Dataframe with INDICATOR_CODES to include.\n ' output_csv = worldbank_dataframe[['StatisticalVariable', 'IndicatorCode', 'ISO3166Alpha3', 'Year', 'Value']] for (index, enum) in enumerate(tmcfs_for_stat_vars): (tmcf, stat_var_obs_cols, stat_vars_in_group) = enum if (len(stat_vars_in_group) != 0): with open(f'output/WorldBank_{index}.tmcf', 'w', newline='') as f_out: f_out.write(tmcf) matching_csv = output_csv[output_csv['IndicatorCode'].isin(stat_vars_in_group)] if (len(stat_var_obs_cols) > 1): matching_csv = pd.merge(matching_csv, indicator_codes[stat_var_obs_cols], on='IndicatorCode') matching_csv = matching_csv.round(10) matching_csv.drop('IndicatorCode', axis=1).to_csv(f'output/WorldBank_{index}.csv', float_format='%.10f', index=False)
3,036,552,345,292,613,000
Outputs TMCFs and CSVs for each grouping of stat vars. Args: worldbank_dataframe: Dataframe containing all indicators for all countries. tmcfs_for_stat_vars: Array of tuples of template MCF, columns on stat var observations, indicator codes for that template. indicator_codes -> Dataframe with INDICATOR_CODES to include.
scripts/world_bank/worldbank.py
output_csv_and_tmcf_by_grouping
IanCostello/data
python
def output_csv_and_tmcf_by_grouping(worldbank_dataframe, tmcfs_for_stat_vars, indicator_codes): ' Outputs TMCFs and CSVs for each grouping of stat vars.\n\n Args:\n worldbank_dataframe: Dataframe containing all indicators for all\n countries.\n tmcfs_for_stat_vars: Array of tuples of template MCF,\n columns on stat var observations,\n indicator codes for that template.\n indicator_codes -> Dataframe with INDICATOR_CODES to include.\n ' output_csv = worldbank_dataframe[['StatisticalVariable', 'IndicatorCode', 'ISO3166Alpha3', 'Year', 'Value']] for (index, enum) in enumerate(tmcfs_for_stat_vars): (tmcf, stat_var_obs_cols, stat_vars_in_group) = enum if (len(stat_vars_in_group) != 0): with open(f'output/WorldBank_{index}.tmcf', 'w', newline='') as f_out: f_out.write(tmcf) matching_csv = output_csv[output_csv['IndicatorCode'].isin(stat_vars_in_group)] if (len(stat_var_obs_cols) > 1): matching_csv = pd.merge(matching_csv, indicator_codes[stat_var_obs_cols], on='IndicatorCode') matching_csv = matching_csv.round(10) matching_csv.drop('IndicatorCode', axis=1).to_csv(f'output/WorldBank_{index}.csv', float_format='%.10f', index=False)
def source_scaling_remap(row, scaling_factor_lookup, existing_stat_var_lookup): ' Scales values by sourceScalingFactor and inputs exisiting stat vars.\n\n First, this function converts all values to per capita. Some measures\n in the World Bank dataset are per thousand or per hundred thousand, but\n we need to scale these to the common denomination format. Secondly,\n some statistical variables such as Count_Person_InLaborForce are not\n World Bank specific and need to be replaced. Both of these are imputted\n from the following two lists in args.\n\n Args:\n scaling_factor_lookup: A dictionary of a mapping between World Bank\n indicator code to the respective numeric scaling factor.\n existing_stat_var_lookup: A dictionary of a mapping between all\n indicator to be replaced with the exisiting stat var to replace it.\n ' indicator_code = row['IndicatorCode'] if (indicator_code in scaling_factor_lookup): row['Value'] = (row['Value'] / int(scaling_factor_lookup[indicator_code])) if (indicator_code in existing_stat_var_lookup): row['StatisticalVariable'] = ('dcid:' + existing_stat_var_lookup[indicator_code]) return row
-7,367,889,510,659,683,000
Scales values by sourceScalingFactor and inputs existing stat vars. First, this function converts all values to per capita. Some measures in the World Bank dataset are per thousand or per hundred thousand, but we need to scale these to the common denomination format. Secondly, some statistical variables such as Count_Person_InLaborForce are not World Bank specific and need to be replaced. Both of these are supplied via the two lookup dictionaries in args. Args: scaling_factor_lookup: A dictionary mapping each World Bank indicator code to its numeric scaling factor. existing_stat_var_lookup: A dictionary mapping each indicator to be replaced to the existing stat var that replaces it.
scripts/world_bank/worldbank.py
source_scaling_remap
IanCostello/data
python
def source_scaling_remap(row, scaling_factor_lookup, existing_stat_var_lookup): ' Scales values by sourceScalingFactor and inputs exisiting stat vars.\n\n First, this function converts all values to per capita. Some measures\n in the World Bank dataset are per thousand or per hundred thousand, but\n we need to scale these to the common denomination format. Secondly,\n some statistical variables such as Count_Person_InLaborForce are not\n World Bank specific and need to be replaced. Both of these are imputted\n from the following two lists in args.\n\n Args:\n scaling_factor_lookup: A dictionary of a mapping between World Bank\n indicator code to the respective numeric scaling factor.\n existing_stat_var_lookup: A dictionary of a mapping between all\n indicator to be replaced with the exisiting stat var to replace it.\n ' indicator_code = row['IndicatorCode'] if (indicator_code in scaling_factor_lookup): row['Value'] = (row['Value'] / int(scaling_factor_lookup[indicator_code])) if (indicator_code in existing_stat_var_lookup): row['StatisticalVariable'] = ('dcid:' + existing_stat_var_lookup[indicator_code]) return row
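The remap above is row-wise, so it is presumably applied with DataFrame.apply; a sketch with placeholder lookup contents (only Count_Person_InLaborForce comes from the docstring itself):

# Placeholder lookups for illustration.
scaling_factor_lookup = {'SP.DYN.CBRT.IN': 1000}   # hypothetical: indicator reported per thousand
existing_stat_var_lookup = {'SL.TLF.TOTL.IN': 'Count_Person_InLaborForce'}  # hypothetical mapping

worldbank_dataframe = worldbank_dataframe.apply(
    source_scaling_remap, axis=1,
    args=(scaling_factor_lookup, existing_stat_var_lookup))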
def row_to_constraints(row): ' Helper to generate list of constraints. ' constraints_text = '' next_constraint = 1 while ((f'p{next_constraint}' in row) and (not pd.isna(row[f'p{next_constraint}']))): variable = row[f'p{next_constraint}'] constraint = row[f'v{next_constraint}'] constraints_text += f'''{variable}: dcs:{constraint} ''' next_constraint += 1 return constraints_text
-6,597,162,794,003,730,000
Helper to generate list of constraints.
scripts/world_bank/worldbank.py
row_to_constraints
IanCostello/data
python
def row_to_constraints(row): ' ' constraints_text = '' next_constraint = 1 while ((f'p{next_constraint}' in row) and (not pd.isna(row[f'p{next_constraint}']))): variable = row[f'p{next_constraint}'] constraint = row[f'v{next_constraint}'] constraints_text += f'{variable}: dcs:{constraint} ' next_constraint += 1 return constraints_text
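To make the p1/v1, p2/v2 walking loop concrete, a tiny illustrative call (the property and value names are made up):

import pandas as pd

row = pd.Series({'p1': 'gender', 'v1': 'Female', 'p2': 'age', 'v2': 'Years15Onwards'})
print(row_to_constraints(row))
# In the original (un-flattened) source each constraint ends with a newline, so this prints:
#   gender: dcs:Female
#   age: dcs:Years15Onwards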
def request_file(url): '从远端下载文件, 并构建request.FILES中的uploaded file对象返回. \n @param url: 文件url路径, 如http://abc.im/12345.jpg\n \n @return: SimpleUploadedFile object, it is containned by the request.FILES(dictionary-like object) \n ' if (not url): return response = requests.get(url) return SimpleUploadedFile('file', response.content)
4,314,314,294,437,754,000
Download a file from a remote URL and build the kind of uploaded file object found in request.FILES. @param url: the file URL, e.g. http://abc.im/12345.jpg @return: SimpleUploadedFile object; it is contained in request.FILES (a dictionary-like object)
apps/utils/http.py
request_file
dlooto/driver-vision
python
def request_file(url): '从远端下载文件, 并构建request.FILES中的uploaded file对象返回. \n @param url: 文件url路径, 如http://abc.im/12345.jpg\n \n @return: SimpleUploadedFile object, it is containned by the request.FILES(dictionary-like object) \n ' if (not url): return response = requests.get(url) return SimpleUploadedFile('file', response.content)
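A small usage sketch for request_file, e.g. feeding a remote image into Django form handling; the URL is the placeholder from the docstring.

uploaded = request_file('http://abc.im/12345.jpg')
if uploaded is not None:
    # SimpleUploadedFile behaves like an entry of request.FILES.
    print(uploaded.name, uploaded.size)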
def send_request(host, send_url, method='GET', port=80, params={}, timeout=30, headers={'Content-type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain'}): '发起http请求. 执行结果返回响应字符串\n \n @param: The sample parameters format like following: \n params = {\'token\': \'dF0zeqAPWs\'}\n headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}\n host = \'fir.im\'\n port = 80\n method = \'GET\'\n send_url = \'/api/v2/app/version/541a7131f?token=dF0zeqBMXAP\'\n ' encoded_params = urllib.urlencode(params) conn = httplib.HTTPConnection(host, port=port, timeout=timeout) conn.request(method, send_url, encoded_params, headers) response = conn.getresponse() response_str = response.read() conn.close() return response_str
1,627,781,333,786,985,000
Send an HTTP request and return the response body as a string. @param: Sample parameters look like the following: params = {'token': 'dF0zeqAPWs'} headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"} host = 'fir.im' port = 80 method = 'GET' send_url = '/api/v2/app/version/541a7131f?token=dF0zeqBMXAP'
apps/utils/http.py
send_request
dlooto/driver-vision
python
def send_request(host, send_url, method='GET', port=80, params={}, timeout=30, headers={'Content-type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain'}): '发起http请求. 执行结果返回响应字符串\n \n @param: The sample parameters format like following: \n params = {\'token\': \'dF0zeqAPWs\'}\n headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}\n host = \'fir.im\'\n port = 80\n method = \'GET\'\n send_url = \'/api/v2/app/version/541a7131f?token=dF0zeqBMXAP\'\n ' encoded_params = urllib.urlencode(params) conn = httplib.HTTPConnection(host, port=port, timeout=timeout) conn.request(method, send_url, encoded_params, headers) response = conn.getresponse() response_str = response.read() conn.close() return response_str
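send_request is written against the Python 2 names httplib and urllib.urlencode. A hedged Python 3 rendering of the same flow (http.client plus urllib.parse) would presumably look like this; it is a porting sketch, not code from the repository.

import http.client
import urllib.parse

def send_request_py3(host, send_url, method='GET', port=80, params=None, timeout=30, headers=None):
    # Same call pattern as send_request(), with Python 3 module names and mutable defaults avoided.
    if headers is None:
        headers = {'Content-type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain'}
    encoded_params = urllib.parse.urlencode(params or {})
    conn = http.client.HTTPConnection(host, port=port, timeout=timeout)
    conn.request(method, send_url, encoded_params, headers)
    response = conn.getresponse()
    response_str = response.read()
    conn.close()
    return response_str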
def standard_response(template, req, context): '返回http Web response' return render_to_response(template, RequestContext(req, context))
-2,021,967,324,553,648,600
Return an HTTP web response.
apps/utils/http.py
standard_response
dlooto/driver-vision
python
def standard_response(template, req, context): return render_to_response(template, RequestContext(req, context))
def ok(data={}): 'data为字典类型数据' return (JResponse(codes.append('ok', data)) if data else resp('ok'))
2,627,429,873,032,745,000
data is a dictionary.
apps/utils/http.py
ok
dlooto/driver-vision
python
def ok(data={}): return (JResponse(codes.append('ok', data)) if data else resp('ok'))
def resp(crr, msg=''): '返回常量错误码. msg可格式化具有占位符的字符串\n \n params:\n @crr 错误码标识\n ' return JResponse(codes.fmat(crr, msg))
-288,261,512,890,758,300
Return a constant error code. msg can format a string that contains placeholders. params: @crr the error-code identifier
apps/utils/http.py
resp
dlooto/driver-vision
python
def resp(crr, msg=''): '返回常量错误码. msg可格式化具有占位符的字符串\n \n params:\n @crr 错误码标识\n ' return JResponse(codes.fmat(crr, msg))
async def send_async_http(session, method, url, *, retries=1, interval=1, wait_factor=2, timeout=30, success_callback=None, fail_callback=None, **kwargs) -> dict: '\n 发送一个异步请求至某个特定url,实现失败重试\n 每一次失败后会延时一段时间再去重试,延时时间由\n interval和wait_factor决定\n :param session:请求的异步session\n :param method:请求方法\n :param url:请求url\n :param retries:失败重试次数\n :param interval:失败后的再次异步请求的延时时长\n :param wait_factor:每一次失败后延时乘以这个因子,延长重试等待时间,一般1<wf<2,即延时最多2^retries秒\n :param timeout:连接超时时长\n :param success_callback:成功回调函数\n :param fail_callback:失败回调函数\n :param kwargs:其他键值参数\n :return:返回字典结果\n ' exception = None ret = {'cost': None, 'code': 0, 'exception': exception, 'tries': (- 1)} wait_interval = interval if (method.lower() not in ['get', 'head', 'post']): return ret if (retries == (- 1)): attempt = (- 1) elif (retries == 0): attempt = 1 else: attempt = (retries + 1) while (attempt != 0): try: start = datetime.datetime.now() async with getattr(session, method)(url, timeout=timeout, **kwargs) as response: end = datetime.datetime.now() t = (end - start).total_seconds() code = response.status ret = {'cost': t, 'code': code, 'tries': ((retries - attempt) + 1)} if success_callback: success_callback(ret) return ret except Exception as e: ret['exception'] = e ret['tries'] += 1 (await asyncio.sleep(wait_interval)) wait_interval = (wait_interval * wait_factor) attempt -= 1 if fail_callback: fail_callback(ret) return ret
-5,617,060,968,950,471,000
Send an asynchronous request to a given url, with retry on failure. After each failure the call waits for a while before retrying; the delay is determined by interval and wait_factor. :param session: the async session used for the request :param method: request method :param url: request url :param retries: number of retries on failure :param interval: delay before the next async request after a failure :param wait_factor: the delay is multiplied by this factor after every failure to stretch the retry wait; usually 1<wf<2, i.e. the delay is at most 2^retries seconds :param timeout: connection timeout :param success_callback: callback on success :param fail_callback: callback on failure :param kwargs: other keyword arguments :return: a dict result
tools/async_tools.py
send_async_http
01ly/FooProxy
python
async def send_async_http(session, method, url, *, retries=1, interval=1, wait_factor=2, timeout=30, success_callback=None, fail_callback=None, **kwargs) -> dict: '\n 发送一个异步请求至某个特定url,实现失败重试\n 每一次失败后会延时一段时间再去重试,延时时间由\n interval和wait_factor决定\n :param session:请求的异步session\n :param method:请求方法\n :param url:请求url\n :param retries:失败重试次数\n :param interval:失败后的再次异步请求的延时时长\n :param wait_factor:每一次失败后延时乘以这个因子,延长重试等待时间,一般1<wf<2,即延时最多2^retries秒\n :param timeout:连接超时时长\n :param success_callback:成功回调函数\n :param fail_callback:失败回调函数\n :param kwargs:其他键值参数\n :return:返回字典结果\n ' exception = None ret = {'cost': None, 'code': 0, 'exception': exception, 'tries': (- 1)} wait_interval = interval if (method.lower() not in ['get', 'head', 'post']): return ret if (retries == (- 1)): attempt = (- 1) elif (retries == 0): attempt = 1 else: attempt = (retries + 1) while (attempt != 0): try: start = datetime.datetime.now() async with getattr(session, method)(url, timeout=timeout, **kwargs) as response: end = datetime.datetime.now() t = (end - start).total_seconds() code = response.status ret = {'cost': t, 'code': code, 'tries': ((retries - attempt) + 1)} if success_callback: success_callback(ret) return ret except Exception as e: ret['exception'] = e ret['tries'] += 1 (await asyncio.sleep(wait_interval)) wait_interval = (wait_interval * wait_factor) attempt -= 1 if fail_callback: fail_callback(ret) return ret
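send_async_http only requires that session expose get/head/post as async context managers whose response carries a .status; an aiohttp-based driver is the usual fit, sketched below with a placeholder URL (aiohttp itself is an assumption, not named in the source).

import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        result = await send_async_http(
            session, 'get', 'https://example.com',
            retries=3, interval=1, wait_factor=1.5, timeout=10,
            fail_callback=lambda r: print('gave up after', r['tries'], 'tries'))
        # On success: {'cost': <seconds>, 'code': <HTTP status>, 'tries': <attempts>}
        print(result)

asyncio.run(main())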
def connect(argv): '\n connect [connector type] [connector args ...]\n 连接到设备\n 支持的设备类型:\n connect adb [serial or tcpip endpoint]\n ' connector_type = 'adb' if (len(argv) > 1): connector_type = argv[1] connector_args = argv[2:] else: connector_args = [] if (connector_type == 'adb'): _connect_adb(connector_args) else: print('unknown connector type:', connector_type)
5,385,238,541,063,250,000
connect [connector type] [connector args ...] Connect to a device. Supported device types: connect adb [serial or tcpip endpoint]
Arknights/shell_next.py
connect
TeemoKill/ArknightsAutoHelper
python
def connect(argv): '\n connect [connector type] [connector args ...]\n 连接到设备\n 支持的设备类型:\n connect adb [serial or tcpip endpoint]\n ' connector_type = 'adb' if (len(argv) > 1): connector_type = argv[1] connector_args = argv[2:] else: connector_args = [] if (connector_type == 'adb'): _connect_adb(connector_args) else: print('unknown connector type:', connector_type)
def quick(argv): '\n quick [+-rR[N]] [n]\n 重复挑战当前画面关卡特定次数或直到理智不足\n +r/-r 是否自动回复理智,最多回复 N 次\n +R/-R 是否使用源石回复理智(需要同时开启 +r)\n ' ops = _parse_opt(argv) if (len(argv) == 2): count = int(argv[1]) else: count = 114514 (helper, context) = _create_helper(show_toggle=True) for op in ops: op(helper) with context: helper.module_battle_slim(c_id=None, set_count=count) return 0
4,636,756,966,689,924,000
quick [+-rR[N]] [n] Repeat the stage currently on screen a given number of times, or until sanity runs out. +r/-r whether to restore sanity automatically, at most N times +R/-R whether to use Originite Prime to restore sanity (requires +r as well)
Arknights/shell_next.py
quick
TeemoKill/ArknightsAutoHelper
python
def quick(argv): '\n quick [+-rR[N]] [n]\n 重复挑战当前画面关卡特定次数或直到理智不足\n +r/-r 是否自动回复理智,最多回复 N 次\n +R/-R 是否使用源石回复理智(需要同时开启 +r)\n ' ops = _parse_opt(argv) if (len(argv) == 2): count = int(argv[1]) else: count = 114514 (helper, context) = _create_helper(show_toggle=True) for op in ops: op(helper) with context: helper.module_battle_slim(c_id=None, set_count=count) return 0
def auto(argv): '\n auto [+-rR[N]] stage1 count1 [stage2 count2] ...\n 按顺序挑战指定关卡特定次数直到理智不足\n ' ops = _parse_opt(argv) arglist = argv[1:] if ((len(arglist) % 2) != 0): print('usage: auto [+-rR] stage1 count1 [stage2 count2] ...') return 1 it = iter(arglist) tasks = [(stage.upper(), int(counts)) for (stage, counts) in zip(it, it)] (helper, context) = _create_helper(show_toggle=True) for op in ops: op(helper) with context: helper.main_handler(clear_tasks=False, task_list=tasks, auto_close=False) return 0
6,632,307,330,463,694,000
auto [+-rR[N]] stage1 count1 [stage2 count2] ... Challenge the listed stages in order for the given counts until sanity runs out
Arknights/shell_next.py
auto
TeemoKill/ArknightsAutoHelper
python
def auto(argv): '\n auto [+-rR[N]] stage1 count1 [stage2 count2] ...\n 按顺序挑战指定关卡特定次数直到理智不足\n ' ops = _parse_opt(argv) arglist = argv[1:] if ((len(arglist) % 2) != 0): print('usage: auto [+-rR] stage1 count1 [stage2 count2] ...') return 1 it = iter(arglist) tasks = [(stage.upper(), int(counts)) for (stage, counts) in zip(it, it)] (helper, context) = _create_helper(show_toggle=True) for op in ops: op(helper) with context: helper.main_handler(clear_tasks=False, task_list=tasks, auto_close=False) return 0
def collect(argv): '\n collect\n 收集每日任务和每周任务奖励\n ' (helper, context) = _create_helper() with context: helper.clear_task() return 0
-1,399,731,280,119,893,800
collect Collect daily and weekly mission rewards
Arknights/shell_next.py
collect
TeemoKill/ArknightsAutoHelper
python
def collect(argv): '\n collect\n 收集每日任务和每周任务奖励\n ' (helper, context) = _create_helper() with context: helper.clear_task() return 0
def recruit(argv): '\n recruit [tags ...]\n 公开招募识别/计算,不指定标签则从截图中识别\n ' from . import recruit_calc if (2 <= len(argv) <= 6): tags = argv[1:] result = recruit_calc.calculate(tags) elif (len(argv) == 1): (helper, context) = _create_helper(use_status_line=False) with context: result = helper.recruit() else: print('要素过多') return 1 colors = ['\x1b[36m', '\x1b[90m', '\x1b[37m', '\x1b[32m', '\x1b[93m', '\x1b[91m'] reset = '\x1b[39m' for (tags, operators, rank) in result: taglist = ','.join(tags) if (rank >= 1): taglist = (('\x1b[96m' + taglist) + '\x1b[39m') print(('%s: %s' % (taglist, ' '.join((((colors[op[1]] + op[0]) + reset) for op in operators)))))
8,619,267,370,571,826,000
recruit [tags ...] Public recruitment recognition/calculation; if no tags are given, they are recognized from a screenshot
Arknights/shell_next.py
recruit
TeemoKill/ArknightsAutoHelper
python
def recruit(argv): '\n recruit [tags ...]\n 公开招募识别/计算,不指定标签则从截图中识别\n ' from . import recruit_calc if (2 <= len(argv) <= 6): tags = argv[1:] result = recruit_calc.calculate(tags) elif (len(argv) == 1): (helper, context) = _create_helper(use_status_line=False) with context: result = helper.recruit() else: print('要素过多') return 1 colors = ['\x1b[36m', '\x1b[90m', '\x1b[37m', '\x1b[32m', '\x1b[93m', '\x1b[91m'] reset = '\x1b[39m' for (tags, operators, rank) in result: taglist = ','.join(tags) if (rank >= 1): taglist = (('\x1b[96m' + taglist) + '\x1b[39m') print(('%s: %s' % (taglist, ' '.join((((colors[op[1]] + op[0]) + reset) for op in operators)))))
def interactive(argv): '\n interactive\n 进入交互模式,减少按键次数(\n ' import shlex import traceback helpcmds(interactive_cmds) errorlevel = None try: import readline except ImportError: pass while True: try: if (device is None): prompt = 'akhelper> ' else: prompt = ('akhelper %s> ' % str(device)) cmdline = input(prompt) argv = shlex.split(cmdline) if ((len(argv) == 0) or (argv[0] == '?') or (argv[0] == 'help')): print(' '.join((x.__name__ for x in interactive_cmds))) continue elif (argv[0] == 'exit'): break cmd = match_cmd(argv[0], interactive_cmds) if (cmd is not None): with _alarm_context_factory(): errorlevel = cmd(argv) except EOFError: print('') break except (Exception, KeyboardInterrupt) as e: errorlevel = e traceback.print_exc() continue return errorlevel
-7,922,720,041,851,671,000
interactive Enter interactive mode, to reduce the number of keystrokes (
Arknights/shell_next.py
interactive
TeemoKill/ArknightsAutoHelper
python
def interactive(argv): '\n interactive\n 进入交互模式,减少按键次数(\n ' import shlex import traceback helpcmds(interactive_cmds) errorlevel = None try: import readline except ImportError: pass while True: try: if (device is None): prompt = 'akhelper> ' else: prompt = ('akhelper %s> ' % str(device)) cmdline = input(prompt) argv = shlex.split(cmdline) if ((len(argv) == 0) or (argv[0] == '?') or (argv[0] == 'help')): print(' '.join((x.__name__ for x in interactive_cmds))) continue elif (argv[0] == 'exit'): break cmd = match_cmd(argv[0], interactive_cmds) if (cmd is not None): with _alarm_context_factory(): errorlevel = cmd(argv) except EOFError: print() break except (Exception, KeyboardInterrupt) as e: errorlevel = e traceback.print_exc() continue return errorlevel
def help(argv): '\n help\n 输出本段消息\n ' print(('usage: %s command [command args]' % argv0)) helpcmds(global_cmds)
-3,847,951,780,685,274,600
help Print this message
Arknights/shell_next.py
help
TeemoKill/ArknightsAutoHelper
python
def help(argv): '\n help\n 输出本段消息\n ' print(('usage: %s command [command args]' % argv0)) helpcmds(global_cmds)
def majority_vote(labels, weight=None): 'Perform majority vote to determine the true label from\n multiple noisy oracles.\n\n Parameters\n ----------\n labels: list\n A list with length=k, which contains the labels provided by\n k noisy oracles.\n\n weight: list, optional (default=None)\n The weights of each oracle. It should have the same length with\n labels.\n\n Returns\n -------\n vote_count: int\n The number of votes.\n\n vote_result: object\n The label of the selected_instance, produced by majority voting\n of the selected oracles.\n ' oracle_weight = (np.ones(len(labels)) if (weight is None) else weight) assert (len(labels) == len(oracle_weight)) vote_result = collections.Counter(labels) most_votes = vote_result.most_common(n=1) return (most_votes[0][1], most_votes[0][0])
6,889,896,317,466,734,000
Perform majority vote to determine the true label from multiple noisy oracles. Parameters ---------- labels: list A list with length=k, which contains the labels provided by k noisy oracles. weight: list, optional (default=None) The weights of each oracle. It should have the same length with labels. Returns ------- vote_count: int The number of votes. vote_result: object The label of the selected_instance, produced by majority voting of the selected oracles.
alipy/query_strategy/noisy_oracles.py
majority_vote
Houchaoqun/ALiPy
python
def majority_vote(labels, weight=None): 'Perform majority vote to determine the true label from\n multiple noisy oracles.\n\n Parameters\n ----------\n labels: list\n A list with length=k, which contains the labels provided by\n k noisy oracles.\n\n weight: list, optional (default=None)\n The weights of each oracle. It should have the same length with\n labels.\n\n Returns\n -------\n vote_count: int\n The number of votes.\n\n vote_result: object\n The label of the selected_instance, produced by majority voting\n of the selected oracles.\n ' oracle_weight = (np.ones(len(labels)) if (weight is None) else weight) assert (len(labels) == len(oracle_weight)) vote_result = collections.Counter(labels) most_votes = vote_result.most_common(n=1) return (most_votes[0][1], most_votes[0][0])
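A tiny worked example of the voting helper above (the labels are hypothetical); note that in the body shown the weight argument is only length-checked, not factored into the vote.

import collections
import numpy as np

count, label = majority_vote([1, 1, 0, 1, 0])
print(count, label)   # -> 3 1 : three of the five noisy oracles voted for label 1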
def get_query_results(selected_instance, oracles, names=None): 'Get the query results from oracles of the selected instance.\n\n Parameters\n ----------\n selected_instance: int\n The indexes of selected samples. Should be a member of unlabeled set.\n\n oracles: {list, alipy.oracle.Oracles}\n An alipy.oracle.Oracle object that contains all the\n available oracles or a list of oracles.\n Each oracle should be a alipy.oracle.Oracle object.\n\n names: list, optional (default=None)\n A list of str which contains the names of oracles to query from.\n If not provided, it will query from all oracles.\n Each name should in oracles.names().\n\n Returns\n -------\n query_labels: list\n The queried labels.\n\n query_costs: list\n The total cost of query.\n ' costs = [] if isinstance(oracles, list): oracle_type = 'list' for oracle in oracles: assert isinstance(oracle, Oracle) elif isinstance(oracles, Oracles): oracle_type = 'oracles' else: raise TypeError('The type of parameter oracles must be a list or alipy.oracle.Oracles object.') labeling_results = [] if (oracle_type == 'list'): for i in (oracles.names() if (oracle_type == 'oracles') else range(len(oracles))): (lab, co) = oracles[i].query_by_index(selected_instance) labeling_results.append(lab[0]) costs.append(np.sum(co)) else: results = oracles.query_from_s(selected_instance, oracles_name=names) labeling_results = [res[0][0] for res in results] costs = [np.sum(res[1]) for res in results] return (labeling_results, costs)
4,728,099,483,708,930,000
Get the query results from oracles of the selected instance. Parameters ---------- selected_instance: int The indexes of selected samples. Should be a member of the unlabeled set. oracles: {list, alipy.oracle.Oracles} An alipy.oracle.Oracle object that contains all the available oracles or a list of oracles. Each oracle should be an alipy.oracle.Oracle object. names: list, optional (default=None) A list of str which contains the names of oracles to query from. If not provided, it will query from all oracles. Each name should be in oracles.names(). Returns ------- query_labels: list The queried labels. query_costs: list The total cost of query.
alipy/query_strategy/noisy_oracles.py
get_query_results
Houchaoqun/ALiPy
python
def get_query_results(selected_instance, oracles, names=None): 'Get the query results from oracles of the selected instance.\n\n Parameters\n ----------\n selected_instance: int\n The indexes of selected samples. Should be a member of unlabeled set.\n\n oracles: {list, alipy.oracle.Oracles}\n An alipy.oracle.Oracle object that contains all the\n available oracles or a list of oracles.\n Each oracle should be a alipy.oracle.Oracle object.\n\n names: list, optional (default=None)\n A list of str which contains the names of oracles to query from.\n If not provided, it will query from all oracles.\n Each name should in oracles.names().\n\n Returns\n -------\n query_labels: list\n The queried labels.\n\n query_costs: list\n The total cost of query.\n ' costs = [] if isinstance(oracles, list): oracle_type = 'list' for oracle in oracles: assert isinstance(oracle, Oracle) elif isinstance(oracles, Oracles): oracle_type = 'oracles' else: raise TypeError('The type of parameter oracles must be a list or alipy.oracle.Oracles object.') labeling_results = [] if (oracle_type == 'list'): for i in (oracles.names() if (oracle_type == 'oracles') else range(len(oracles))): (lab, co) = oracles[i].query_by_index(selected_instance) labeling_results.append(lab[0]) costs.append(np.sum(co)) else: results = oracles.query_from_s(selected_instance, oracles_name=names) labeling_results = [res[0][0] for res in results] costs = [np.sum(res[1]) for res in results] return (labeling_results, costs)
def get_majority_vote(selected_instance, oracles, names=None): 'Get the majority vote results of the selected instance.\n\n Parameters\n ----------\n selected_instance: int\n The indexes of selected samples. Should be a member of unlabeled set.\n\n oracles: {list, alipy.oracle.Oracles}\n An alipy.oracle.Oracle object that contains all the\n available oracles or a list of oracles.\n Each oracle should be a alipy.oracle.Oracle object.\n\n names: list, optional (default=None)\n A list of str which contains the names of oracles to query from.\n If not provided, it will query from all oracles.\n Each name should in oracles.names().\n\n Returns\n -------\n vote_count: int\n The number of votes.\n\n vote_result: object\n The label of the selected_instance, produced by majority voting\n of the selected oracles.\n\n query_costs: int\n The total cost of query.\n ' (labeling_results, cost) = get_query_results(selected_instance, oracles, names) majority_vote_result = majority_vote(labeling_results) return (majority_vote_result[0], majority_vote_result[1], np.sum(cost))
7,810,245,918,018,826,000
Get the majority vote results of the selected instance. Parameters ---------- selected_instance: int The indexes of selected samples. Should be a member of the unlabeled set. oracles: {list, alipy.oracle.Oracles} An alipy.oracle.Oracle object that contains all the available oracles or a list of oracles. Each oracle should be an alipy.oracle.Oracle object. names: list, optional (default=None) A list of str which contains the names of oracles to query from. If not provided, it will query from all oracles. Each name should be in oracles.names(). Returns ------- vote_count: int The number of votes. vote_result: object The label of the selected_instance, produced by majority voting of the selected oracles. query_costs: int The total cost of query.
alipy/query_strategy/noisy_oracles.py
get_majority_vote
Houchaoqun/ALiPy
python
def get_majority_vote(selected_instance, oracles, names=None): 'Get the majority vote results of the selected instance.\n\n Parameters\n ----------\n selected_instance: int\n The indexes of selected samples. Should be a member of unlabeled set.\n\n oracles: {list, alipy.oracle.Oracles}\n An alipy.oracle.Oracle object that contains all the\n available oracles or a list of oracles.\n Each oracle should be a alipy.oracle.Oracle object.\n\n names: list, optional (default=None)\n A list of str which contains the names of oracles to query from.\n If not provided, it will query from all oracles.\n Each name should in oracles.names().\n\n Returns\n -------\n vote_count: int\n The number of votes.\n\n vote_result: object\n The label of the selected_instance, produced by majority voting\n of the selected oracles.\n\n query_costs: int\n The total cost of query.\n ' (labeling_results, cost) = get_query_results(selected_instance, oracles, names) majority_vote_result = majority_vote(labeling_results) return (majority_vote_result[0], majority_vote_result[1], np.sum(cost))
def select(self, label_index, unlabel_index, eval_cost=False, model=None, **kwargs): "Query from oracles. Return the index of selected instance and oracle.\n\n Parameters\n ----------\n label_index: {list, np.ndarray, IndexCollection}\n The indexes of labeled samples.\n\n unlabel_index: {list, np.ndarray, IndexCollection}\n The indexes of unlabeled samples.\n\n eval_cost: bool, optional (default=False)\n To evaluate the cost of oracles or use the cost provided by oracles.\n\n model: object, optional (default=None)\n Current classification model, should have the 'predict_proba' method for probabilistic output.\n If not provided, LogisticRegression with default parameters implemented by sklearn will be used.\n\n n_neighbors: int, optional (default=10)\n How many neighbors of the selected instance will be used\n to evaluate the oracles.\n\n Returns\n -------\n selected_instance: int\n The index of selected instance.\n\n selected_oracle: int or str\n The index of selected oracle.\n If a list is given, the index of oracle will be returned.\n If a Oracles object is given, the oracle name will be returned.\n " if (model is None): model = LogisticRegression(solver='liblinear') model.fit(self.X[label_index], self.y[label_index]) (pred_unlab, _) = _get_proba_pred(self.X[unlabel_index], model) n_neighbors = min(kwargs.pop('n_neighbors', 10), (len(self._ini_ind) - 1)) return self.select_by_prediction_mat(label_index, unlabel_index, pred_unlab, n_neighbors=n_neighbors, eval_cost=eval_cost)
5,707,326,584,340,173,000
Query from oracles. Return the index of selected instance and oracle. Parameters ---------- label_index: {list, np.ndarray, IndexCollection} The indexes of labeled samples. unlabel_index: {list, np.ndarray, IndexCollection} The indexes of unlabeled samples. eval_cost: bool, optional (default=False) To evaluate the cost of oracles or use the cost provided by oracles. model: object, optional (default=None) Current classification model, should have the 'predict_proba' method for probabilistic output. If not provided, LogisticRegression with default parameters implemented by sklearn will be used. n_neighbors: int, optional (default=10) How many neighbors of the selected instance will be used to evaluate the oracles. Returns ------- selected_instance: int The index of selected instance. selected_oracle: int or str The index of selected oracle. If a list is given, the index of oracle will be returned. If a Oracles object is given, the oracle name will be returned.
alipy/query_strategy/noisy_oracles.py
select
Houchaoqun/ALiPy
python
def select(self, label_index, unlabel_index, eval_cost=False, model=None, **kwargs): "Query from oracles. Return the index of selected instance and oracle.\n\n Parameters\n ----------\n label_index: {list, np.ndarray, IndexCollection}\n The indexes of labeled samples.\n\n unlabel_index: {list, np.ndarray, IndexCollection}\n The indexes of unlabeled samples.\n\n eval_cost: bool, optional (default=False)\n To evaluate the cost of oracles or use the cost provided by oracles.\n\n model: object, optional (default=None)\n Current classification model, should have the 'predict_proba' method for probabilistic output.\n If not provided, LogisticRegression with default parameters implemented by sklearn will be used.\n\n n_neighbors: int, optional (default=10)\n How many neighbors of the selected instance will be used\n to evaluate the oracles.\n\n Returns\n -------\n selected_instance: int\n The index of selected instance.\n\n selected_oracle: int or str\n The index of selected oracle.\n If a list is given, the index of oracle will be returned.\n If a Oracles object is given, the oracle name will be returned.\n " if (model is None): model = LogisticRegression(solver='liblinear') model.fit(self.X[label_index], self.y[label_index]) (pred_unlab, _) = _get_proba_pred(self.X[unlabel_index], model) n_neighbors = min(kwargs.pop('n_neighbors', 10), (len(self._ini_ind) - 1)) return self.select_by_prediction_mat(label_index, unlabel_index, pred_unlab, n_neighbors=n_neighbors, eval_cost=eval_cost)
def select_by_prediction_mat(self, label_index, unlabel_index, predict, **kwargs): 'Query from oracles. Return the index of selected instance and oracle.\n\n Parameters\n ----------\n label_index: {list, np.ndarray, IndexCollection}\n The indexes of labeled samples.\n\n unlabel_index: {list, np.ndarray, IndexCollection}\n The indexes of unlabeled samples.\n\n predict: : 2d array, shape [n_samples, n_classes]\n The probabilistic prediction matrix for the unlabeled set.\n\n n_neighbors: int, optional (default=10)\n How many neighbors of the selected instance will be used\n to evaluate the oracles.\n\n eval_cost: bool, optional (default=False)\n To evaluate the cost of oracles or use the cost provided by oracles.\n\n Returns\n -------\n selected_instance: int\n The index of selected instance.\n\n selected_oracle: int or str\n The index of selected oracle.\n If a list is given, the index of oracle will be returned.\n If a Oracles object is given, the oracle name will be returned.\n ' n_neighbors = min(kwargs.pop('n_neighbors', 10), (len(self._ini_ind) - 1)) eval_cost = kwargs.pop('n_neighbors', False) (Q_table, oracle_ind_name_dict) = self._calc_Q_table(label_index, unlabel_index, self._oracles, predict, n_neighbors=n_neighbors, eval_cost=eval_cost) selected_pair = np.unravel_index(np.argmax(Q_table, axis=None), Q_table.shape) sel_ora = oracle_ind_name_dict[selected_pair[0]] if (not isinstance(sel_ora, list)): sel_ora = [sel_ora] return ([unlabel_index[selected_pair[1]]], sel_ora)
2,987,720,386,717,713,000
Query from oracles. Return the index of selected instance and oracle. Parameters ---------- label_index: {list, np.ndarray, IndexCollection} The indexes of labeled samples. unlabel_index: {list, np.ndarray, IndexCollection} The indexes of unlabeled samples. predict: : 2d array, shape [n_samples, n_classes] The probabilistic prediction matrix for the unlabeled set. n_neighbors: int, optional (default=10) How many neighbors of the selected instance will be used to evaluate the oracles. eval_cost: bool, optional (default=False) To evaluate the cost of oracles or use the cost provided by oracles. Returns ------- selected_instance: int The index of selected instance. selected_oracle: int or str The index of selected oracle. If a list is given, the index of oracle will be returned. If a Oracles object is given, the oracle name will be returned.
alipy/query_strategy/noisy_oracles.py
select_by_prediction_mat
Houchaoqun/ALiPy
python
def select_by_prediction_mat(self, label_index, unlabel_index, predict, **kwargs): 'Query from oracles. Return the index of selected instance and oracle.\n\n Parameters\n ----------\n label_index: {list, np.ndarray, IndexCollection}\n The indexes of labeled samples.\n\n unlabel_index: {list, np.ndarray, IndexCollection}\n The indexes of unlabeled samples.\n\n predict: : 2d array, shape [n_samples, n_classes]\n The probabilistic prediction matrix for the unlabeled set.\n\n n_neighbors: int, optional (default=10)\n How many neighbors of the selected instance will be used\n to evaluate the oracles.\n\n eval_cost: bool, optional (default=False)\n To evaluate the cost of oracles or use the cost provided by oracles.\n\n Returns\n -------\n selected_instance: int\n The index of selected instance.\n\n selected_oracle: int or str\n The index of selected oracle.\n If a list is given, the index of oracle will be returned.\n If a Oracles object is given, the oracle name will be returned.\n ' n_neighbors = min(kwargs.pop('n_neighbors', 10), (len(self._ini_ind) - 1)) eval_cost = kwargs.pop('n_neighbors', False) (Q_table, oracle_ind_name_dict) = self._calc_Q_table(label_index, unlabel_index, self._oracles, predict, n_neighbors=n_neighbors, eval_cost=eval_cost) selected_pair = np.unravel_index(np.argmax(Q_table, axis=None), Q_table.shape) sel_ora = oracle_ind_name_dict[selected_pair[0]] if (not isinstance(sel_ora, list)): sel_ora = [sel_ora] return ([unlabel_index[selected_pair[1]]], sel_ora)
def _calc_Q_table(self, label_index, unlabel_index, oracles, pred_unlab, n_neighbors=10, eval_cost=False): 'Query from oracles. Return the Q table and the oracle name/index of each row of Q_table.\n\n Parameters\n ----------\n label_index: {list, np.ndarray, IndexCollection}\n The indexes of labeled samples.\n\n unlabel_index: {list, np.ndarray, IndexCollection}\n The indexes of unlabeled samples.\n\n oracles: {list, alipy.oracle.Oracles}\n An alipy.oracle.Oracle object that contains all the\n available oracles or a list of oracles.\n Each oracle should be a alipy.oracle.Oracle object.\n\n predict: : 2d array, shape [n_samples, n_classes]\n The probabilistic prediction matrix for the unlabeled set.\n\n n_neighbors: int, optional (default=10)\n How many neighbors of the selected instance will be used\n to evaluate the oracles.\n\n eval_cost: bool, optional (default=False)\n To evaluate the cost of oracles or use the cost provided by oracles.\n\n Returns\n -------\n Q_table: 2D array\n The Q table.\n\n oracle_ind_name_dict: dict\n The oracle name/index of each row of Q_table.\n ' if ((self.X is None) or (self.y is None)): raise Exception('Data matrix is not provided, use select_by_prediction_mat() instead.') assert isinstance(unlabel_index, collections.Iterable) assert isinstance(label_index, collections.Iterable) unlabel_index = np.asarray(unlabel_index) label_index = np.asarray(label_index) num_of_neighbors = n_neighbors if (len(unlabel_index) <= 1): return unlabel_index Q_table = np.zeros((len(oracles), len(unlabel_index))) spv = np.shape(pred_unlab) rx = np.partition(pred_unlab, (spv[1] - 1), axis=1) rx = (1 - rx[:, (spv[1] - 1)]) for (unlab_ind, unlab_ins_ind) in enumerate(unlabel_index): (nn_dist, nn_of_selected_ins) = self._nntree.kneighbors(X=self.X[unlab_ins_ind].reshape(1, (- 1)), n_neighbors=num_of_neighbors, return_distance=True) nn_dist = nn_dist[0] nn_of_selected_ins = nn_of_selected_ins[0] nn_of_selected_ins = self._ini_ind[nn_of_selected_ins] oracles_score = [] for (ora_ind, ora_name) in enumerate(self._oracles_iterset): oracle = oracles[ora_name] (labels, cost) = oracle.query_by_index(nn_of_selected_ins) oracles_score.append((sum([(nn_dist[i] * (labels[i] == self.y[nn_of_selected_ins[i]])) for i in range(num_of_neighbors)]) / num_of_neighbors)) (labels, cost) = oracle.query_by_index(label_index) if eval_cost: oracles_cost = (sum([(labels[i] == self.y[label_index[i]]) for i in range(len(label_index))]) / len(label_index)) else: oracles_cost = cost[0] Q_table[(ora_ind, unlab_ind)] = ((oracles_score[ora_ind] * rx[unlab_ind]) / max(oracles_cost, 0.0001)) return (Q_table, self._oracle_ind_name_dict)
6,530,858,138,236,620,000
Query from oracles. Return the Q table and the oracle name/index of each row of Q_table. Parameters ---------- label_index: {list, np.ndarray, IndexCollection} The indexes of labeled samples. unlabel_index: {list, np.ndarray, IndexCollection} The indexes of unlabeled samples. oracles: {list, alipy.oracle.Oracles} An alipy.oracle.Oracle object that contains all the available oracles or a list of oracles. Each oracle should be a alipy.oracle.Oracle object. predict: : 2d array, shape [n_samples, n_classes] The probabilistic prediction matrix for the unlabeled set. n_neighbors: int, optional (default=10) How many neighbors of the selected instance will be used to evaluate the oracles. eval_cost: bool, optional (default=False) To evaluate the cost of oracles or use the cost provided by oracles. Returns ------- Q_table: 2D array The Q table. oracle_ind_name_dict: dict The oracle name/index of each row of Q_table.
alipy/query_strategy/noisy_oracles.py
_calc_Q_table
Houchaoqun/ALiPy
python
def _calc_Q_table(self, label_index, unlabel_index, oracles, pred_unlab, n_neighbors=10, eval_cost=False): 'Query from oracles. Return the Q table and the oracle name/index of each row of Q_table.\n\n Parameters\n ----------\n label_index: {list, np.ndarray, IndexCollection}\n The indexes of labeled samples.\n\n unlabel_index: {list, np.ndarray, IndexCollection}\n The indexes of unlabeled samples.\n\n oracles: {list, alipy.oracle.Oracles}\n An alipy.oracle.Oracle object that contains all the\n available oracles or a list of oracles.\n Each oracle should be a alipy.oracle.Oracle object.\n\n predict: : 2d array, shape [n_samples, n_classes]\n The probabilistic prediction matrix for the unlabeled set.\n\n n_neighbors: int, optional (default=10)\n How many neighbors of the selected instance will be used\n to evaluate the oracles.\n\n eval_cost: bool, optional (default=False)\n To evaluate the cost of oracles or use the cost provided by oracles.\n\n Returns\n -------\n Q_table: 2D array\n The Q table.\n\n oracle_ind_name_dict: dict\n The oracle name/index of each row of Q_table.\n ' if ((self.X is None) or (self.y is None)): raise Exception('Data matrix is not provided, use select_by_prediction_mat() instead.') assert isinstance(unlabel_index, collections.Iterable) assert isinstance(label_index, collections.Iterable) unlabel_index = np.asarray(unlabel_index) label_index = np.asarray(label_index) num_of_neighbors = n_neighbors if (len(unlabel_index) <= 1): return unlabel_index Q_table = np.zeros((len(oracles), len(unlabel_index))) spv = np.shape(pred_unlab) rx = np.partition(pred_unlab, (spv[1] - 1), axis=1) rx = (1 - rx[:, (spv[1] - 1)]) for (unlab_ind, unlab_ins_ind) in enumerate(unlabel_index): (nn_dist, nn_of_selected_ins) = self._nntree.kneighbors(X=self.X[unlab_ins_ind].reshape(1, (- 1)), n_neighbors=num_of_neighbors, return_distance=True) nn_dist = nn_dist[0] nn_of_selected_ins = nn_of_selected_ins[0] nn_of_selected_ins = self._ini_ind[nn_of_selected_ins] oracles_score = [] for (ora_ind, ora_name) in enumerate(self._oracles_iterset): oracle = oracles[ora_name] (labels, cost) = oracle.query_by_index(nn_of_selected_ins) oracles_score.append((sum([(nn_dist[i] * (labels[i] == self.y[nn_of_selected_ins[i]])) for i in range(num_of_neighbors)]) / num_of_neighbors)) (labels, cost) = oracle.query_by_index(label_index) if eval_cost: oracles_cost = (sum([(labels[i] == self.y[label_index[i]]) for i in range(len(label_index))]) / len(label_index)) else: oracles_cost = cost[0] Q_table[(ora_ind, unlab_ind)] = ((oracles_score[ora_ind] * rx[unlab_ind]) / max(oracles_cost, 0.0001)) return (Q_table, self._oracle_ind_name_dict)
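Reading the loop above, each cell of the Q table reduces to (my paraphrase of the code, not a formula stated in the source): Q[o, x] = score(o, x) * (1 - max_class_prob(x)) / max(cost(o), 0.0001), where score(o, x) is the distance-weighted rate at which oracle o agrees with the true labels of x's n_neighbors nearest neighbours (drawn from self._ini_ind), and cost(o) is either the oracle's stated cost or its accuracy on the currently labeled set when eval_cost is True. select_by_prediction_mat then takes the arg-max cell, i.e. the (oracle, instance) pair with the best expected accuracy per unit cost on an uncertain instance.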
def select(self, label_index, unlabel_index, model=None, **kwargs): 'Select an instance and a batch of oracles to label it.\n The instance is selected by uncertainty, the oracles is\n selected by the difference between their\n labeling results and the majority vote results.\n\n Parameters\n ----------\n label_index: {list, np.ndarray, IndexCollection}\n The indexes of labeled samples.\n\n unlabel_index: {list, np.ndarray, IndexCollection}\n The indexes of unlabeled samples.\n\n Returns\n -------\n selected_instance: int\n The index of selected instance. Selected by uncertainty.\n\n selected_oracles: list\n The selected oracles for querying.\n ' if (model is None): model = LogisticRegression(solver='liblinear') model.fit(self.X[label_index], self.y[label_index]) (pred_unlab, _) = _get_proba_pred(self.X[unlabel_index], model) return self.select_by_prediction_mat(label_index, unlabel_index, pred_unlab)
-6,239,752,573,043,427,000
Select an instance and a batch of oracles to label it. The instance is selected by uncertainty; the oracles are selected by the difference between their labeling results and the majority vote results. Parameters ---------- label_index: {list, np.ndarray, IndexCollection} The indexes of labeled samples. unlabel_index: {list, np.ndarray, IndexCollection} The indexes of unlabeled samples. Returns ------- selected_instance: int The index of the selected instance. Selected by uncertainty. selected_oracles: list The selected oracles for querying.
alipy/query_strategy/noisy_oracles.py
select
Houchaoqun/ALiPy
python
def select(self, label_index, unlabel_index, model=None, **kwargs): 'Select an instance and a batch of oracles to label it.\n The instance is selected by uncertainty, the oracles is\n selected by the difference between their\n labeling results and the majority vote results.\n\n Parameters\n ----------\n label_index: {list, np.ndarray, IndexCollection}\n The indexes of labeled samples.\n\n unlabel_index: {list, np.ndarray, IndexCollection}\n The indexes of unlabeled samples.\n\n Returns\n -------\n selected_instance: int\n The index of selected instance. Selected by uncertainty.\n\n selected_oracles: list\n The selected oracles for querying.\n ' if (model is None): model = LogisticRegression(solver='liblinear') model.fit(self.X[label_index], self.y[label_index]) (pred_unlab, _) = _get_proba_pred(self.X[unlabel_index], model) return self.select_by_prediction_mat(label_index, unlabel_index, pred_unlab)
def select_by_prediction_mat(self, label_index, unlabel_index, predict): 'Query from oracles. Return the index of selected instance and oracle.\n\n Parameters\n ----------\n label_index: {list, np.ndarray, IndexCollection}\n The indexes of labeled samples.\n\n unlabel_index: {list, np.ndarray, IndexCollection}\n The indexes of unlabeled samples.\n\n predict: : 2d array, shape [n_samples, n_classes]\n The probabilistic prediction matrix for the unlabeled set.\n\n Returns\n -------\n selected_instance: int\n The index of selected instance. Selected by uncertainty.\n\n selected_oracles: list\n The selected oracles for querying.\n ' assert isinstance(unlabel_index, collections.Iterable) assert isinstance(label_index, collections.Iterable) unlabel_index = np.asarray(unlabel_index) label_index = np.asarray(label_index) if (len(unlabel_index) <= 1): return unlabel_index unc = QueryInstanceUncertainty(measure='least_confident') selected_instance = unc.select_by_prediction_mat(unlabel_index=unlabel_index, predict=predict, batch_size=1)[0] return ([selected_instance], self.select_by_given_instance(selected_instance))
-8,976,557,389,727,556,000
Query from oracles. Return the index of the selected instance and oracle. Parameters ---------- label_index: {list, np.ndarray, IndexCollection} The indexes of labeled samples. unlabel_index: {list, np.ndarray, IndexCollection} The indexes of unlabeled samples. predict: 2d array, shape [n_samples, n_classes] The probabilistic prediction matrix for the unlabeled set. Returns ------- selected_instance: int The index of the selected instance. Selected by uncertainty. selected_oracles: list The selected oracles for querying.
alipy/query_strategy/noisy_oracles.py
select_by_prediction_mat
Houchaoqun/ALiPy
python
def select_by_prediction_mat(self, label_index, unlabel_index, predict): 'Query from oracles. Return the index of selected instance and oracle.\n\n Parameters\n ----------\n label_index: {list, np.ndarray, IndexCollection}\n The indexes of labeled samples.\n\n unlabel_index: {list, np.ndarray, IndexCollection}\n The indexes of unlabeled samples.\n\n predict: : 2d array, shape [n_samples, n_classes]\n The probabilistic prediction matrix for the unlabeled set.\n\n Returns\n -------\n selected_instance: int\n The index of selected instance. Selected by uncertainty.\n\n selected_oracles: list\n The selected oracles for querying.\n ' assert isinstance(unlabel_index, collections.Iterable) assert isinstance(label_index, collections.Iterable) unlabel_index = np.asarray(unlabel_index) label_index = np.asarray(label_index) if (len(unlabel_index) <= 1): return unlabel_index unc = QueryInstanceUncertainty(measure='least_confident') selected_instance = unc.select_by_prediction_mat(unlabel_index=unlabel_index, predict=predict, batch_size=1)[0] return ([selected_instance], self.select_by_given_instance(selected_instance))
def _calc_uia(self, oracle_history, majority_vote_result, alpha=0.05): 'Calculate the UI(a) by providing the labeling history and the majority vote results.\n\n Parameters\n ----------\n oracle_history: dict\n The labeling history of an oracle. The key is the index of instance, the value is the\n label given by the oracle.\n\n majority_vote_result: dict\n The results of majority vote of instances. The key is the index of instance,\n the value is the label given by the oracle.\n\n alpha: float, optional (default=0.05)\n Used for calculating the critical value for the Student’s t-distribution with n−1\n degrees of freedom at the alpha/2 confidence level.\n\n Returns\n -------\n uia: float\n The UI(a) value.\n ' n = len(self._oracles_iterset) t_crit_val = scipy.stats.t.isf([(alpha / 2)], (n - 1))[0] reward_arr = [] for ind in oracle_history.keys(): if (oracle_history[ind] == majority_vote_result[ind]): reward_arr.append(1) else: reward_arr.append(0) mean_a = np.mean(reward_arr) std_a = np.std(reward_arr) uia = (mean_a + ((t_crit_val * std_a) / np.sqrt(n))) return uia
-5,705,311,290,492,422,000
Calculate the UI(a) by providing the labeling history and the majority vote results. Parameters ---------- oracle_history: dict The labeling history of an oracle. The key is the index of instance, the value is the label given by the oracle. majority_vote_result: dict The results of majority vote of instances. The key is the index of instance, the value is the label given by the oracle. alpha: float, optional (default=0.05) Used for calculating the critical value for the Student’s t-distribution with n−1 degrees of freedom at the alpha/2 confidence level. Returns ------- uia: float The UI(a) value.
alipy/query_strategy/noisy_oracles.py
_calc_uia
Houchaoqun/ALiPy
python
def _calc_uia(self, oracle_history, majority_vote_result, alpha=0.05): 'Calculate the UI(a) by providing the labeling history and the majority vote results.\n\n Parameters\n ----------\n oracle_history: dict\n The labeling history of an oracle. The key is the index of instance, the value is the\n label given by the oracle.\n\n majority_vote_result: dict\n The results of majority vote of instances. The key is the index of instance,\n the value is the label given by the oracle.\n\n alpha: float, optional (default=0.05)\n Used for calculating the critical value for the Student’s t-distribution with n−1\n degrees of freedom at the alpha/2 confidence level.\n\n Returns\n -------\n uia: float\n The UI(a) value.\n ' n = len(self._oracles_iterset) t_crit_val = scipy.stats.t.isf([(alpha / 2)], (n - 1))[0] reward_arr = [] for ind in oracle_history.keys(): if (oracle_history[ind] == majority_vote_result[ind]): reward_arr.append(1) else: reward_arr.append(0) mean_a = np.mean(reward_arr) std_a = np.std(reward_arr) uia = (mean_a + ((t_crit_val * std_a) / np.sqrt(n))) return uia
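For context on the record above: UI(a) is an upper confidence bound on an oracle's agreement with the majority vote, i.e. mean(rewards) + t_{alpha/2, n-1} * std(rewards) / sqrt(n). A minimal standalone sketch of the same computation on toy data (the function name and reward values below are illustrative, not from the source repository):

import numpy as np
import scipy.stats

def upper_confidence_agreement(rewards, n_oracles, alpha=0.05):
    # mean agreement with the majority vote plus a t-based confidence margin,
    # mirroring the _calc_uia record above
    t_crit = scipy.stats.t.isf(alpha / 2, n_oracles - 1)
    return np.mean(rewards) + t_crit * np.std(rewards) / np.sqrt(n_oracles)

# toy history: an oracle agreed with the majority vote on 4 of its 5 queried instances
print(upper_confidence_agreement([1, 1, 0, 1, 1], n_oracles=3))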
def select_by_given_instance(self, selected_instance): 'Select oracle to query by providing the index of selected instance.\n\n Parameters\n ----------\n selected_instance: int\n The indexes of selected samples. Should be a member of unlabeled set.\n\n Returns\n -------\n selected_oracles: list\n The selected oracles for querying.\n ' selected_oracles = np.nonzero((self._UI >= (self.epsilon * np.max(self._UI)))) selected_oracles = selected_oracles[0] labeling_results = [] for i in selected_oracles: (lab, _) = self._oracles[self._oracle_ind_name_dict[i]].query_by_index(selected_instance) labeling_results.append(lab[0]) self._oracles_history[i][selected_instance] = copy.copy(lab[0]) (_, majority_vote_result) = majority_vote(labeling_results) reward_arr = np.zeros(len(selected_oracles)) same_ind = np.nonzero((labeling_results == majority_vote_result))[0] reward_arr[same_ind] = 1 self._majority_vote_results[selected_instance] = majority_vote_result for i in selected_oracles: self._UI[i] = self._calc_uia(self._oracles_history[i], self._majority_vote_results) return [self._oracle_ind_name_dict[i] for i in selected_oracles]
2,648,065,523,936,558,600
Select oracle to query by providing the index of the selected instance. Parameters ---------- selected_instance: int The index of the selected sample. Should be a member of the unlabeled set. Returns ------- selected_oracles: list The selected oracles for querying.
alipy/query_strategy/noisy_oracles.py
select_by_given_instance
Houchaoqun/ALiPy
python
def select_by_given_instance(self, selected_instance): 'Select oracle to query by providing the index of selected instance.\n\n Parameters\n ----------\n selected_instance: int\n The indexes of selected samples. Should be a member of unlabeled set.\n\n Returns\n -------\n selected_oracles: list\n The selected oracles for querying.\n ' selected_oracles = np.nonzero((self._UI >= (self.epsilon * np.max(self._UI)))) selected_oracles = selected_oracles[0] labeling_results = [] for i in selected_oracles: (lab, _) = self._oracles[self._oracle_ind_name_dict[i]].query_by_index(selected_instance) labeling_results.append(lab[0]) self._oracles_history[i][selected_instance] = copy.copy(lab[0]) (_, majority_vote_result) = majority_vote(labeling_results) reward_arr = np.zeros(len(selected_oracles)) same_ind = np.nonzero((labeling_results == majority_vote_result))[0] reward_arr[same_ind] = 1 self._majority_vote_results[selected_instance] = majority_vote_result for i in selected_oracles: self._UI[i] = self._calc_uia(self._oracles_history[i], self._majority_vote_results) return [self._oracle_ind_name_dict[i] for i in selected_oracles]
def select_by_given_instance(self, selected_instance): 'Select oracle to query by providing the index of selected instance.\n\n Parameters\n ----------\n selected_instance: int\n The indexes of selected samples. Should be a member of unlabeled set.\n\n Returns\n -------\n oracles_ind: list\n The indexes of selected oracles.\n ' return self._oracle_ind_name_dict.values()
3,101,368,587,388,647,400
Select oracle to query by providing the index of the selected instance. Parameters ---------- selected_instance: int The index of the selected sample. Should be a member of the unlabeled set. Returns ------- oracles_ind: list The indexes of the selected oracles.
alipy/query_strategy/noisy_oracles.py
select_by_given_instance
Houchaoqun/ALiPy
python
def select_by_given_instance(self, selected_instance): 'Select oracle to query by providing the index of selected instance.\n\n Parameters\n ----------\n selected_instance: int\n The indexes of selected samples. Should be a member of unlabeled set.\n\n Returns\n -------\n oracles_ind: list\n The indexes of selected oracles.\n ' return self._oracle_ind_name_dict.values()
def select_by_given_instance(self, selected_instance): 'Select oracle to query by providing the index of selected instance.\n\n Parameters\n ----------\n selected_instance: int\n The indexes of selected samples. Should be a member of unlabeled set.\n\n Returns\n -------\n oracles_ind: list\n The indexes of selected oracles.\n ' return [self._oracle_ind_name_dict[np.random.randint(0, len(self._oracles), 1)[0]]]
2,175,907,547,186,365,200
Select oracle to query by providing the index of the selected instance. Parameters ---------- selected_instance: int The index of the selected sample. Should be a member of the unlabeled set. Returns ------- oracles_ind: list The indexes of the selected oracles.
alipy/query_strategy/noisy_oracles.py
select_by_given_instance
Houchaoqun/ALiPy
python
def select_by_given_instance(self, selected_instance): 'Select oracle to query by providing the index of selected instance.\n\n Parameters\n ----------\n selected_instance: int\n The indexes of selected samples. Should be a member of unlabeled set.\n\n Returns\n -------\n oracles_ind: list\n The indexes of selected oracles.\n ' return [self._oracle_ind_name_dict[np.random.randint(0, len(self._oracles), 1)[0]]]
def run(self, params={}): 'Add label to issue' issue = self.connection.client.issue(id=params['id']) if (not issue): raise Exception(('Error: No issue found with ID: ' + params['id'])) labels = params['label'].split(',') for label in labels: if (label not in issue.fields.labels): issue.fields.labels.append(label) self.logger.info('Adding labels to issue %s: %s', params['id'], issue.fields.labels) issue.update(fields={'labels': issue.fields.labels}) return {'success': True}
4,915,808,299,214,609,000
Add label to issue
jira/komand_jira/actions/label_issue/action.py
run
xhennessy-r7/insightconnect-plugins
python
def run(self, params={}): issue = self.connection.client.issue(id=params['id']) if (not issue): raise Exception(('Error: No issue found with ID: ' + params['id'])) labels = params['label'].split(',') for label in labels: if (label not in issue.fields.labels): issue.fields.labels.append(label) self.logger.info('Adding labels to issue %s: %s', params['id'], issue.fields.labels) issue.update(fields={'labels': issue.fields.labels}) return {'success': True}
def normalize_query_parameters(params): '9.1.1. Normalize Request Parameters' return '&'.join(map((lambda pair: '='.join([_quote(pair[0]), _quote(pair[1])])), sorted(params.items())))
941,568,051,545,711,700
9.1.1. Normalize Request Parameters
emailage/signature.py
normalize_query_parameters
bluefish6/Emailage_Python
python
def normalize_query_parameters(params): return '&'.join(map((lambda pair: '='.join([_quote(pair[0]), _quote(pair[1])])), sorted(params.items())))
def concatenate_request_elements(method, url, query): '9.1.3. Concatenate Request Elements' return '&'.join(map(_quote, [str(method).upper(), url, query]))
-1,319,764,161,872,008,200
9.1.3. Concatenate Request Elements
emailage/signature.py
concatenate_request_elements
bluefish6/Emailage_Python
python
def concatenate_request_elements(method, url, query): return '&'.join(map(_quote, [str(method).upper(), url, query]))
def hmac_sha1(base_string, hmac_key): '9.2. HMAC-SHA1' hash = hmac.new(b(hmac_key), b(base_string), sha1) return hash.digest()
8,651,286,224,927,855,000
9.2. HMAC-SHA1
emailage/signature.py
hmac_sha1
bluefish6/Emailage_Python
python
def hmac_sha1(base_string, hmac_key): hash = hmac.new(b(hmac_key), b(base_string), sha1) return hash.digest()
def encode(digest): '9.2.1. Generating Signature' return base64.b64encode(digest).decode('ascii').rstrip('\n')
6,410,103,192,144,333,000
9.2.1. Generating Signature
emailage/signature.py
encode
bluefish6/Emailage_Python
python
def encode(digest): return base64.b64encode(digest).decode('ascii').rstrip('\n')
def add_oauth_entries_to_fields_dict(secret, params, nonce=None, timestamp=None): " Adds dict entries to the user's params dict which are required for OAuth1.0 signature generation\n\n :param secret: API secret\n :param params: dictionary of values which will be sent in the query\n :param nonce: (Optional) random string used in signature creation, uuid4() is used if not provided\n :param timestamp: (Optional) integer-format timestamp, time.time() is used if not provided\n :return: dict containing params and the OAuth1.0 fields required before executing signature.create\n\n :type secret: str\n :type params: dict\n :type nonce: str\n :type timestamp: int\n\n :Example:\n\n >>> from emailage.signature import add_oauth_entries_to_fields_dict\n >>> query_params = dict(user_email='[email protected]', query='[email protected]' )\n >>> query_params = add_oauth_entries_to_fields_dict('YOUR_API_SECRET', query_params)\n >>> query_params['oauth_consumer_key']\n 'YOUR_API_SECRET'\n >>> query_params['oauth_signature_method']\n 'HMAC-SHA1'\n >>> query_params['oauth_version']\n 1.0\n " if (nonce is None): nonce = uuid4() if (timestamp is None): timestamp = int(time.time()) params['oauth_consumer_key'] = secret params['oauth_nonce'] = nonce params['oauth_signature_method'] = 'HMAC-SHA1' params['oauth_timestamp'] = timestamp params['oauth_version'] = 1.0 return params
3,161,492,757,002,849,000
Adds dict entries to the user's params dict which are required for OAuth1.0 signature generation :param secret: API secret :param params: dictionary of values which will be sent in the query :param nonce: (Optional) random string used in signature creation, uuid4() is used if not provided :param timestamp: (Optional) integer-format timestamp, time.time() is used if not provided :return: dict containing params and the OAuth1.0 fields required before executing signature.create :type secret: str :type params: dict :type nonce: str :type timestamp: int :Example: >>> from emailage.signature import add_oauth_entries_to_fields_dict >>> query_params = dict(user_email='[email protected]', query='[email protected]' ) >>> query_params = add_oauth_entries_to_fields_dict('YOUR_API_SECRET', query_params) >>> query_params['oauth_consumer_key'] 'YOUR_API_SECRET' >>> query_params['oauth_signature_method'] 'HMAC-SHA1' >>> query_params['oauth_version'] 1.0
emailage/signature.py
add_oauth_entries_to_fields_dict
bluefish6/Emailage_Python
python
def add_oauth_entries_to_fields_dict(secret, params, nonce=None, timestamp=None): " Adds dict entries to the user's params dict which are required for OAuth1.0 signature generation\n\n :param secret: API secret\n :param params: dictionary of values which will be sent in the query\n :param nonce: (Optional) random string used in signature creation, uuid4() is used if not provided\n :param timestamp: (Optional) integer-format timestamp, time.time() is used if not provided\n :return: dict containing params and the OAuth1.0 fields required before executing signature.create\n\n :type secret: str\n :type params: dict\n :type nonce: str\n :type timestamp: int\n\n :Example:\n\n >>> from emailage.signature import add_oauth_entries_to_fields_dict\n >>> query_params = dict(user_email='[email protected]', query='[email protected]' )\n >>> query_params = add_oauth_entries_to_fields_dict('YOUR_API_SECRET', query_params)\n >>> query_params['oauth_consumer_key']\n 'YOUR_API_SECRET'\n >>> query_params['oauth_signature_method']\n 'HMAC-SHA1'\n >>> query_params['oauth_version']\n 1.0\n " if (nonce is None): nonce = uuid4() if (timestamp is None): timestamp = int(time.time()) params['oauth_consumer_key'] = secret params['oauth_nonce'] = nonce params['oauth_signature_method'] = 'HMAC-SHA1' params['oauth_timestamp'] = timestamp params['oauth_version'] = 1.0 return params
def create(method, url, params, hmac_key): " Generates the OAuth1.0 signature used as the value for the query string parameter 'oauth_signature'\n \n :param method: HTTP method that will be used to send the request ( 'GET' | 'POST' ); EmailageClient uses GET\n :param url: API domain and endpoint up to the ?\n :param params: user-provided query string parameters and the OAuth1.0 parameters\n :method add_oauth_entries_to_fields_dict:\n :param hmac_key: for Emailage users, this is your consumer token with an '&' (ampersand) appended to the end\n\n :return: str value used for oauth_signature\n\n :type method: str\n :type url: str\n :type params: dict\n :type hmac_key: str\n\n :Example:\n\n >>> from emailage.signature import add_oauth_entries_to_fields_dict, create\n >>> your_api_key = 'SOME_KEY'\n >>> your_hmac_key = 'SOME_SECRET' + '&'\n >>> api_url = 'https://sandbox.emailage.com/emailagevalidator/'\n >>> query_params = { 'query': '[email protected]', 'user_email': '[email protected]' }\n >>> query_params = add_oauth_entries_to_fields_dict(your_api_key, query_params)\n >>> query_params['oauth_signature'] = create('GET', api_url, query_params, your_hmac_key)\n\n " query = normalize_query_parameters(params) base_string = concatenate_request_elements(method, url, query) digest = hmac_sha1(base_string, hmac_key) return encode(digest)
-465,169,348,556,150,140
Generates the OAuth1.0 signature used as the value for the query string parameter 'oauth_signature' :param method: HTTP method that will be used to send the request ( 'GET' | 'POST' ); EmailageClient uses GET :param url: API domain and endpoint up to the ? :param params: user-provided query string parameters and the OAuth1.0 parameters :method add_oauth_entries_to_fields_dict: :param hmac_key: for Emailage users, this is your consumer token with an '&' (ampersand) appended to the end :return: str value used for oauth_signature :type method: str :type url: str :type params: dict :type hmac_key: str :Example: >>> from emailage.signature import add_oauth_entries_to_fields_dict, create >>> your_api_key = 'SOME_KEY' >>> your_hmac_key = 'SOME_SECRET' + '&' >>> api_url = 'https://sandbox.emailage.com/emailagevalidator/' >>> query_params = { 'query': '[email protected]', 'user_email': '[email protected]' } >>> query_params = add_oauth_entries_to_fields_dict(your_api_key, query_params) >>> query_params['oauth_signature'] = create('GET', api_url, query_params, your_hmac_key)
emailage/signature.py
create
bluefish6/Emailage_Python
python
def create(method, url, params, hmac_key): " Generates the OAuth1.0 signature used as the value for the query string parameter 'oauth_signature'\n \n :param method: HTTP method that will be used to send the request ( 'GET' | 'POST' ); EmailageClient uses GET\n :param url: API domain and endpoint up to the ?\n :param params: user-provided query string parameters and the OAuth1.0 parameters\n :method add_oauth_entries_to_fields_dict:\n :param hmac_key: for Emailage users, this is your consumer token with an '&' (ampersand) appended to the end\n\n :return: str value used for oauth_signature\n\n :type method: str\n :type url: str\n :type params: dict\n :type hmac_key: str\n\n :Example:\n\n >>> from emailage.signature import add_oauth_entries_to_fields_dict, create\n >>> your_api_key = 'SOME_KEY'\n >>> your_hmac_key = 'SOME_SECRET' + '&'\n >>> api_url = 'https://sandbox.emailage.com/emailagevalidator/'\n >>> query_params = { 'query': '[email protected]', 'user_email': '[email protected]' }\n >>> query_params = add_oauth_entries_to_fields_dict(your_api_key, query_params)\n >>> query_params['oauth_signature'] = create('GET', api_url, query_params, your_hmac_key)\n\n " query = normalize_query_parameters(params) base_string = concatenate_request_elements(method, url, query) digest = hmac_sha1(base_string, hmac_key) return encode(digest)
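The doctest in the record above already shows the intended call order; the sketch below repeats it with a fixed nonce and timestamp so the resulting signature is reproducible (the endpoint URL and key values are placeholders, not real Emailage credentials):

from emailage.signature import add_oauth_entries_to_fields_dict, create

fields = add_oauth_entries_to_fields_dict('YOUR_API_KEY', {'query': '[email protected]'},
                                          nonce='abc', timestamp=1700000000)
# with nonce and timestamp pinned, the signature below is deterministic
fields['oauth_signature'] = create('GET', 'https://example.com/endpoint/', fields, 'YOUR_API_SECRET' + '&')
print(fields['oauth_signature'])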
def load_custom_boot9(path: str, dev: bool=False): 'Load keys from a custom ARM9 bootROM path.' if path: from pyctr.crypto import CryptoEngine CryptoEngine(boot9=path, dev=dev)
-6,919,565,763,664,082,000
Load keys from a custom ARM9 bootROM path.
ninfs/mount/_common.py
load_custom_boot9
Jhynjhiruu/ninfs
python
def load_custom_boot9(path: str, dev: bool=False): if path: from pyctr.crypto import CryptoEngine CryptoEngine(boot9=path, dev=dev)
def __repr__(self): ' return tree as JSON serialized dictionary ' return self.pretty_print(self.__dict__)
-4,872,637,371,779,116,000
return tree as JSON serialized dictionary
gametree_lite.py
__repr__
deadsmond/gametree
python
def __repr__(self): ' ' return self.pretty_print(self.__dict__)
@staticmethod def pretty_print(dictionary: dict): ' return pretty printed dictionary as JSON serialized object ' return json.dumps(dictionary, indent=4)
5,869,241,633,187,367,000
return pretty printed dictionary as JSON serialized object
gametree_lite.py
pretty_print
deadsmond/gametree
python
@staticmethod def pretty_print(dictionary: dict): ' ' return json.dumps(dictionary, indent=4)
def __init__(self, nodes: dict=None, groups: dict=None, leafs: list=None, players_list: list=None): '\n GameTree class used to represent game tree:\n\n Attributes\n ----------\n nodes : dict\n dictionary of nodes;\n groups : dict\n dictionary of groups\n leafs : list\n list of leafs, calculated on demand\n players_list: list\n list of players names, indicating which game income from list is connected to which player\n ' '\n dictionary of nodes:\n Attributes\n ----------\n node : dict\n dictionary representing node;\n\n Attributes\n ----------\n value : float\n value of node (the prize for reaching the node)\n parents : dict\n parents of node - can be multiple, represented by dict of ids and connection values\n children : dict\n children of node - can be multiple, represented by dict of ids and connection values\n probability : float\n probability of node - 1 means there is no random choice\n branch : dict\n totals of branch, to avoid tree walking\n\n Attributes\n ----------\n value : float\n total value of branch\n probability : float\n probability of reaching this node in game\n ' self._nodes = {} self._groups = ({} if (groups is None) else groups) self._leafs = ([] if (leafs is None) else leafs) self._players_list = ([] if (players_list is None) else players_list) (self.add_node({'id': 'root', 'player': '1'}) if (nodes is None) else nodes)
-7,397,175,947,897,329,000
GameTree class used to represent a game tree: Attributes ---------- nodes : dict dictionary of nodes; groups : dict dictionary of groups leafs : list list of leafs, calculated on demand players_list: list list of player names, indicating which game income from the value list is connected to which player
gametree_lite.py
__init__
deadsmond/gametree
python
def __init__(self, nodes: dict=None, groups: dict=None, leafs: list=None, players_list: list=None): '\n GameTree class used to represent game tree:\n\n Attributes\n ----------\n nodes : dict\n dictionary of nodes;\n groups : dict\n dictionary of groups\n leafs : list\n list of leafs, calculated on demand\n players_list: list\n list of players names, indicating which game income from list is connected to which player\n ' '\n dictionary of nodes:\n Attributes\n ----------\n node : dict\n dictionary representing node;\n\n Attributes\n ----------\n value : float\n value of node (the prize for reaching the node)\n parents : dict\n parents of node - can be multiple, represented by dict of ids and connection values\n children : dict\n children of node - can be multiple, represented by dict of ids and connection values\n probability : float\n probability of node - 1 means there is no random choice\n branch : dict\n totals of branch, to avoid tree walking\n\n Attributes\n ----------\n value : float\n total value of branch\n probability : float\n probability of reaching this node in game\n ' self._nodes = {} self._groups = ({} if (groups is None) else groups) self._leafs = ([] if (leafs is None) else leafs) self._players_list = ([] if (players_list is None) else players_list) (self.add_node({'id': 'root', 'player': '1'}) if (nodes is None) else nodes)
def add_node(self, node: dict): "\n add node method. Runs basic validation before adding.\n\n :param dict node: dictionary of node's data\n " if (node.get('id') is not None): if (node['id'] in self._nodes): raise ValueError(('tried to override node %s' % node['id'])) else: raise ValueError('no id for node provided') id_ = node['id'] del node['id'] node['player'] = ('0' if (node.get('player') is None) else node['player']) node['value'] = ([0, 0] if (node.get('value') is None) else node['value']) node['parents'] = ({} if (node.get('parents') is None) else node['parents']) node['children'] = ({} if (node.get('children') is None) else node['children']) node['probability'] = (1 if (node.get('probability') is None) else node['probability']) node['branch'] = ({} if (node.get('branch') is None) else node['branch']) node['branch']['probability'] = (1 if (node['branch'].get('probability') is None) else node['branch']['probability']) if (node['player'] not in self._players_list): self._players_list.append(node['player']) for parent in node['parents']: self._nodes[parent]['children'][id_] = str(node['parents'][parent]) if node['parents']: node['depth'] = (self._nodes[str(list(node['parents'].keys())[0])]['depth'] + 1) else: node['depth'] = (0 if (node.get('depth') is None) else node['depth']) branch_probability = 0 for parent in node['parents']: branch_probability += self._nodes[parent]['branch']['probability'] node['branch']['probability'] = (branch_probability * node['probability']) if ((id_ is not 'root') and (not node['parents'])): raise ValueError(('node [%s] is not connected to the tree - parents are empty' % id_)) self._nodes[id_] = node
672,232,565,832,253,000
add node method. Runs basic validation before adding. :param dict node: dictionary of node's data
gametree_lite.py
add_node
deadsmond/gametree
python
def add_node(self, node: dict): "\n add node method. Runs basic validation before adding.\n\n :param dict node: dictionary of node's data\n " if (node.get('id') is not None): if (node['id'] in self._nodes): raise ValueError(('tried to override node %s' % node['id'])) else: raise ValueError('no id for node provided') id_ = node['id'] del node['id'] node['player'] = ('0' if (node.get('player') is None) else node['player']) node['value'] = ([0, 0] if (node.get('value') is None) else node['value']) node['parents'] = ({} if (node.get('parents') is None) else node['parents']) node['children'] = ({} if (node.get('children') is None) else node['children']) node['probability'] = (1 if (node.get('probability') is None) else node['probability']) node['branch'] = ({} if (node.get('branch') is None) else node['branch']) node['branch']['probability'] = (1 if (node['branch'].get('probability') is None) else node['branch']['probability']) if (node['player'] not in self._players_list): self._players_list.append(node['player']) for parent in node['parents']: self._nodes[parent]['children'][id_] = str(node['parents'][parent]) if node['parents']: node['depth'] = (self._nodes[str(list(node['parents'].keys())[0])]['depth'] + 1) else: node['depth'] = (0 if (node.get('depth') is None) else node['depth']) branch_probability = 0 for parent in node['parents']: branch_probability += self._nodes[parent]['branch']['probability'] node['branch']['probability'] = (branch_probability * node['probability']) if ((id_ is not 'root') and (not node['parents'])): raise ValueError(('node [%s] is not connected to the tree - parents are empty' % id_)) self._nodes[id_] = node
def add_vertex(self, id_: str, player: str, parents: dict): '\n add vertex from simplified function:\n\n :param str id_: id of the node\n :param str player: id of player owning the node\n :param dict parents: dictionary of parents for the node\n ' self.add_node({'id': id_, 'player': player, 'parents': parents})
3,655,447,033,792,964,600
add vertex from simplified function: :param str id_: id of the node :param str player: id of player owning the node :param dict parents: dictionary of parents for the node
gametree_lite.py
add_vertex
deadsmond/gametree
python
def add_vertex(self, id_: str, player: str, parents: dict): '\n add vertex from simplified function:\n\n :param str id_: id of the node\n :param str player: id of player owning the node\n :param dict parents: dictionary of parents for the node\n ' self.add_node({'id': id_, 'player': player, 'parents': parents})
def add_leaf(self, id_: str, value: list, parents: dict): "\n add leaf from simplified function:\n\n :param str id_: id of the node\n :param list value: list of node's values\n :param dict parents: dictionary of parents for the node\n " self.add_node({'id': id_, 'value': value, 'parents': parents})
1,990,150,339,781,963,000
add leaf from simplified function: :param str id_: id of the node :param list value: list of node's values :param dict parents: dictionary of parents for the node
gametree_lite.py
add_leaf
deadsmond/gametree
python
def add_leaf(self, id_: str, value: list, parents: dict): "\n add leaf from simplified function:\n\n :param str id_: id of the node\n :param list value: list of node's values\n :param dict parents: dictionary of parents for the node\n " self.add_node({'id': id_, 'value': value, 'parents': parents})
def copy_node(self, from_: str, to_: str): "\n create a copy of node's properties in another node\n\n :param str from_: origin node of properties\n :param str to_: destination node for properties\n " self._nodes[to_] = dict(self._nodes[from_])
-6,969,693,781,149,090
create a copy of node's properties in another node :param str from_: origin node of properties :param str to_: destination node for properties
gametree_lite.py
copy_node
deadsmond/gametree
python
def copy_node(self, from_: str, to_: str): "\n create a copy of node's properties in another node\n\n :param str from_: origin node of properties\n :param str to_: destination node for properties\n " self._nodes[to_] = dict(self._nodes[from_])
def change_node(self, node: dict): "\n change node method. Changes attributes provided in node dictionary\n\n :param dict node: dictionary of node's data\n " if (node.get('id') is not None): if (node['id'] not in self._nodes): raise ValueError(('tried to change non-existing node %s' % node['id'])) else: raise ValueError('no id for node provided') id_ = node['id'] del node['id'] for attribute in node: self._nodes[id_][attribute] = node[attribute]
6,497,729,752,810,320,000
change node method. Changes attributes provided in node dictionary :param dict node: dictionary of node's data
gametree_lite.py
change_node
deadsmond/gametree
python
def change_node(self, node: dict): "\n change node method. Changes attributes provided in node dictionary\n\n :param dict node: dictionary of node's data\n " if (node.get('id') is not None): if (node['id'] not in self._nodes): raise ValueError(('tried to change non-existing node %s' % node['id'])) else: raise ValueError('no id for node provided') id_ = node['id'] del node['id'] for attribute in node: self._nodes[id_][attribute] = node[attribute]
def get_parent(self, id_) -> str: ' get id of the parent node ' return list(self._nodes[id_]['parents'].keys())[0]
3,571,465,201,896,533,500
get id of the parent node
gametree_lite.py
get_parent
deadsmond/gametree
python
def get_parent(self, id_) -> str: ' ' return list(self._nodes[id_]['parents'].keys())[0]
def get_player_index(self, id_) -> int: ' return player index from players list order ' return self._players_list.index(self._nodes[id_]['player'])
1,250,716,915,530,892,800
return player index from players list order
gametree_lite.py
get_player_index
deadsmond/gametree
python
def get_player_index(self, id_) -> int: ' ' return self._players_list.index(self._nodes[id_]['player'])
def get_path_to_node(self, id_: str, mode: str='nodes') -> list: "\n get path from root to the node\n :param str id_: id of the node you want to reach from root\n :param str mode: mode of return type, 'nodes' - make path with nodes id, 'moves' - make path with player choices\n " path_t = [] node = id_ while (node is not 'root'): if (mode == 'nodes'): path_t.insert(0, node) elif (mode == 'moves'): parent_ = self.get_parent(node) path_t.insert(0, self._nodes[parent_]['children'][node]) else: raise ValueError('mode variable is not "nodes" nor "moves"') node = self.get_parent(node) if (mode == 'nodes'): path_t.insert(0, 'root') return path_t
-2,872,573,102,666,817,500
get the path from the root to the node :param str id_: id of the node you want to reach from root :param str mode: return mode, 'nodes' - build the path from node ids, 'moves' - build the path from player choices
gametree_lite.py
get_path_to_node
deadsmond/gametree
python
def get_path_to_node(self, id_: str, mode: str='nodes') -> list: "\n get path from root to the node\n :param str id_: id of the node you want to reach from root\n :param str mode: mode of return type, 'nodes' - make path with nodes id, 'moves' - make path with player choices\n " path_t = [] node = id_ while (node is not 'root'): if (mode == 'nodes'): path_t.insert(0, node) elif (mode == 'moves'): parent_ = self.get_parent(node) path_t.insert(0, self._nodes[parent_]['children'][node]) else: raise ValueError('mode variable is not "nodes" nor "moves"') node = self.get_parent(node) if (mode == 'nodes'): path_t.insert(0, 'root') return path_t
@staticmethod def _get_key(obj: dict, val: str) -> list: '\n get list of keys with specified value from obj dictionary\n :param dict obj: chosen dictionary\n :param str val: specified value\n ' sublist = [key for (key, value) in obj.items() if (value == val)] if sublist: return sublist else: raise ValueError(('key with value %s does not exist in %s' % (val, obj)))
8,235,374,987,499,246,000
get list of keys with specified value from obj dictionary :param dict obj: chosen dictionary :param str val: specified value
gametree_lite.py
_get_key
deadsmond/gametree
python
@staticmethod def _get_key(obj: dict, val: str) -> list: '\n get list of keys with specified value from obj dictionary\n :param dict obj: chosen dictionary\n :param str val: specified value\n ' sublist = [key for (key, value) in obj.items() if (value == val)] if sublist: return sublist else: raise ValueError(('key with value %s does not exist in %s' % (val, obj)))
def get_tree(self) -> dict: ' return copy of tree nodes structure dict' return dict(self._nodes)
8,268,054,770,871,867,000
return copy of tree nodes structure dict
gametree_lite.py
get_tree
deadsmond/gametree
python
def get_tree(self) -> dict: ' ' return dict(self._nodes)
def calculate_leafs(self): ' calculate inner list of leafs ids ' self._leafs = [node for node in self._nodes if (not self._nodes[node]['children'])]
-5,249,638,405,223,942,000
calculate inner list of leafs ids
gametree_lite.py
calculate_leafs
deadsmond/gametree
python
def calculate_leafs(self): ' ' self._leafs = [node for node in self._nodes if (not self._nodes[node]['children'])]
def get_leafs(self) -> list: ' return list of leafs ids. Will return empty list, if calculate_leafs() has not been called earlier. ' return self._leafs[:]
-8,597,578,595,401,025,000
return list of leafs ids. Will return empty list, if calculate_leafs() has not been called earlier.
gametree_lite.py
get_leafs
deadsmond/gametree
python
def get_leafs(self) -> list: ' ' return self._leafs[:]
def set_group(self, id_: str, player: str, group: list): "\n add list of ids to new group\n :param str id_: id of group\n :param str player: id of player owning the group\n :param list group: list of id's you want to create group with\n " self._groups[id_] = {'player': player, 'group': group}
-7,605,189,599,437,389,000
add list of ids to new group :param str id_: id of group :param str player: id of player owning the group :param list group: list of id's you want to create group with
gametree_lite.py
set_group
deadsmond/gametree
python
def set_group(self, id_: str, player: str, group: list): "\n add list of ids to new group\n :param str id_: id of group\n :param str player: id of player owning the group\n :param list group: list of id's you want to create group with\n " self._groups[id_] = {'player': player, 'group': group}
def get_groups(self) -> dict: ' return dictionary of groups ' return dict(self._groups)
3,093,298,729,605,135,000
return dictionary of groups
gametree_lite.py
get_groups
deadsmond/gametree
python
def get_groups(self) -> dict: ' ' return dict(self._groups)
def get_groups_of_player(self, player: str) -> list: " return list of all groups id's where player is the owner " return [group for group in self._groups if (self._groups[group]['player'] == player)]
437,090,716,761,561,800
return list of all groups id's where player is the owner
gametree_lite.py
get_groups_of_player
deadsmond/gametree
python
def get_groups_of_player(self, player: str) -> list: " " return [group for group in self._groups if (self._groups[group]['player'] == player)]
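A short usage sketch tying the GameTree records above together (assuming the class is importable from the gametree_lite module named in the path field; the node ids and payoffs are made up for illustration):

from gametree_lite import GameTree

gt = GameTree()                                    # constructor adds a 'root' node owned by player '1'
gt.add_vertex('L', player='2', parents={'root': 'left'})
gt.add_leaf('LL', value=[3, 1], parents={'L': 'left'})
gt.add_leaf('LR', value=[0, 0], parents={'L': 'right'})
gt.calculate_leafs()
print(gt.get_leafs())                              # ['LL', 'LR']
print(gt.get_path_to_node('LL', mode='moves'))     # ['left', 'left']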
def variable_position_placement_generator(positions): '\n Use itertools.product to generate a list of tuple with different number of 0 and 1. The length of the tuple is the\n length of the input positions.\n Using itertools.compress, for each output from itertools.product pairing with input positions, we generate a list of\n positions where only those with the same index as 1 would be yielded.\n\n :param positions: list of all identified positions for the modification on the sequence\n ' for i in itertools.product([0, 1], repeat=len(positions)): (yield list(itertools.compress(positions, i)))
2,728,138,451,279,051,000
Use itertools.product to generate tuples with different combinations of 0 and 1. The length of each tuple equals the length of the input positions. Using itertools.compress, each output from itertools.product is paired with the input positions to yield a list containing only the positions whose mask value is 1. :param positions: list of all identified positions for the modification on the sequence
sequal/sequence.py
variable_position_placement_generator
bschulzlab/dialib_standalone
python
def variable_position_placement_generator(positions): '\n Use itertools.product to generate a list of tuple with different number of 0 and 1. The length of the tuple is the\n length of the input positions.\n Using itertools.compress, for each output from itertools.product pairing with input positions, we generate a list of\n positions where only those with the same index as 1 would be yielded.\n\n :param positions: list of all identified positions for the modification on the sequence\n ' for i in itertools.product([0, 1], repeat=len(positions)): (yield list(itertools.compress(positions, i)))
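To make the generator's output concrete, a short worked example (the function body is taken from the record above; only the final print is added). For two candidate positions it yields every subset, i.e. every way of placing or omitting the modification:

import itertools

def variable_position_placement_generator(positions):
    # yields every subset of the candidate modification positions
    for mask in itertools.product([0, 1], repeat=len(positions)):
        yield list(itertools.compress(positions, mask))

print(list(variable_position_placement_generator([2, 5])))
# [[], [5], [2], [2, 5]]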
def __init__(self, seq, encoder=AminoAcid, mods=None, parse=True, parser_ignore=None, mod_position='right'): '\n :param mod_position\n Indicate the position of the modifications relative to the base block it is supposed to modify\n :type mod_position: str\n :param mods\n Dictionary whose keys are the positions within the sequence and values are array of modifications at those\n positions\n :type mods: dict\n :param encoder\n Class for encoding of sequence.\n :type encoder: BaseBlock\n :param seq\n String or array of strings or array of AminoAcid objects. The parser will recursively look over each string at\n deepest level and identify individual modifications or amino acids for processing\n :type seq: iterable\n Python iterable where the deepest level is a string\n \n ' if (type(seq) is not Sequence): if (not mods): self.mods = {} else: self.mods = mods self.encoder = encoder if (not parser_ignore): self.parser_ignore = [] else: self.parser_ignore = parser_ignore self.seq = [] current_mod = [] current_position = 0 if parse: self.sequence_parse(current_mod, current_position, mod_position, mods, seq) else: for k in seq.__dict__: if (k != 'mods'): setattr(self, k, deepcopy(seq.__dict__[k])) self.seq_length = len(self.seq)
2,356,593,637,083,451,000
:param mod_position Indicate the position of the modifications relative to the base block it is supposed to modify :type mod_position: str :param mods Dictionary whose keys are the positions within the sequence and values are array of modifications at those positions :type mods: dict :param encoder Class for encoding of sequence. :type encoder: BaseBlock :param seq String or array of strings or array of AminoAcid objects. The parser will recursively look over each string at deepest level and identify individual modifications or amino acids for processing :type seq: iterable Python iterable where the deepest level is a string
sequal/sequence.py
__init__
bschulzlab/dialib_standalone
python
def __init__(self, seq, encoder=AminoAcid, mods=None, parse=True, parser_ignore=None, mod_position='right'): '\n :param mod_position\n Indicate the position of the modifications relative to the base block it is supposed to modify\n :type mod_position: str\n :param mods\n Dictionary whose keys are the positions within the sequence and values are array of modifications at those\n positions\n :type mods: dict\n :param encoder\n Class for encoding of sequence.\n :type encoder: BaseBlock\n :param seq\n String or array of strings or array of AminoAcid objects. The parser will recursively look over each string at\n deepest level and identify individual modifications or amino acids for processing\n :type seq: iterable\n Python iterable where the deepest level is a string\n \n ' if (type(seq) is not Sequence): if (not mods): self.mods = {} else: self.mods = mods self.encoder = encoder if (not parser_ignore): self.parser_ignore = [] else: self.parser_ignore = parser_ignore self.seq = [] current_mod = [] current_position = 0 if parse: self.sequence_parse(current_mod, current_position, mod_position, mods, seq) else: for k in seq.__dict__: if (k != 'mods'): setattr(self, k, deepcopy(seq.__dict__[k])) self.seq_length = len(self.seq)
def sequence_parse(self, current_mod, current_position, mod_position, mods, seq): '\n :param seq: sequence input\n :param mods: external modification input\n :param mod_position: modification position relative to the modified residue\n :param current_position: current iterating amino acid position from the input sequence\n :type current_mod: List[Modification]\n ' for (b, m) in self.__load_sequence_iter(iter(seq)): if (not m): if (mod_position == 'left'): if (type(b) == AminoAcid): current_unit = b current_unit.position = current_position else: current_unit = self.encoder(b, current_position) if (current_mod and (not mods)): for i in current_mod: current_unit.set_modification(i) elif ((current_position in self.mods) and current_unit): if (type(self.mods[current_position]) == Modification): current_unit.set_modification(self.mods[current_position]) else: for mod in self.mods[current_position]: current_unit.set_modification(mod) self.seq.append(deepcopy(current_unit)) current_mod = [] if (mod_position == 'right'): if (current_mod and (not mods)): for i in current_mod: self.seq[(current_position - 1)].set_modification(i) if (type(b) == AminoAcid): current_unit = b current_unit.position = current_position else: current_unit = self.encoder(b, current_position) if ((current_position in self.mods) and current_unit): if (type(self.mods[current_position]) == Modification): current_unit.set_modification(self.mods[current_position]) else: for mod in self.mods[current_position]: current_unit.set_modification(mod) self.seq.append(deepcopy(current_unit)) current_mod = [] current_position += 1 elif (not mods): current_mod.append(Modification(b[1:(- 1)]))
-6,007,742,902,697,778,000
:param seq: sequence input :param mods: external modification input :param mod_position: modification position relative to the modified residue :param current_position: current iterating amino acid position from the input sequence :type current_mod: List[Modification]
sequal/sequence.py
sequence_parse
bschulzlab/dialib_standalone
python
def sequence_parse(self, current_mod, current_position, mod_position, mods, seq): '\n :param seq: sequence input\n :param mods: external modification input\n :param mod_position: modification position relative to the modified residue\n :param current_position: current iterating amino acid position from the input sequence\n :type current_mod: List[Modification]\n ' for (b, m) in self.__load_sequence_iter(iter(seq)): if (not m): if (mod_position == 'left'): if (type(b) == AminoAcid): current_unit = b current_unit.position = current_position else: current_unit = self.encoder(b, current_position) if (current_mod and (not mods)): for i in current_mod: current_unit.set_modification(i) elif ((current_position in self.mods) and current_unit): if (type(self.mods[current_position]) == Modification): current_unit.set_modification(self.mods[current_position]) else: for mod in self.mods[current_position]: current_unit.set_modification(mod) self.seq.append(deepcopy(current_unit)) current_mod = [] if (mod_position == 'right'): if (current_mod and (not mods)): for i in current_mod: self.seq[(current_position - 1)].set_modification(i) if (type(b) == AminoAcid): current_unit = b current_unit.position = current_position else: current_unit = self.encoder(b, current_position) if ((current_position in self.mods) and current_unit): if (type(self.mods[current_position]) == Modification): current_unit.set_modification(self.mods[current_position]) else: for mod in self.mods[current_position]: current_unit.set_modification(mod) self.seq.append(deepcopy(current_unit)) current_mod = [] current_position += 1 elif (not mods): current_mod.append(Modification(b[1:(- 1)]))
def to_stripped_string(self): '\n Return string of the sequence without any modification annotation\n :return: str\n ' seq = '' for i in self.seq: seq += i.value return seq
92,417,537,465,720,400
Return string of the sequence without any modification annotation :return: str
sequal/sequence.py
to_stripped_string
bschulzlab/dialib_standalone
python
def to_stripped_string(self): '\n Return string of the sequence without any modification annotation\n :return: str\n ' seq = for i in self.seq: seq += i.value return seq
def to_string_customize(self, data, annotation_placement='right', block_separator='', annotation_enclose_characters=('[', ']'), individual_annotation_enclose=False, individual_annotation_enclose_characters=('[', ']'), individual_annotation_separator=''): '\n\n :rtype: str\n :param data: a dictionary where the key is the index position of the amino acid residue and the value is a\n iterable where containing the item needed to be included into the sequence.\n :param annotation_placement: whether the information should be included on the right of the left of the residue\n :param block_separator: separator between each block of annotation information to be included\n :param annotation_enclose_characters: enclosure characters for each annotation cluster\n :param individual_annotation_enclose: whether or not each individual annotation should be enclosed\n :param individual_annotation_enclose_characters: enclosure characters for each individual annotation\n :param individual_annotation_separator: separator for each individual annotation\n :return:\n ' assert (annotation_placement in {'left', 'right'}) seq = [] for i in range(len(self.seq)): seq.append(self.seq[i].value) if (i in data): annotation = [] if individual_annotation_enclose: for v in data[i]: annotation.append('{}{}{}'.format(individual_annotation_enclose_characters[0], v, individual_annotation_enclose_characters[1])) else: annotation = data[i] if (type(annotation) == str): ann = annotation else: ann = individual_annotation_separator.join(annotation) if annotation_enclose_characters: seq.append('{}{}{}'.format(annotation_enclose_characters[0], ann, annotation_enclose_characters[1])) else: seq.append(individual_annotation_separator.join(ann)) return block_separator.join(seq)
7,458,964,784,928,617,000
:rtype: str :param data: a dictionary where the key is the index position of the amino acid residue and the value is an iterable containing the items to be included in the sequence. :param annotation_placement: whether the information should be included on the right or the left of the residue :param block_separator: separator between each block of annotation information to be included :param annotation_enclose_characters: enclosure characters for each annotation cluster :param individual_annotation_enclose: whether or not each individual annotation should be enclosed :param individual_annotation_enclose_characters: enclosure characters for each individual annotation :param individual_annotation_separator: separator for each individual annotation :return:
sequal/sequence.py
to_string_customize
bschulzlab/dialib_standalone
python
def to_string_customize(self, data, annotation_placement='right', block_separator=, annotation_enclose_characters=('[', ']'), individual_annotation_enclose=False, individual_annotation_enclose_characters=('[', ']'), individual_annotation_separator=): '\n\n :rtype: str\n :param data: a dictionary where the key is the index position of the amino acid residue and the value is a\n iterable where containing the item needed to be included into the sequence.\n :param annotation_placement: whether the information should be included on the right of the left of the residue\n :param block_separator: separator between each block of annotation information to be included\n :param annotation_enclose_characters: enclosure characters for each annotation cluster\n :param individual_annotation_enclose: whether or not each individual annotation should be enclosed\n :param individual_annotation_enclose_characters: enclosure characters for each individual annotation\n :param individual_annotation_separator: separator for each individual annotation\n :return:\n ' assert (annotation_placement in {'left', 'right'}) seq = [] for i in range(len(self.seq)): seq.append(self.seq[i].value) if (i in data): annotation = [] if individual_annotation_enclose: for v in data[i]: annotation.append('{}{}{}'.format(individual_annotation_enclose_characters[0], v, individual_annotation_enclose_characters[1])) else: annotation = data[i] if (type(annotation) == str): ann = annotation else: ann = individual_annotation_separator.join(annotation) if annotation_enclose_characters: seq.append('{}{}{}'.format(annotation_enclose_characters[0], ann, annotation_enclose_characters[1])) else: seq.append(individual_annotation_separator.join(ann)) return block_separator.join(seq)
def __init__(self, seq, variable_mods=None, static_mods=None, used_scenarios=None, parse_mod_position=True, mod_position_dict=None, ignore_position=None): '\n Generator for creating modified sequences.\n :type used_scenarios: set\n :type static_mods: List[Modification]\n :type variable_mods: List[Modification]\n :type seq: str\n ' self.seq = seq if static_mods: self.static_mods = static_mods self.static_map = ModificationMap(seq, static_mods, parse_position=parse_mod_position, mod_position_dict=mod_position_dict) self.static_mod_position_dict = self.static_mod_generate() else: self.static_mod_position_dict = {} if ignore_position: self.ignore_position = ignore_position else: self.ignore_position = set() for i in self.static_mod_position_dict: self.ignore_position.add(i) if variable_mods: self.variable_mods = variable_mods if self.static_mod_position_dict: self.variable_map = ModificationMap(seq, variable_mods, ignore_positions=self.ignore_position, parse_position=parse_mod_position, mod_position_dict=mod_position_dict) else: self.variable_map = ModificationMap(seq, variable_mods) self.variable_mod_number = len(variable_mods) else: self.variable_mods = None self.variable_map_scenarios = {} if used_scenarios: self.used_scenarios_set = used_scenarios else: self.used_scenarios_set = set()
442,156,656,313,768,800
Generator for creating modified sequences. :type used_scenarios: set :type static_mods: List[Modification] :type variable_mods: List[Modification] :type seq: str
sequal/sequence.py
__init__
bschulzlab/dialib_standalone
python
def __init__(self, seq, variable_mods=None, static_mods=None, used_scenarios=None, parse_mod_position=True, mod_position_dict=None, ignore_position=None): '\n Generator for creating modified sequences.\n :type used_scenarios: set\n :type static_mods: List[Modification]\n :type variable_mods: List[Modification]\n :type seq: str\n ' self.seq = seq if static_mods: self.static_mods = static_mods self.static_map = ModificationMap(seq, static_mods, parse_position=parse_mod_position, mod_position_dict=mod_position_dict) self.static_mod_position_dict = self.static_mod_generate() else: self.static_mod_position_dict = {} if ignore_position: self.ignore_position = ignore_position else: self.ignore_position = set() for i in self.static_mod_position_dict: self.ignore_position.add(i) if variable_mods: self.variable_mods = variable_mods if self.static_mod_position_dict: self.variable_map = ModificationMap(seq, variable_mods, ignore_positions=self.ignore_position, parse_position=parse_mod_position, mod_position_dict=mod_position_dict) else: self.variable_map = ModificationMap(seq, variable_mods) self.variable_mod_number = len(variable_mods) else: self.variable_mods = None self.variable_map_scenarios = {} if used_scenarios: self.used_scenarios_set = used_scenarios else: self.used_scenarios_set = set()
def variable_mod_generate_scenarios(self): '\n Recursively generating all possible position compositions for each variable modification and add them to\n self.variable_map_scenarios dictionary where key is the value attr of the modification while the value is the\n position list\n ' for i in self.variable_mods: positions = self.variable_map.get_mod_positions(str(i)) if (i.value not in self.variable_map_scenarios): if (not i.all_fill): self.variable_map_scenarios[i.value] = list(variable_position_placement_generator(positions)) else: self.variable_map_scenarios[i.value] = [[], positions]
-6,510,374,812,757,205,000
Recursively generating all possible position compositions for each variable modification and add them to self.variable_map_scenarios dictionary where key is the value attr of the modification while the value is the position list
sequal/sequence.py
variable_mod_generate_scenarios
bschulzlab/dialib_standalone
python
def variable_mod_generate_scenarios(self): '\n Recursively generating all possible position compositions for each variable modification and add them to\n self.variable_map_scenarios dictionary where key is the value attr of the modification while the value is the\n position list\n ' for i in self.variable_mods: positions = self.variable_map.get_mod_positions(str(i)) if (i.value not in self.variable_map_scenarios): if (not i.all_fill): self.variable_map_scenarios[i.value] = list(variable_position_placement_generator(positions)) else: self.variable_map_scenarios[i.value] = [[], positions]
def render_generic_exception(exception): 'Log a traceback and return code 500 with a simple JSON\n The CORS header is set as usual. Without this, an error could lead to browsers\n caching a response without the correct CORS header.\n ' current_app.logger.error(f'Exception: {exception}') current_app.logger.error(''.join(traceback.format_tb(exception.__traceback__))) try: return make_response(jsonify(error=str(exception)), 500) except: return make_response('unhandled error', 500)
-1,476,872,618,221,553,700
Log a traceback and return code 500 with a simple JSON The CORS header is set as usual. Without this, an error could lead to browsers caching a response without the correct CORS header.
newapi/ooniapi/views.py
render_generic_exception
hellais/ooni-measurements
python
def render_generic_exception(exception): 'Log a traceback and return code 500 with a simple JSON\n The CORS header is set as usual. Without this, an error could lead to browsers\n caching a response without the correct CORS header.\n ' current_app.logger.error(f'Exception: {exception}') current_app.logger.error(''.join(traceback.format_tb(exception.__traceback__))) try: return make_response(jsonify(error=str(exception)), 500) except: return make_response('unhandled error', 500)
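The record above only defines the handler; as a minimal, hypothetical sketch (the app object and import path are assumptions, not part of the source), such a catch-all handler is typically wired into a Flask app like this, assuming render_generic_exception is in scope:

from flask import Flask

app = Flask(__name__)
# route every unhandled exception through the JSON-producing handler above
app.register_error_handler(Exception, render_generic_exception)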
def pixel_unshuffle(input, downscale_factor): '\n input: batchSize * c * k*w * k*h\n downscale_factor: k\n batchSize * c * k*w * k*h -> batchSize * k*k*c * w * h\n ' c = input.shape[1] kernel = torch.zeros(size=[((downscale_factor * downscale_factor) * c), 1, downscale_factor, downscale_factor], device=input.device) for y in range(downscale_factor): for x in range(downscale_factor): kernel[(x + (y * downscale_factor))::(downscale_factor * downscale_factor), 0, y, x] = 1 return F.conv2d(input, kernel, stride=downscale_factor, groups=c)
-4,688,762,636,236,403,000
input: batchSize * c * k*w * k*h downscale_factor: k batchSize * c * k*w * k*h -> batchSize * k*k*c * w * h
src/model/PixelUnShuffle.py
pixel_unshuffle
laowng/GISR
python
def pixel_unshuffle(input, downscale_factor): '\n input: batchSize * c * k*w * k*h\n downscale_factor: k\n batchSize * c * k*w * k*h -> batchSize * k*k*c * w * h\n ' c = input.shape[1] kernel = torch.zeros(size=[((downscale_factor * downscale_factor) * c), 1, downscale_factor, downscale_factor], device=input.device) for y in range(downscale_factor): for x in range(downscale_factor): kernel[(x + (y * downscale_factor))::(downscale_factor * downscale_factor), 0, y, x] = 1 return F.conv2d(input, kernel, stride=downscale_factor, groups=c)
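A small, hypothetical sanity check for the shape transform described above (assumes torch is installed, that the module path from the record is importable, and uses made-up tensor sizes purely for illustration):

import torch
import torch.nn.functional as F
from src.model.PixelUnShuffle import pixel_unshuffle  # path taken from the record above; adjust to your layout

x = torch.arange(1 * 3 * 8 * 8, dtype=torch.float32).reshape(1, 3, 8, 8)
y = pixel_unshuffle(x, downscale_factor=2)
assert y.shape == (1, 12, 4, 4)  # c * k*k channels, width and height divided by k
# the built-in pixel_shuffle reverses the rearrangement exactly
assert torch.allclose(F.pixel_shuffle(y, upscale_factor=2), x)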
def forward(self, input): '\n input: batchSize * c * k*w * k*h\n downscale_factor: k\n batchSize * c * k*w * k*h -> batchSize * k*k*c * w * h\n ' return pixel_unshuffle(input, self.downscale_factor)
4,646,901,910,324,699,000
input: batchSize * c * k*w * k*h downscale_factor: k batchSize * c * k*w * k*h -> batchSize * k*k*c * w * h
src/model/PixelUnShuffle.py
forward
laowng/GISR
python
def forward(self, input): '\n input: batchSize * c * k*w * k*h\n downscale_factor: k\n batchSize * c * k*w * k*h -> batchSize * k*k*c * w * h\n ' return pixel_unshuffle(input, self.downscale_factor)
def load_annotations(self, ann_file): 'Load annotation from COCO style annotation file.\n Args:\n ann_file (str): Path of annotation file.\n Returns:\n list[dict]: Annotation info from COCO api.\n ' self.coco = COCO(ann_file) self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES) self.cat2label = {cat_id: i for (i, cat_id) in enumerate(self.cat_ids)} self.img_ids = self.coco.get_img_ids() data_infos = [] for i in self.img_ids: info = self.coco.load_imgs([i])[0] info['filename'] = info['file_name'] data_infos.append(info) return data_infos
-2,126,208,448,530,252,000
Load annotation from COCO style annotation file. Args: ann_file (str): Path of annotation file. Returns: list[dict]: Annotation info from COCO api.
mmdet/datasets/coco_car.py
load_annotations
invite-you/mmdetection
python
def load_annotations(self, ann_file): 'Load annotation from COCO style annotation file.\n Args:\n ann_file (str): Path of annotation file.\n Returns:\n list[dict]: Annotation info from COCO api.\n ' self.coco = COCO(ann_file) self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES) self.cat2label = {cat_id: i for (i, cat_id) in enumerate(self.cat_ids)} self.img_ids = self.coco.get_img_ids() data_infos = [] for i in self.img_ids: info = self.coco.load_imgs([i])[0] info['filename'] = info['file_name'] data_infos.append(info) return data_infos
def get_ann_info(self, idx): 'Get COCO annotation by index.\n Args:\n idx (int): Index of data.\n Returns:\n dict: Annotation info of specified index.\n ' img_id = self.data_infos[idx]['id'] ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) ann_info = self.coco.load_anns(ann_ids) return self._parse_ann_info(self.data_infos[idx], ann_info)
3,511,945,127,863,459,000
Get COCO annotation by index. Args: idx (int): Index of data. Returns: dict: Annotation info of specified index.
mmdet/datasets/coco_car.py
get_ann_info
invite-you/mmdetection
python
def get_ann_info(self, idx): 'Get COCO annotation by index.\n Args:\n idx (int): Index of data.\n Returns:\n dict: Annotation info of specified index.\n ' img_id = self.data_infos[idx]['id'] ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) ann_info = self.coco.load_anns(ann_ids) return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx): 'Get COCO category ids by index.\n Args:\n idx (int): Index of data.\n Returns:\n list[int]: All categories in the image of specified index.\n ' img_id = self.data_infos[idx]['id'] ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) ann_info = self.coco.load_anns(ann_ids) return [ann['category_id'] for ann in ann_info]
1,445,273,419,346,334,000
Get COCO category ids by index. Args: idx (int): Index of data. Returns: list[int]: All categories in the image of specified index.
mmdet/datasets/coco_car.py
get_cat_ids
invite-you/mmdetection
python
def get_cat_ids(self, idx): 'Get COCO category ids by index.\n Args:\n idx (int): Index of data.\n Returns:\n list[int]: All categories in the image of specified index.\n ' img_id = self.data_infos[idx]['id'] ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) ann_info = self.coco.load_anns(ann_ids) return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32): 'Filter images too small or without ground truths.' valid_inds = [] ids_with_ann = set((_['image_id'] for _ in self.coco.anns.values())) ids_in_cat = set() for (i, class_id) in enumerate(self.cat_ids): ids_in_cat |= set(self.coco.cat_img_map[class_id]) ids_in_cat &= ids_with_ann valid_img_ids = [] for (i, img_info) in enumerate(self.data_infos): img_id = self.img_ids[i] if (self.filter_empty_gt and (img_id not in ids_in_cat)): continue if (min(img_info['width'], img_info['height']) >= min_size): valid_inds.append(i) valid_img_ids.append(img_id) self.img_ids = valid_img_ids return valid_inds
988,372,852,360,179,500
Filter images too small or without ground truths.
mmdet/datasets/coco_car.py
_filter_imgs
invite-you/mmdetection
python
def _filter_imgs(self, min_size=32): valid_inds = [] ids_with_ann = set((_['image_id'] for _ in self.coco.anns.values())) ids_in_cat = set() for (i, class_id) in enumerate(self.cat_ids): ids_in_cat |= set(self.coco.cat_img_map[class_id]) ids_in_cat &= ids_with_ann valid_img_ids = [] for (i, img_info) in enumerate(self.data_infos): img_id = self.img_ids[i] if (self.filter_empty_gt and (img_id not in ids_in_cat)): continue if (min(img_info['width'], img_info['height']) >= min_size): valid_inds.append(i) valid_img_ids.append(img_id) self.img_ids = valid_img_ids return valid_inds
def _parse_ann_info(self, img_info, ann_info): 'Parse bbox and mask annotation.\n Args:\n ann_info (list[dict]): Annotation info of an image.\n with_mask (bool): Whether to parse mask annotations.\n Returns:\n dict: A dict containing the following keys: bboxes, bboxes_ignore, labels, masks, seg_map. "masks" are raw annotations and not decoded into binary masks.\n ' gt_bboxes = [] gt_labels = [] gt_bboxes_ignore = [] gt_masks_ann = [] for (i, ann) in enumerate(ann_info): if ann.get('ignore', False): continue (x1, y1, w, h) = ann['bbox'] inter_w = max(0, (min((x1 + w), img_info['width']) - max(x1, 0))) inter_h = max(0, (min((y1 + h), img_info['height']) - max(y1, 0))) if ((inter_w * inter_h) == 0): continue if ((ann['area'] <= 0) or (w < 1) or (h < 1)): continue if (ann['category_id'] not in self.cat_ids): continue bbox = [x1, y1, (x1 + w), (y1 + h)] if ann.get('iscrowd', False): gt_bboxes_ignore.append(bbox) else: gt_bboxes.append(bbox) gt_labels.append(self.cat2label[ann['category_id']]) gt_masks_ann.append(ann.get('segmentation', None)) if gt_bboxes: gt_bboxes = np.array(gt_bboxes, dtype=np.float32) gt_labels = np.array(gt_labels, dtype=np.int64) else: gt_bboxes = np.zeros((0, 4), dtype=np.float32) gt_labels = np.array([], dtype=np.int64) if gt_bboxes_ignore: gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) else: gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) seg_map = img_info['filename'].replace('jpg', 'png') ann = dict(bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=seg_map) return ann
-1,308,057,760,310,386,200
Parse bbox and mask annotation. Args: ann_info (list[dict]): Annotation info of an image. with_mask (bool): Whether to parse mask annotations. Returns: dict: A dict containing the following keys: bboxes, bboxes_ignore, labels, masks, seg_map. "masks" are raw annotations and not decoded into binary masks.
mmdet/datasets/coco_car.py
_parse_ann_info
invite-you/mmdetection
python
def _parse_ann_info(self, img_info, ann_info): 'Parse bbox and mask annotation.\n Args:\n ann_info (list[dict]): Annotation info of an image.\n with_mask (bool): Whether to parse mask annotations.\n Returns:\n dict: A dict containing the following keys: bboxes, bboxes_ignore, labels, masks, seg_map. "masks" are raw annotations and not decoded into binary masks.\n ' gt_bboxes = [] gt_labels = [] gt_bboxes_ignore = [] gt_masks_ann = [] for (i, ann) in enumerate(ann_info): if ann.get('ignore', False): continue (x1, y1, w, h) = ann['bbox'] inter_w = max(0, (min((x1 + w), img_info['width']) - max(x1, 0))) inter_h = max(0, (min((y1 + h), img_info['height']) - max(y1, 0))) if ((inter_w * inter_h) == 0): continue if ((ann['area'] <= 0) or (w < 1) or (h < 1)): continue if (ann['category_id'] not in self.cat_ids): continue bbox = [x1, y1, (x1 + w), (y1 + h)] if ann.get('iscrowd', False): gt_bboxes_ignore.append(bbox) else: gt_bboxes.append(bbox) gt_labels.append(self.cat2label[ann['category_id']]) gt_masks_ann.append(ann.get('segmentation', None)) if gt_bboxes: gt_bboxes = np.array(gt_bboxes, dtype=np.float32) gt_labels = np.array(gt_labels, dtype=np.int64) else: gt_bboxes = np.zeros((0, 4), dtype=np.float32) gt_labels = np.array([], dtype=np.int64) if gt_bboxes_ignore: gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) else: gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) seg_map = img_info['filename'].replace('jpg', 'png') ann = dict(bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=seg_map) return ann
def xyxy2xywh(self, bbox): 'Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO\n evaluation.\n Args:\n bbox (numpy.ndarray): The bounding boxes, shape (4, ), in\n ``xyxy`` order.\n Returns:\n list[float]: The converted bounding boxes, in ``xywh`` order.\n ' _bbox = bbox.tolist() return [_bbox[0], _bbox[1], (_bbox[2] - _bbox[0]), (_bbox[3] - _bbox[1])]
6,002,676,184,223,694,000
Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO evaluation. Args: bbox (numpy.ndarray): The bounding boxes, shape (4, ), in ``xyxy`` order. Returns: list[float]: The converted bounding boxes, in ``xywh`` order.
mmdet/datasets/coco_car.py
xyxy2xywh
invite-you/mmdetection
python
def xyxy2xywh(self, bbox): 'Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO\n evaluation.\n Args:\n bbox (numpy.ndarray): The bounding boxes, shape (4, ), in\n ``xyxy`` order.\n Returns:\n list[float]: The converted bounding boxes, in ``xywh`` order.\n ' _bbox = bbox.tolist() return [_bbox[0], _bbox[1], (_bbox[2] - _bbox[0]), (_bbox[3] - _bbox[1])]
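As a quick worked example of the conversion above (the numbers are made up): a box given by its two corners becomes top-left corner plus width and height.

x1, y1, x2, y2 = 10.0, 20.0, 110.0, 70.0
xywh = [x1, y1, x2 - x1, y2 - y1]
assert xywh == [10.0, 20.0, 100.0, 50.0]  # the result xyxy2xywh would produce for this box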
def _proposal2json(self, results): 'Convert proposal results to COCO json style.' json_results = [] for idx in range(len(self)): img_id = self.img_ids[idx] bboxes = results[idx] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = 1 json_results.append(data) return json_results
1,776,314,576,809,435,600
Convert proposal results to COCO json style.
mmdet/datasets/coco_car.py
_proposal2json
invite-you/mmdetection
python
def _proposal2json(self, results): json_results = [] for idx in range(len(self)): img_id = self.img_ids[idx] bboxes = results[idx] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = 1 json_results.append(data) return json_results
def _det2json(self, results): 'Convert detection results to COCO json style.' json_results = [] for idx in range(len(self)): img_id = self.img_ids[idx] result = results[idx] for label in range(len(result)): bboxes = result[label] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = self.cat_ids[label] json_results.append(data) return json_results
-8,234,219,059,450,971,000
Convert detection results to COCO json style.
mmdet/datasets/coco_car.py
_det2json
invite-you/mmdetection
python
def _det2json(self, results): json_results = [] for idx in range(len(self)): img_id = self.img_ids[idx] result = results[idx] for label in range(len(result)): bboxes = result[label] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = self.cat_ids[label] json_results.append(data) return json_results
def _segm2json(self, results): 'Convert instance segmentation results to COCO json style.' bbox_json_results = [] segm_json_results = [] for idx in range(len(self)): img_id = self.img_ids[idx] (det, seg) = results[idx] for label in range(len(det)): bboxes = det[label] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = self.cat_ids[label] bbox_json_results.append(data) if isinstance(seg, tuple): segms = seg[0][label] mask_score = seg[1][label] else: segms = seg[label] mask_score = [bbox[4] for bbox in bboxes] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(mask_score[i]) data['category_id'] = self.cat_ids[label] if isinstance(segms[i]['counts'], bytes): segms[i]['counts'] = segms[i]['counts'].decode() data['segmentation'] = segms[i] segm_json_results.append(data) return (bbox_json_results, segm_json_results)
-3,094,880,850,971,942,400
Convert instance segmentation results to COCO json style.
mmdet/datasets/coco_car.py
_segm2json
invite-you/mmdetection
python
def _segm2json(self, results): bbox_json_results = [] segm_json_results = [] for idx in range(len(self)): img_id = self.img_ids[idx] (det, seg) = results[idx] for label in range(len(det)): bboxes = det[label] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = self.cat_ids[label] bbox_json_results.append(data) if isinstance(seg, tuple): segms = seg[0][label] mask_score = seg[1][label] else: segms = seg[label] mask_score = [bbox[4] for bbox in bboxes] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(mask_score[i]) data['category_id'] = self.cat_ids[label] if isinstance(segms[i]['counts'], bytes): segms[i]['counts'] = segms[i]['counts'].decode() data['segmentation'] = segms[i] segm_json_results.append(data) return (bbox_json_results, segm_json_results)
def results2json(self, results, outfile_prefix): 'Dump the detection results to a COCO style json file.\n There are 3 types of results: proposals, bbox predictions, mask\n predictions, and they have different data types. This method will\n automatically recognize the type, and dump them to json files.\n Args:\n results (list[list | tuple | ndarray]): Testing results of the\n dataset.\n outfile_prefix (str): The filename prefix of the json files. If the\n prefix is "somepath/xxx", the json files will be named\n "somepath/xxx.bbox.json", "somepath/xxx.segm.json",\n "somepath/xxx.proposal.json".\n Returns:\n dict[str: str]: Possible keys are "bbox", "segm", "proposal", and values are corresponding filenames.\n ' result_files = dict() if isinstance(results[0], list): json_results = self._det2json(results) result_files['bbox'] = f'{outfile_prefix}.bbox.json' result_files['proposal'] = f'{outfile_prefix}.bbox.json' mmcv.dump(json_results, result_files['bbox']) elif isinstance(results[0], tuple): json_results = self._segm2json(results) result_files['bbox'] = f'{outfile_prefix}.bbox.json' result_files['proposal'] = f'{outfile_prefix}.bbox.json' result_files['segm'] = f'{outfile_prefix}.segm.json' mmcv.dump(json_results[0], result_files['bbox']) mmcv.dump(json_results[1], result_files['segm']) elif isinstance(results[0], np.ndarray): json_results = self._proposal2json(results) result_files['proposal'] = f'{outfile_prefix}.proposal.json' mmcv.dump(json_results, result_files['proposal']) else: raise TypeError('invalid type of results') return result_files
9,173,968,849,306,380,000
Dump the detection results to a COCO style json file. There are 3 types of results: proposals, bbox predictions, mask predictions, and they have different data types. This method will automatically recognize the type, and dump them to json files. Args: results (list[list | tuple | ndarray]): Testing results of the dataset. outfile_prefix (str): The filename prefix of the json files. If the prefix is "somepath/xxx", the json files will be named "somepath/xxx.bbox.json", "somepath/xxx.segm.json", "somepath/xxx.proposal.json". Returns: dict[str: str]: Possible keys are "bbox", "segm", "proposal", and values are corresponding filenames.
mmdet/datasets/coco_car.py
results2json
invite-you/mmdetection
python
def results2json(self, results, outfile_prefix): 'Dump the detection results to a COCO style json file.\n There are 3 types of results: proposals, bbox predictions, mask\n predictions, and they have different data types. This method will\n automatically recognize the type, and dump them to json files.\n Args:\n results (list[list | tuple | ndarray]): Testing results of the\n dataset.\n outfile_prefix (str): The filename prefix of the json files. If the\n prefix is "somepath/xxx", the json files will be named\n "somepath/xxx.bbox.json", "somepath/xxx.segm.json",\n "somepath/xxx.proposal.json".\n Returns:\n dict[str: str]: Possible keys are "bbox", "segm", "proposal", and values are corresponding filenames.\n ' result_files = dict() if isinstance(results[0], list): json_results = self._det2json(results) result_files['bbox'] = f'{outfile_prefix}.bbox.json' result_files['proposal'] = f'{outfile_prefix}.bbox.json' mmcv.dump(json_results, result_files['bbox']) elif isinstance(results[0], tuple): json_results = self._segm2json(results) result_files['bbox'] = f'{outfile_prefix}.bbox.json' result_files['proposal'] = f'{outfile_prefix}.bbox.json' result_files['segm'] = f'{outfile_prefix}.segm.json' mmcv.dump(json_results[0], result_files['bbox']) mmcv.dump(json_results[1], result_files['segm']) elif isinstance(results[0], np.ndarray): json_results = self._proposal2json(results) result_files['proposal'] = f'{outfile_prefix}.proposal.json' mmcv.dump(json_results, result_files['proposal']) else: raise TypeError('invalid type of results') return result_files
def format_results(self, results, jsonfile_prefix=None, **kwargs): 'Format the results to json (standard format for COCO evaluation).\n Args:\n results (list[tuple | numpy.ndarray]): Testing results of the\n dataset.\n jsonfile_prefix (str | None): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., "a/b/prefix".\n If not specified, a temp file will be created. Default: None.\n Returns:\n tuple: (result_files, tmp_dir), result_files is a dict containing the json filepaths, tmp_dir is the temporal directory created for saving json files when jsonfile_prefix is not specified.\n ' assert isinstance(results, list), 'results must be a list' assert (len(results) == len(self)), 'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self)) if (jsonfile_prefix is None): tmp_dir = tempfile.TemporaryDirectory() jsonfile_prefix = osp.join(tmp_dir.name, 'results') else: tmp_dir = None result_files = self.results2json(results, jsonfile_prefix) return (result_files, tmp_dir)
5,435,673,174,381,394,000
Format the results to json (standard format for COCO evaluation). Args: results (list[tuple | numpy.ndarray]): Testing results of the dataset. jsonfile_prefix (str | None): The prefix of json files. It includes the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. Returns: tuple: (result_files, tmp_dir), result_files is a dict containing the json filepaths, tmp_dir is the temporal directory created for saving json files when jsonfile_prefix is not specified.
mmdet/datasets/coco_car.py
format_results
invite-you/mmdetection
python
def format_results(self, results, jsonfile_prefix=None, **kwargs): 'Format the results to json (standard format for COCO evaluation).\n Args:\n results (list[tuple | numpy.ndarray]): Testing results of the\n dataset.\n jsonfile_prefix (str | None): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., "a/b/prefix".\n If not specified, a temp file will be created. Default: None.\n Returns:\n tuple: (result_files, tmp_dir), result_files is a dict containing the json filepaths, tmp_dir is the temporal directory created for saving json files when jsonfile_prefix is not specified.\n ' assert isinstance(results, list), 'results must be a list' assert (len(results) == len(self)), 'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self)) if (jsonfile_prefix is None): tmp_dir = tempfile.TemporaryDirectory() jsonfile_prefix = osp.join(tmp_dir.name, 'results') else: tmp_dir = None result_files = self.results2json(results, jsonfile_prefix) return (result_files, tmp_dir)
def evaluate(self, results, metric='bbox', logger=None, jsonfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=None, metric_items=None): 'Evaluation in COCO protocol.\n Args:\n results (list[list | tuple]): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated. Options are\n \'bbox\', \'segm\', \'proposal\', \'proposal_fast\'.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. Default: None.\n jsonfile_prefix (str | None): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., "a/b/prefix".\n If not specified, a temp file will be created. Default: None.\n classwise (bool): Whether to evaluating the AP for each class.\n proposal_nums (Sequence[int]): Proposal number used for evaluating\n recalls, such as recall@100, recall@1000.\n Default: (100, 300, 1000).\n iou_thrs (Sequence[float], optional): IoU threshold used for\n evaluating recalls/mAPs. If set to a list, the average of all\n IoUs will also be computed. If not specified, [0.50, 0.55,\n 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.\n Default: None.\n metric_items (list[str] | str, optional): Metric items that will\n be returned. If not specified, ``[\'AR@100\', \'AR@300\',\n \'AR@1000\', \'AR_s@1000\', \'AR_m@1000\', \'AR_l@1000\' ]`` will be\n used when ``metric==\'proposal\'``, ``[\'mAP\', \'mAP_50\', \'mAP_75\',\n \'mAP_s\', \'mAP_m\', \'mAP_l\']`` will be used when\n ``metric==\'bbox\' or metric==\'segm\'``.\n Returns:\n dict[str, float]: COCO style evaluation metric.\n ' metrics = (metric if isinstance(metric, list) else [metric]) allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] for metric in metrics: if (metric not in allowed_metrics): raise KeyError(f'metric {metric} is not supported') if (iou_thrs is None): iou_thrs = np.linspace(0.5, 0.95, (int(np.round(((0.95 - 0.5) / 0.05))) + 1), endpoint=True) if (metric_items is not None): if (not isinstance(metric_items, list)): metric_items = [metric_items] (result_files, tmp_dir) = self.format_results(results, jsonfile_prefix) eval_results = {} cocoGt = self.coco for metric in metrics: msg = f'Evaluating {metric}...' 
if (logger is None): msg = ('\n' + msg) print_log(msg, logger=logger) if (metric == 'proposal_fast'): ar = self.fast_eval_recall(results, proposal_nums, iou_thrs, logger='silent') log_msg = [] for (i, num) in enumerate(proposal_nums): eval_results[f'AR@{num}'] = ar[i] log_msg.append(f''' AR@{num} {ar[i]:.4f}''') log_msg = ''.join(log_msg) print_log(log_msg, logger=logger) continue if (metric not in result_files): raise KeyError(f'{metric} is not in results') try: cocoDt = cocoGt.loadRes(result_files[metric]) except IndexError: print_log('The testing results of the whole dataset is empty.', logger=logger, level=logging.ERROR) break iou_type = ('bbox' if (metric == 'proposal') else metric) cocoEval = COCOeval(cocoGt, cocoDt, iou_type) cocoEval.params.catIds = self.cat_ids cocoEval.params.imgIds = self.img_ids cocoEval.params.maxDets = list(proposal_nums) cocoEval.params.iouThrs = iou_thrs coco_metric_names = {'mAP': 0, 'mAP_50': 1, 'mAP_75': 2, 'mAP_s': 3, 'mAP_m': 4, 'mAP_l': 5, 'AR@100': 6, 'AR@300': 7, 'AR@1000': 8, 'AR_s@1000': 9, 'AR_m@1000': 10, 'AR_l@1000': 11} if (metric_items is not None): for metric_item in metric_items: if (metric_item not in coco_metric_names): raise KeyError(f'metric item {metric_item} is not supported') if (metric == 'proposal'): cocoEval.params.useCats = 0 cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() if (metric_items is None): metric_items = ['AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000'] for item in metric_items: val = float(f'{cocoEval.stats[coco_metric_names[item]]:.3f}') eval_results[item] = val else: cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() if classwise: precisions = cocoEval.eval['precision'] assert (len(self.cat_ids) == precisions.shape[2]) results_per_category = [] for (idx, catId) in enumerate(self.cat_ids): nm = self.coco.loadCats(catId)[0] precision = precisions[:, :, idx, 0, (- 1)] precision = precision[(precision > (- 1))] if precision.size: ap = np.mean(precision) else: ap = float('nan') results_per_category.append((f"{nm['name']}", f'{float(ap):0.3f}')) num_columns = min(6, (len(results_per_category) * 2)) results_flatten = list(itertools.chain(*results_per_category)) headers = (['category', 'AP'] * (num_columns // 2)) results_2d = itertools.zip_longest(*[results_flatten[i::num_columns] for i in range(num_columns)]) table_data = [headers] table_data += [result for result in results_2d] table = AsciiTable(table_data) print_log(('\n' + table.table), logger=logger) if (metric_items is None): metric_items = ['mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'] for metric_item in metric_items: key = f'{metric}_{metric_item}' val = float(f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}') eval_results[key] = val ap = cocoEval.stats[:6] eval_results[f'{metric}_mAP_copypaste'] = f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} {ap[4]:.3f} {ap[5]:.3f}' if (tmp_dir is not None): tmp_dir.cleanup() return eval_results
-2,850,738,235,883,896,000
Evaluation in COCO protocol. Args: results (list[list | tuple]): Testing results of the dataset. metric (str | list[str]): Metrics to be evaluated. Options are 'bbox', 'segm', 'proposal', 'proposal_fast'. logger (logging.Logger | str | None): Logger used for printing related information during evaluation. Default: None. jsonfile_prefix (str | None): The prefix of json files. It includes the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. classwise (bool): Whether to evaluating the AP for each class. proposal_nums (Sequence[int]): Proposal number used for evaluating recalls, such as recall@100, recall@1000. Default: (100, 300, 1000). iou_thrs (Sequence[float], optional): IoU threshold used for evaluating recalls/mAPs. If set to a list, the average of all IoUs will also be computed. If not specified, [0.50, 0.55, 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used. Default: None. metric_items (list[str] | str, optional): Metric items that will be returned. If not specified, ``['AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when ``metric=='bbox' or metric=='segm'``. Returns: dict[str, float]: COCO style evaluation metric.
mmdet/datasets/coco_car.py
evaluate
invite-you/mmdetection
python
def evaluate(self, results, metric='bbox', logger=None, jsonfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=None, metric_items=None): 'Evaluation in COCO protocol.\n Args:\n results (list[list | tuple]): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated. Options are\n \'bbox\', \'segm\', \'proposal\', \'proposal_fast\'.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. Default: None.\n jsonfile_prefix (str | None): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., "a/b/prefix".\n If not specified, a temp file will be created. Default: None.\n classwise (bool): Whether to evaluating the AP for each class.\n proposal_nums (Sequence[int]): Proposal number used for evaluating\n recalls, such as recall@100, recall@1000.\n Default: (100, 300, 1000).\n iou_thrs (Sequence[float], optional): IoU threshold used for\n evaluating recalls/mAPs. If set to a list, the average of all\n IoUs will also be computed. If not specified, [0.50, 0.55,\n 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.\n Default: None.\n metric_items (list[str] | str, optional): Metric items that will\n be returned. If not specified, ``[\'AR@100\', \'AR@300\',\n \'AR@1000\', \'AR_s@1000\', \'AR_m@1000\', \'AR_l@1000\' ]`` will be\n used when ``metric==\'proposal\'``, ``[\'mAP\', \'mAP_50\', \'mAP_75\',\n \'mAP_s\', \'mAP_m\', \'mAP_l\']`` will be used when\n ``metric==\'bbox\' or metric==\'segm\'``.\n Returns:\n dict[str, float]: COCO style evaluation metric.\n ' metrics = (metric if isinstance(metric, list) else [metric]) allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] for metric in metrics: if (metric not in allowed_metrics): raise KeyError(f'metric {metric} is not supported') if (iou_thrs is None): iou_thrs = np.linspace(0.5, 0.95, (int(np.round(((0.95 - 0.5) / 0.05))) + 1), endpoint=True) if (metric_items is not None): if (not isinstance(metric_items, list)): metric_items = [metric_items] (result_files, tmp_dir) = self.format_results(results, jsonfile_prefix) eval_results = {} cocoGt = self.coco for metric in metrics: msg = f'Evaluating {metric}...' 
if (logger is None): msg = ('\n' + msg) print_log(msg, logger=logger) if (metric == 'proposal_fast'): ar = self.fast_eval_recall(results, proposal_nums, iou_thrs, logger='silent') log_msg = [] for (i, num) in enumerate(proposal_nums): eval_results[f'AR@{num}'] = ar[i] log_msg.append(f' AR@{num} {ar[i]:.4f}') log_msg = ''.join(log_msg) print_log(log_msg, logger=logger) continue if (metric not in result_files): raise KeyError(f'{metric} is not in results') try: cocoDt = cocoGt.loadRes(result_files[metric]) except IndexError: print_log('The testing results of the whole dataset is empty.', logger=logger, level=logging.ERROR) break iou_type = ('bbox' if (metric == 'proposal') else metric) cocoEval = COCOeval(cocoGt, cocoDt, iou_type) cocoEval.params.catIds = self.cat_ids cocoEval.params.imgIds = self.img_ids cocoEval.params.maxDets = list(proposal_nums) cocoEval.params.iouThrs = iou_thrs coco_metric_names = {'mAP': 0, 'mAP_50': 1, 'mAP_75': 2, 'mAP_s': 3, 'mAP_m': 4, 'mAP_l': 5, 'AR@100': 6, 'AR@300': 7, 'AR@1000': 8, 'AR_s@1000': 9, 'AR_m@1000': 10, 'AR_l@1000': 11} if (metric_items is not None): for metric_item in metric_items: if (metric_item not in coco_metric_names): raise KeyError(f'metric item {metric_item} is not supported') if (metric == 'proposal'): cocoEval.params.useCats = 0 cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() if (metric_items is None): metric_items = ['AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000'] for item in metric_items: val = float(f'{cocoEval.stats[coco_metric_names[item]]:.3f}') eval_results[item] = val else: cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() if classwise: precisions = cocoEval.eval['precision'] assert (len(self.cat_ids) == precisions.shape[2]) results_per_category = [] for (idx, catId) in enumerate(self.cat_ids): nm = self.coco.loadCats(catId)[0] precision = precisions[:, :, idx, 0, (- 1)] precision = precision[(precision > (- 1))] if precision.size: ap = np.mean(precision) else: ap = float('nan') results_per_category.append((f"{nm['name']}", f'{float(ap):0.3f}')) num_columns = min(6, (len(results_per_category) * 2)) results_flatten = list(itertools.chain(*results_per_category)) headers = (['category', 'AP'] * (num_columns // 2)) results_2d = itertools.zip_longest(*[results_flatten[i::num_columns] for i in range(num_columns)]) table_data = [headers] table_data += [result for result in results_2d] table = AsciiTable(table_data) print_log(('\n' + table.table), logger=logger) if (metric_items is None): metric_items = ['mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'] for metric_item in metric_items: key = f'{metric}_{metric_item}' val = float(f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}') eval_results[key] = val ap = cocoEval.stats[:6] eval_results[f'{metric}_mAP_copypaste'] = f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} {ap[4]:.3f} {ap[5]:.3f}' if (tmp_dir is not None): tmp_dir.cleanup() return eval_results
def _get_and_format(self, tags, key, format, convertfunc): '\n Gets element with "key" from dict "tags". Converts this data with\n convertfunc and inserts it into the formatstring "format".\n\n If "format" is None, the data is returned without formatting, conversion\n is done.\n\n If the key is not in the dict, the empty string is returned.\n ' data = tags.get(key, None) if data: data = convertfunc(str(data)) if format: return (format % data) return data return ''
2,398,577,797,941,107,000
Gets element with "key" from dict "tags". Converts this data with convertfunc and inserts it into the formatstring "format". If "format" is None, the data is returned without formatting, conversion is done. If the key is not in the dict, the empty string is returned.
image_exif/models.py
_get_and_format
svenhertle/django_image_exif
python
def _get_and_format(self, tags, key, format, convertfunc): '\n Gets element with "key" from dict "tags". Converts this data with\n convertfunc and inserts it into the formatstring "format".\n\n If "format" is None, the data is returned without formatting, conversion\n is done.\n\n If the key is not in the dict, the empty string is returned.\n ' data = tags.get(key, None) if data: data = convertfunc(str(data)) if format: return (format % data) return data return ''
def test_fixture(): 'Test Fixtures.' assert (dir1 and dir2 and ttorrent and wind)
8,045,601,637,958,376,000
Test Fixtures.
tests/test_checktab.py
test_fixture
alexpdev/Torrentfile-GUI
python
def test_fixture(): assert (dir1 and dir2 and ttorrent and wind)
def test_missing_files_check(dir2, ttorrent, wind): 'Test missing files checker procedure.' (window, _) = wind checktab = window.central.checkWidget window.central.setCurrentWidget(checktab) dirpath = Path(dir2) for item in dirpath.iterdir(): if item.is_file(): os.remove(item) checktab.fileInput.setText(ttorrent) checktab.searchInput.setText(dir2) checktab.checkButton.click() assert (checktab.treeWidget.topLevelItemCount() > 0)
-2,974,906,744,892,642,000
Test missing files checker procedure.
tests/test_checktab.py
test_missing_files_check
alexpdev/Torrentfile-GUI
python
def test_missing_files_check(dir2, ttorrent, wind): (window, _) = wind checktab = window.central.checkWidget window.central.setCurrentWidget(checktab) dirpath = Path(dir2) for item in dirpath.iterdir(): if item.is_file(): os.remove(item) checktab.fileInput.setText(ttorrent) checktab.searchInput.setText(dir2) checktab.checkButton.click() assert (checktab.treeWidget.topLevelItemCount() > 0)
def test_shorter_files_check(wind, ttorrent, dir2): 'Test missing files checker procedure.' (window, _) = wind checktab = window.central.checkWidget dirpath = Path(dir2) window.central.setCurrentWidget(checktab) def shortenfile(item): 'Shave some data off the end of file.' temp = bytearray((2 ** 19)) with open(item, 'rb') as fd: fd.readinto(temp) with open(item, 'wb') as fd: fd.write(temp) if os.path.exists(dirpath): for item in dirpath.iterdir(): if item.is_file(): shortenfile(item) checktab.fileInput.setText(ttorrent) checktab.searchInput.setText(dir2) checktab.checkButton.click() assert (checktab.treeWidget.topLevelItemCount() > 0)
1,682,717,630,852,873,000
Test missing files checker procedure.
tests/test_checktab.py
test_shorter_files_check
alexpdev/Torrentfile-GUI
python
def test_shorter_files_check(wind, ttorrent, dir2): (window, _) = wind checktab = window.central.checkWidget dirpath = Path(dir2) window.central.setCurrentWidget(checktab) def shortenfile(item): 'Shave some data off the end of file.' temp = bytearray((2 ** 19)) with open(item, 'rb') as fd: fd.readinto(temp) with open(item, 'wb') as fd: fd.write(temp) if os.path.exists(dirpath): for item in dirpath.iterdir(): if item.is_file(): shortenfile(item) checktab.fileInput.setText(ttorrent) checktab.searchInput.setText(dir2) checktab.checkButton.click() assert (checktab.treeWidget.topLevelItemCount() > 0)
def test_check_tab(wind, ttorrent, dir1): 'Test checker procedure.' (window, _) = wind checktab = window.central.checkWidget window.central.setCurrentWidget(checktab) checktab.fileInput.setText(ttorrent) checktab.searchInput.setText(dir1) checktab.checkButton.click() assert (checktab.textEdit.toPlainText() != '')
283,783,477,430,735,520
Test checker procedure.
tests/test_checktab.py
test_check_tab
alexpdev/Torrentfile-GUI
python
def test_check_tab(wind, ttorrent, dir1): (window, _) = wind checktab = window.central.checkWidget window.central.setCurrentWidget(checktab) checktab.fileInput.setText(ttorrent) checktab.searchInput.setText(dir1) checktab.checkButton.click() assert (checktab.textEdit.toPlainText() != '')
def test_check_tab_input1(wind, dir1): 'Test checker procedure.' (window, _) = wind checktab = window.central.checkWidget window.central.setCurrentWidget(checktab) checktab.browseButton2.browse(dir1) assert (checktab.searchInput.text() != '')
-1,956,773,412,232,258,600
Test checker procedure.
tests/test_checktab.py
test_check_tab_input1
alexpdev/Torrentfile-GUI
python
def test_check_tab_input1(wind, dir1): (window, _) = wind checktab = window.central.checkWidget window.central.setCurrentWidget(checktab) checktab.browseButton2.browse(dir1) assert (checktab.searchInput.text() != '')
def test_check_tab_input_2(wind, dir1): 'Test checker procedure.' (window, _) = wind checktab = window.central.checkWidget window.central.setCurrentWidget(checktab) checktab.browseButton1.browse(dir1) assert (checktab.fileInput.text() != '')
-2,293,029,483,697,940,200
Test checker procedure.
tests/test_checktab.py
test_check_tab_input_2
alexpdev/Torrentfile-GUI
python
def test_check_tab_input_2(wind, dir1): (window, _) = wind checktab = window.central.checkWidget window.central.setCurrentWidget(checktab) checktab.browseButton1.browse(dir1) assert (checktab.fileInput.text() != '')
def test_check_tab4(wind): 'Test checker procedure again.' (window, _) = wind checktab = window.central.checkWidget window.central.setCurrentWidget(checktab) tree_widget = checktab.treeWidget assert (tree_widget.invisibleRootItem() is not None)
7,435,339,097,934,807,000
Test checker procedure again.
tests/test_checktab.py
test_check_tab4
alexpdev/Torrentfile-GUI
python
def test_check_tab4(wind): (window, _) = wind checktab = window.central.checkWidget window.central.setCurrentWidget(checktab) tree_widget = checktab.treeWidget assert (tree_widget.invisibleRootItem() is not None)
def test_clear_logtext(wind): 'Test checker logTextEdit widget function.' (window, _) = wind checktab = window.central.checkWidget window.central.setCurrentWidget(checktab) text_edit = checktab.textEdit text_edit.insertPlainText('sometext') text_edit.clear_data() assert (text_edit.toPlainText() == '')
7,770,702,326,506,262,000
Test checker logTextEdit widget function.
tests/test_checktab.py
test_clear_logtext
alexpdev/Torrentfile-GUI
python
def test_clear_logtext(wind): (window, _) = wind checktab = window.central.checkWidget window.central.setCurrentWidget(checktab) text_edit = checktab.textEdit text_edit.insertPlainText('sometext') text_edit.clear_data() assert (text_edit.toPlainText() == '')
def test_checktab_tree(wind): 'Check tree item counting functionality.' (window, _) = wind checktab = window.central.checkWidget window.central.setCurrentWidget(checktab) tree = TreeWidget(parent=checktab) item = TreePieceItem(type=0, tree=tree) item.progbar = ProgressBar(parent=tree, size=1000000) item.count(100000000) assert (item.counted == 1000000)
-3,247,643,926,909,501,000
Check tree item counting functionality.
tests/test_checktab.py
test_checktab_tree
alexpdev/Torrentfile-GUI
python
def test_checktab_tree(wind): (window, _) = wind checktab = window.central.checkWidget window.central.setCurrentWidget(checktab) tree = TreeWidget(parent=checktab) item = TreePieceItem(type=0, tree=tree) item.progbar = ProgressBar(parent=tree, size=1000000) item.count(100000000) assert (item.counted == 1000000)
@pytest.mark.parametrize('size', list(range(18, 20))) @pytest.mark.parametrize('index', list(range(1, 7, 2))) @pytest.mark.parametrize('version', [1, 2, 3]) @pytest.mark.parametrize('ext', ['.mkv', '.rar', '.r00', '.mp3']) def test_singlefile(size, ext, index, version, wind): 'Test the singlefile for create and check tabs.' (window, _) = wind createtab = window.central.createWidget checktab = window.central.checkWidget window.central.setCurrentWidget(checktab) testfile = str(tempfile(exp=size)) tfile = (testfile + ext) os.rename(testfile, tfile) metafile = (tfile + '.torrent') createtab.path_input.clear() createtab.output_input.clear() createtab.browse_file_button.browse(tfile) createtab.output_input.setText(metafile) createtab.piece_length.setCurrentIndex(index) btns = [createtab.v1button, createtab.v2button, createtab.hybridbutton] for (i, btn) in enumerate(btns): if ((i + 1) == version): btn.click() break createtab.submit_button.click() createtab.submit_button.join() checktab.fileInput.clear() checktab.searchInput.clear() checktab.fileInput.setText(metafile) checktab.searchInput.setText(tfile) checktab.checkButton.click() ptext = checktab.textEdit.toPlainText() assert ('100%' in ptext) rmpath(tfile, metafile)
-7,495,223,836,807,719,000
Test the singlefile for create and check tabs.
tests/test_checktab.py
test_singlefile
alexpdev/Torrentfile-GUI
python
@pytest.mark.parametrize('size', list(range(18, 20))) @pytest.mark.parametrize('index', list(range(1, 7, 2))) @pytest.mark.parametrize('version', [1, 2, 3]) @pytest.mark.parametrize('ext', ['.mkv', '.rar', '.r00', '.mp3']) def test_singlefile(size, ext, index, version, wind): (window, _) = wind createtab = window.central.createWidget checktab = window.central.checkWidget window.central.setCurrentWidget(checktab) testfile = str(tempfile(exp=size)) tfile = (testfile + ext) os.rename(testfile, tfile) metafile = (tfile + '.torrent') createtab.path_input.clear() createtab.output_input.clear() createtab.browse_file_button.browse(tfile) createtab.output_input.setText(metafile) createtab.piece_length.setCurrentIndex(index) btns = [createtab.v1button, createtab.v2button, createtab.hybridbutton] for (i, btn) in enumerate(btns): if ((i + 1) == version): btn.click() break createtab.submit_button.click() createtab.submit_button.join() checktab.fileInput.clear() checktab.searchInput.clear() checktab.fileInput.setText(metafile) checktab.searchInput.setText(tfile) checktab.checkButton.click() ptext = checktab.textEdit.toPlainText() assert ('100%' in ptext) rmpath(tfile, metafile)
def shortenfile(item): 'Shave some data off the end of file.' temp = bytearray((2 ** 19)) with open(item, 'rb') as fd: fd.readinto(temp) with open(item, 'wb') as fd: fd.write(temp)
-8,256,249,821,792,937,000
Shave some data off the end of file.
tests/test_checktab.py
shortenfile
alexpdev/Torrentfile-GUI
python
def shortenfile(item): temp = bytearray((2 ** 19)) with open(item, 'rb') as fd: fd.readinto(temp) with open(item, 'wb') as fd: fd.write(temp)
def coro(gen): 'Decorator to mark generator as co-routine.' @wraps(gen) def wind_up(*args, **kwargs): it = gen(*args, **kwargs) next(it) return it return wind_up
543,768,862,196,839,230
Decorator to mark generator as co-routine.
kombu/utils/compat.py
coro
CountRedClaw/kombu
python
def coro(gen): @wraps(gen) def wind_up(*args, **kwargs): it = gen(*args, **kwargs) next(it) return it return wind_up
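A short, hypothetical usage sketch for the decorator above (the averager generator is made up, not from the kombu sources): because coro() calls next() once on the wrapped generator, the caller can send() into it immediately.

@coro
def averager():
    # running-average co-routine; each send() returns the new average
    total, count, avg = 0.0, 0, None
    while True:
        value = yield avg
        total += value
        count += 1
        avg = total / count

acc = averager()           # already primed by coro(), no explicit next() needed
assert acc.send(10) == 10.0
assert acc.send(30) == 20.0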
def detect_environment(): 'Detect the current environment: default, eventlet, or gevent.' global _environment if (_environment is None): _environment = _detect_environment() return _environment
-5,701,659,537,937,548,000
Detect the current environment: default, eventlet, or gevent.
kombu/utils/compat.py
detect_environment
CountRedClaw/kombu
python
def detect_environment(): global _environment if (_environment is None): _environment = _detect_environment() return _environment
def entrypoints(namespace): 'Return setuptools entrypoints for namespace.' if (sys.version_info >= (3, 10)): entry_points = importlib_metadata.entry_points(group=namespace) else: entry_points = importlib_metadata.entry_points().get(namespace, []) return ((ep, ep.load()) for ep in entry_points)
3,997,614,384,926,604,000
Return setuptools entrypoints for namespace.
kombu/utils/compat.py
entrypoints
CountRedClaw/kombu
python
def entrypoints(namespace): if (sys.version_info >= (3, 10)): entry_points = importlib_metadata.entry_points(group=namespace) else: entry_points = importlib_metadata.entry_points().get(namespace, []) return ((ep, ep.load()) for ep in entry_points)
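A minimal usage sketch for the helper above; 'console_scripts' is just a commonly available namespace chosen for illustration, and the output depends entirely on what is installed in the environment.

for ep, obj in entrypoints('console_scripts'):
    # ep is the entry point record, obj is the already-loaded callable
    print(ep.name, '->', obj)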
def fileno(f): 'Get fileno from file-like object.' if isinstance(f, numbers.Integral): return f return f.fileno()
5,161,474,401,131,961,000
Get fileno from file-like object.
kombu/utils/compat.py
fileno
CountRedClaw/kombu
python
def fileno(f): if isinstance(f, numbers.Integral): return f return f.fileno()
def maybe_fileno(f): 'Get object fileno, or :const:`None` if not defined.' try: return fileno(f) except FILENO_ERRORS: pass
8,165,246,639,734,941,000
Get object fileno, or :const:`None` if not defined.
kombu/utils/compat.py
maybe_fileno
CountRedClaw/kombu
python
def maybe_fileno(f): try: return fileno(f) except FILENO_ERRORS: pass
@contextmanager def nested(*managers): 'Nest context managers.' exits = [] vars = [] exc = (None, None, None) try: try: for mgr in managers: exit = mgr.__exit__ enter = mgr.__enter__ vars.append(enter()) exits.append(exit) (yield vars) except: exc = sys.exc_info() finally: while exits: exit = exits.pop() try: if exit(*exc): exc = (None, None, None) except: exc = sys.exc_info() if (exc != (None, None, None)): reraise(exc[0], exc[1], exc[2]) finally: del exc
-6,615,287,256,100,333,000
Nest context managers.
kombu/utils/compat.py
nested
CountRedClaw/kombu
python
@contextmanager def nested(*managers): exits = [] vars = [] exc = (None, None, None) try: try: for mgr in managers: exit = mgr.__exit__ enter = mgr.__enter__ vars.append(enter()) exits.append(exit) (yield vars) except: exc = sys.exc_info() finally: while exits: exit = exits.pop() try: if exit(*exc): exc = (None, None, None) except: exc = sys.exc_info() if (exc != (None, None, None)): reraise(exc[0], exc[1], exc[2]) finally: del exc
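A small sketch of how nested() can be used (the file names are placeholders): it behaves like stacked with-statements built from a runtime list, and the exits still run if the body raises.

files = [open('a.txt', 'w'), open('b.txt', 'w')]   # placeholder paths
with nested(*files) as (fa, fb):
    fa.write('first\n')
    fb.write('second\n')
# both files are closed at this point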