index
int64
0
731k
package
stringlengths
2
98
name
stringlengths
1
76
docstring
stringlengths
0
281k
code
stringlengths
4
1.07M
signature
stringlengths
2
42.8k
20,528
pyobjectify.pyobjectify
__init__
null
def __init__(self, url, connectivity):
    """Fetch and cache the resource at *url* according to *connectivity*.

    Args:
        url (str): URL of the resource. A ``file://`` prefix is stripped.
        connectivity (Connectivity): ``ONLINE_STATIC`` fetches the URL over
            HTTP; ``LOCAL`` opens the path as a plain file.
    """
    # Normalize exactly once; a no-op for http(s) URLs, required for
    # file:// ones. (The original stripped the prefix a second time in
    # the LOCAL branch, which was redundant.)
    url = url.replace("file://", "")
    self.url = url
    self.connectivity = connectivity
    if connectivity == Connectivity.ONLINE_STATIC:
        response = get(url)
        self.response = response
        self.plaintext = response.text
    elif connectivity == Connectivity.LOCAL:
        # The file object is deliberately kept open: from_url() closes it
        # after the conversion step has consumed it.
        file_obj = open(url, "r")
        self.response = file_obj
        try:
            self.plaintext = file_obj.read()
            self.response.seek(0, 0)  # rewind so later readers start at byte 0
        except Exception:
            # Binary payloads (e.g. XLSX) cannot be decoded as text.
            self.plaintext = None
(self, url, connectivity)
20,530
pyobjectify.pyobjectify
convert
Attempts to convert the resource data through possible conversions. The end-user does not have to interface with this, but it is provided for more granular operations. Args: resource (:obj:`Resource`): The Resource object for the resource. conversions (list): The list of all possible conversions, filtered if user specified output data type. Returns: object: The first successful conversion from the probable resource type to an output data type. Raises: TypeError: None of the possible conversions were successful.
def convert(resource, conversions):
    """Try each candidate conversion in order and return the first success.

    The end-user does not have to interface with this, but it is provided for
    more granular operations.

    Args:
        resource (:obj:`Resource`): The Resource object for the resource.
        conversions (list): (input type, output type) pairs to attempt,
            already filtered if the user specified an output data type.

    Returns:
        object: The result of the first conversion that succeeds.

    Raises:
        TypeError: None of the possible conversions were successful.
    """
    for conversion in conversions:
        try:
            i_type, o_type = conversion
            if i_type is InputType.JSON:
                # JSON is currently the only input with several output shapes.
                json_converters = {
                    dict: json_to_dict,
                    list: json_to_list,
                    DataFrame: json_to_dataframe,
                }
                converter = json_converters.get(o_type)
                if converter is not None:
                    return converter(resource)
            elif i_type is InputType.CSV:
                return csv_to_list(resource)
            elif i_type is InputType.TSV:
                return tsv_to_list(resource)
            elif i_type is InputType.XML:
                return xml_to_dict(resource)
            elif i_type is InputType.XLSX:
                return xlsx_to_dict(resource)
        except Exception:
            continue  # This conversion failed; try the next one.
    # Reaching here means none of the conversions worked.
    raise TypeError("The type of the resource is not supported.")
(resource, conversions)
20,531
pyobjectify.pyobjectify
from_url
This is the main interface that the end-user interacts with. Given a URL, converts the resource data to a parsable Python object. Args: url (str): A URL to a resource. out_type (:obj:`class`, optional): The user-specified data type of the output. Returns: object: A parsable Python object representation of the resource. Raises: TypeError: The user-specified data type of the output is not supported.
def from_url(url, out_type=None):
    """Convert the resource at *url* into a parsable Python object.

    This is the main interface that the end-user interacts with.

    Args:
        url (str): A URL to a resource.
        out_type (:obj:`class`, optional): The user-specified data type of
            the output.

    Returns:
        object: A parsable Python object representation of the resource.

    Raises:
        TypeError: The user-specified data type of the output is not
            supported.
    """
    if out_type is not None and out_type not in OUTPUT_TYPES:
        raise TypeError(f"The specified output type {out_type} is not supported.")

    connectivity = url_to_connectivity(url)            # (1) connectivity type
    resource = retrieve_resource(url, connectivity)    # (2) fetch the resource
    in_types = get_resource_types(resource)            # (3) probable input types
    conversions = get_conversions(in_types, out_type)  # (4) candidate conversions
    output = convert(resource, conversions)            # (5) convert to output

    # (6) Release the file handle that Resource open()-ed for local files.
    if connectivity is Connectivity.LOCAL:
        resource.response.close()
    return output
(url, out_type=None)
20,532
pyobjectify.pyobjectify
get_conversions
Get possible conversions for the probable resource types. If the user specified a preferred output type, filter out any undesirable conversions to consider. The end-user does not have to interface with this, but it is provided for more granular operations. Args: in_types (:obj:`list`): A list of calculated possible resource types. out_type (:obj:`class`, optional): The user-specified data type of the output. Returns: list: A list of (in, out) conversion tuples as described above. Raises: TypeError: There are no possible conversions.
def get_conversions(in_types, out_type=None):
    """Get possible conversions for the probable resource types.

    If the user specified a preferred output type, filter out any undesirable
    conversions to consider. The end-user does not have to interface with
    this, but it is provided for more granular operations.

    Args:
        in_types (:obj:`list`): A list of calculated possible resource types.
        out_type (:obj:`class`, optional): The user-specified data type of
            the output.

    Returns:
        list: A list of (in, out) conversion tuples as described above.

    Raises:
        TypeError: There are no possible conversions.
    """
    # There is guaranteed at least one probable in_type.
    # Lists (not sets) are used to preserve insertion order.
    conversions = []     # Ordered, de-duplicated (in, out) pairs.
    poss_out_types = []  # Ordered union of reachable output types.
    for in_type in in_types:
        for poss_out_type in CONVERSIONS[in_type]:
            # Keep the pair when no output filter is set, or when it matches
            # the user's requested output type. (The original had two
            # branches with identical bodies; they are merged here.)
            if out_type is None or poss_out_type is out_type:
                if (in_type, poss_out_type) not in conversions:
                    conversions.append((in_type, poss_out_type))
                if poss_out_type not in poss_out_types:
                    poss_out_types.append(poss_out_type)
    if out_type is not None and out_type not in poss_out_types:
        raise TypeError(f"The resource cannot be converted into the requested data type {out_type}.")
    return conversions
(in_types, out_type=None)
20,533
pyobjectify.pyobjectify
get_resource_types
Get possible resource types of the resource. The end-user does not have to interface with this, but it is provided for more granular operations. Args: resource (:obj:`Resource`): The Resource object for the resource. Returns: list: A list of possible resource types of the resource. Raises: TypeError: The resource is of a type that is not supported.
def _probe_delimited(resource, delimiter):
    """Return True if the resource parses as a delimited table: every row has
    the same field count, and that count is greater than one (single-column
    data is not interpreted as CSV/TSV)."""
    try:
        dicts = DictReader(resource.response, delimiter=delimiter)
        if resource.connectivity is Connectivity.LOCAL:
            resource.response.seek(0, 0)
        # Ensure that each row has the same number of fields.
        nums_fields = set(len(d.items()) for d in list(dicts))
        if resource.connectivity is Connectivity.LOCAL:
            resource.response.seek(0, 0)
        assert len(nums_fields) == 1
        # Ensure that the number of fields is greater than 1.
        (num_fields,) = nums_fields
        assert num_fields > 1
        return True
    except Exception:
        return False


def get_resource_types(resource):
    """Get possible resource types of the resource.

    The end-user does not have to interface with this, but it is provided for
    more granular operations.

    Args:
        resource (:obj:`Resource`): The Resource object for the resource.

    Returns:
        list: A list of possible resource types of the resource.

    Raises:
        TypeError: The resource is of a type that is not supported.
    """
    # Start with every input type and eliminate the ones that fail a probe.
    possible = list(InputType)

    # JSON: the plaintext must be parsable by json.loads.
    try:
        _ = loads(resource.plaintext)
    except Exception:
        possible.remove(InputType.JSON)

    # CSV and TSV share the same tabular sanity check; only the delimiter
    # differs (the original duplicated this probe inline for each format).
    if not _probe_delimited(resource, ","):
        possible.remove(InputType.CSV)
    if not _probe_delimited(resource, "\t"):
        possible.remove(InputType.TSV)

    # XML: must parse, and the payload must start with '<'.
    try:
        _ = parse(resource.plaintext)
        assert resource.plaintext[0] == "<"
    except Exception:
        possible.remove(InputType.XML)

    # XLSX: every sheet must be readable and convertible to a dict.
    try:
        sheet_frames = read_excel(resource.url, sheet_name=None)
        for _sheet_name, frame in sheet_frames.items():
            frame.to_dict()
    except Exception:
        possible.remove(InputType.XLSX)

    if len(possible) == 0:
        raise TypeError("The type of the resource is not supported.")
    return possible
(resource)
20,535
pyobjectify.pyobjectify
retrieve_resource
Retrieves the resource at the URL using the connectivity type and stores it in a Resource object. The end-user does not have to interface with this, but it is provided for more granular operations. Args: url (str): The URL to a resource. connectivity (:obj:`Connectivity`): An attribute in the enumeration Connectivity. The calculated connectivity type of the resource. Returns: Resource: The Resource object for the resource at the URL specified. Raises: TypeError: The connectivity type is not supported.
def retrieve_resource(url, connectivity):
    """Retrieve the resource at *url* and wrap it in a Resource object.

    The end-user does not have to interface with this, but it is provided for
    more granular operations.

    Args:
        url (str): The URL to a resource.
        connectivity (:obj:`Connectivity`): An attribute in the enumeration
            Connectivity. The calculated connectivity type of the resource.

    Returns:
        Resource: The Resource object for the resource at the URL specified.

    Raises:
        TypeError: The connectivity type is not supported.
    """
    # Guard clause: anything outside the Connectivity enumeration is rejected.
    if isinstance(connectivity, Connectivity):
        return Resource(url, connectivity)
    raise TypeError(f"The connectivity type {connectivity} is not supported.")
(url, connectivity)
20,536
pyobjectify.pyobjectify
url_to_connectivity
Get the connectivity type of the resource. The end-user does not have to interface with this, but it is provided for more granular operations. Args: url (str): The URL to a resource. Returns: Connectivity: An attribute in the enumeration `Connectivity`. The calculated connectivity of the resource type.
def url_to_connectivity(url):
    """Get the connectivity type of the resource.

    The end-user does not have to interface with this, but it is provided for
    more granular operations.

    Args:
        url (str): The URL to a resource.

    Returns:
        Connectivity: An attribute in the enumeration `Connectivity`. The
            calculated connectivity of the resource type.
    """
    # Local files: explicit file:// scheme, absolute paths, or relative paths.
    if url.startswith(("file:///", "/", ".")):
        return Connectivity.LOCAL
    return Connectivity.ONLINE_STATIC
(url)
20,537
pyratings.utils
_extract_rating_provider
Extract valid rating providers. It is meant to extract rating providers from the column headings of a ``pd.DataFrame``. For example, let's assume some rating column headers are ["rating_fitch", "S&P rating", "BLOOMBERG composite rating"]. The function would then return a list of valid rating providers, namely ["Fitch", "SP", "Bloomberg"]. Parameters ---------- rating_provider Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. valid_rtg_provider List of strings containing the names of valid rating providers. Supported rating providers are {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. 'rating_provider' must be in that list. Returns ------- Union[str, List[str]] str or List[str] with valid rating providers. Raises ------ AssertionError If ``rating_provider`` is not a subset of `valid_rtg_provider`. Examples -------- >>> _extract_rating_provider( ... rating_provider="S&P", ... valid_rtg_provider=["fitch", "s&p", "moody"], ... ) 'SP' >>> _extract_rating_provider( ... rating_provider="rtg_DBRS", ... valid_rtg_provider=["Fitch", "SP", "DBRS"] ... ) 'DBRS' You can also provide a list of strings. >>> _extract_rating_provider( ... rating_provider=["Fitch ratings", "rating_SP", "DBRS"], ... valid_rtg_provider=["fitch", "moody", "sp", "bloomberg", "dbrs"] ... ) ['Fitch', 'SP', 'DBRS']
def _extract_rating_provider( rating_provider: str | list[str] | Hashable, valid_rtg_provider: list[str], ) -> str | list[str]: """Extract valid rating providers. It is meant to extract rating providers from the column headings of a ``pd.DataFrame``. For example, let's assume some rating column headers are ["rating_fitch", "S&P rating", "BLOOMBERG composite rating"]. The function would then return a list of valid rating providers, namely ["Fitch", "SP", "Bloomberg"]. Parameters ---------- rating_provider Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. valid_rtg_provider List of strings containing the names of valid rating providers. Supported rating providers are {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. 'rating_provider' must be in that list. Returns ------- Union[str, List[str]] str or List[str] with valid rating providers. Raises ------ AssertionError If ``rating_provider`` is not a subset of `valid_rtg_provider`. Examples -------- >>> _extract_rating_provider( ... rating_provider="S&P", ... valid_rtg_provider=["fitch", "s&p", "moody"], ... ) 'SP' >>> _extract_rating_provider( ... rating_provider="rtg_DBRS", ... valid_rtg_provider=["Fitch", "SP", "DBRS"] ... ) 'DBRS' You can also provide a list of strings. >>> _extract_rating_provider( ... rating_provider=["Fitch ratings", "rating_SP", "DBRS"], ... valid_rtg_provider=["fitch", "moody", "sp", "bloomberg", "dbrs"] ... ) ['Fitch', 'SP', 'DBRS'] """ provider_map = { "fitch": "Fitch", "moody": "Moody", "moody's": "Moody", "sp": "SP", "s&p": "SP", "bloomberg": "Bloomberg", "dbrs": "DBRS", } if isinstance(rating_provider, str): rating_provider = [rating_provider] valid_rtg_provider_lowercase = [x.lower() for x in valid_rtg_provider] for i, provider in enumerate(rating_provider): if not any(x in provider.lower() for x in valid_rtg_provider_lowercase): raise AssertionError( f"{provider!r} is not a valid rating provider. 
'rating_provider' must " f"be in {valid_rtg_provider}." ) for valid_provider in valid_rtg_provider: if valid_provider.lower() in provider.lower(): rating_provider[i] = provider_map[valid_provider.lower()] if len(rating_provider) > 1: return rating_provider else: return rating_provider[0]
(rating_provider: str | list[str] | collections.abc.Hashable, valid_rtg_provider: list[str]) -> str | list[str]
20,538
pyratings.utils
_get_translation_dict
Load translation dictionaries from SQLite database.
def _get_translation_dict(
    translation_table: str,
    rating_provider: str | None = None,
    tenor: str = "long-term",
    st_rtg_strategy: str = "base",
) -> dict | pd.DataFrame:
    """Load translation dictionaries from SQLite database."""

    # NOTE: the nested helpers below close over `cursor`, `rating_provider`
    # and `st_rtg_strategy`; they are only callable after the connection is
    # opened further down.

    def _rtg_to_scores(tenor: str) -> dict[str, int]:
        """Create translation dictionary to translate from ratings to scores."""
        if tenor == "long-term":
            sql_query = """
                SELECT Rating, RatingScore
                FROM v_ltRatings
                WHERE RatingProvider=?
            """
            cursor.execute(sql_query, (rating_provider,))
        else:
            # Short-term ratings map to an average equivalent long-term score.
            sql_query = """
                SELECT Rating, AvgEquivLTScore
                FROM v_stRatings
                WHERE RatingProvider=? and Strategy=?
            """
            cursor.execute(sql_query, (rating_provider, st_rtg_strategy))
        return dict(cursor.fetchall())

    def _scores_to_rtg(tenor: str, strat: str) -> dict[int, str] | pd.DataFrame:
        """Create translation dictionary to translate from scores to ratings."""
        if tenor == "long-term":
            sql_query = """
                SELECT RatingScore, Rating
                FROM v_ltRatings
                WHERE Rating != 'SD' and RatingProvider=?
            """
            cursor.execute(sql_query, (rating_provider,))
            translation_dict = dict(cursor.fetchall())
        else:
            # Short-term: each rating covers a [MinScore, MaxScore] band, so a
            # DataFrame (not a plain dict) is returned.
            sql_query = """
                SELECT MinEquivLTScore, MaxEquivLTScore, Rating
                FROM v_stRatings
                WHERE RatingProvider=? and Strategy=?
                ORDER BY MaxEquivLTScore
            """
            cursor.execute(sql_query, (rating_provider, strat))
            translation_dict = pd.DataFrame.from_records(
                cursor.fetchall(), columns=["MinScore", "MaxScore", "Rating"]
            )
        return translation_dict

    def _scores_to_warf() -> dict[int, int]:
        """Create translation dictionary to translate from scores to WARFs."""
        sql_query = "SELECT RatingScore, WARF FROM WARFs"
        cursor.execute(sql_query)
        return dict(cursor.fetchall())

    # connect to database
    connection = sqlite3.connect(str(RATINGS_DB))
    cursor = connection.cursor()

    if translation_table == "rtg_to_scores":
        translation_dict = _rtg_to_scores(tenor=tenor)
    elif translation_table == "scores_to_rtg":
        translation_dict = _scores_to_rtg(tenor=tenor, strat=st_rtg_strategy)
    else:
        # Any other table name falls back to the scores -> WARF mapping.
        translation_dict = _scores_to_warf()

    # close database connection
    connection.close()

    return translation_dict
(translation_table: str, rating_provider: Optional[str] = None, tenor: str = 'long-term', st_rtg_strategy: str = 'base') -> dict | pandas.core.frame.DataFrame
20,542
pyratings.consolidate
consolidate_ratings
Consolidate ratings on a security level basis across rating agencies . Parameters ---------- ratings Dataframe consisting of clean ratings (i.e. stripped off of watches/outlooks) method Defines the method that will be used in order to consolidate the ratings on a security level basis across rating agencies. Valid methods are {"best", "second_best", "worst"}. rating_provider_input Indicates rating providers within `ratings`. Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. If None, `rating_provider_input` will be inferred from the dataframe column names. rating_provider_output Indicates which rating scale will be used for output results. Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. tenor Should contain any valid tenor out of {"long-term", "short-term"} Returns ------- pd.Series Consolidated ratings on a security level basis. Examples -------- >>> import pandas as pd >>> ratings_df = pd.DataFrame( ... data=( ... { ... "rating_S&P": ['AAA', 'AA-', 'AA+', 'BB-', 'C'], ... "rating_Moody's": ['Aa1', 'Aa3', 'Aa2', 'Ba3', 'Ca'], ... "rating_Fitch": ['AA-', 'AA-', 'AA-', 'B+', 'C'], ... } ... ) ... ) Identify the best ratings: >>> consolidate_ratings( ... ratings=ratings_df, ... method="best", ... rating_provider_input=["S&P", "Moody", "Fitch"], ... rating_provider_output="Moody", ... ) 0 Aaa 1 Aa3 2 Aa1 3 Ba3 4 Ca Name: best_rtg, dtype: object Identify the second-best ratings: >>> consolidate_ratings( ... ratings=ratings_df, ... method="second_best", ... rating_provider_input=["S&P", "Moody", "Fitch"], ... rating_provider_output="DBRS", ... ) 0 AAH 1 AAL 2 AA 3 BBL 4 C Name: second_best_rtg, dtype: object Identify the worst ratings: >>> consolidate_ratings( ... ratings=ratings_df, ... method="worst", ... rating_provider_input=["S&P", "Moody", "Fitch"] ... ) 0 AA- 1 AA- 2 AA- 3 B+ 4 C Name: worst_rtg, dtype: object
def consolidate_ratings(
    ratings: pd.DataFrame,
    method: Literal["best", "second_best", "worst"] = "worst",
    rating_provider_input: list[str] | None = None,
    rating_provider_output: Literal[
        "Fitch", "Moody", "S&P", "Bloomberg", "DBRS"
    ] = "S&P",
    tenor: Literal["long-term", "short-term"] = "long-term",
) -> pd.Series:
    """Consolidate ratings on a security level basis across rating agencies.

    Parameters
    ----------
    ratings
        Dataframe consisting of clean ratings (i.e. stripped off of
        watches/outlooks)
    method
        Defines the method that will be used in order to consolidate the
        ratings on a security level basis across rating agencies.
        Valid methods are {"best", "second_best", "worst"}.
    rating_provider_input
        Indicates rating providers within `ratings`. Should contain any valid
        rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg",
        "DBRS"}. If None, `rating_provider_input` will be inferred from the
        dataframe column names.
    rating_provider_output
        Indicates which rating scale will be used for output results.
        Should contain any valid rating provider out of {"Fitch", "Moody's",
        "S&P", "Bloomberg", "DBRS"}.
    tenor
        Should contain any valid tenor out of {"long-term", "short-term"}

    Returns
    -------
    pd.Series
        Consolidated ratings on a security level basis.

    Examples
    --------
    >>> import pandas as pd
    >>> ratings_df = pd.DataFrame(
    ...     data=(
    ...         {
    ...             "rating_S&P": ['AAA', 'AA-', 'AA+', 'BB-', 'C'],
    ...             "rating_Moody's": ['Aa1', 'Aa3', 'Aa2', 'Ba3', 'Ca'],
    ...             "rating_Fitch": ['AA-', 'AA-', 'AA-', 'B+', 'C'],
    ...         }
    ...     )
    ... )

    Identify the best ratings:

    >>> consolidate_ratings(
    ...     ratings=ratings_df,
    ...     method="best",
    ...     rating_provider_input=["S&P", "Moody", "Fitch"],
    ...     rating_provider_output="Moody",
    ... )
    0    Aaa
    1    Aa3
    2    Aa1
    3    Ba3
    4     Ca
    Name: best_rtg, dtype: object

    Identify the second-best ratings:

    >>> consolidate_ratings(
    ...     ratings=ratings_df,
    ...     method="second_best",
    ...     rating_provider_input=["S&P", "Moody", "Fitch"],
    ...     rating_provider_output="DBRS",
    ... )
    0    AAH
    1    AAL
    2     AA
    3    BBL
    4      C
    Name: second_best_rtg, dtype: object

    Identify the worst ratings:

    >>> consolidate_ratings(
    ...     ratings=ratings_df,
    ...     method="worst",
    ...     rating_provider_input=["S&P", "Moody", "Fitch"]
    ... )
    0    AA-
    1    AA-
    2    AA-
    3     B+
    4      C
    Name: worst_rtg, dtype: object
    """
    # Dispatch table: each consolidation method maps to its score-level
    # implementation.
    func = {
        "best": get_best_scores,
        "second_best": get_second_best_scores,
        "worst": get_worst_scores,
    }

    # translate ratings -> scores
    rating_scores_series = func[method](
        ratings, rating_provider_input=rating_provider_input, tenor=tenor
    )

    # convert back to ratings, expressed on the requested provider's scale
    ratings_series = get_ratings_from_scores(
        rating_scores=rating_scores_series,
        rating_provider=rating_provider_output,
        tenor=tenor,
    )
    ratings_series.name = f"{method}_rtg"
    return ratings_series
(ratings: pandas.core.frame.DataFrame, method: Literal['best', 'second_best', 'worst'] = 'worst', rating_provider_input: Optional[list[str]] = None, rating_provider_output: Literal['Fitch', 'Moody', 'S&P', 'Bloomberg', 'DBRS'] = 'S&P', tenor: Literal['long-term', 'short-term'] = 'long-term') -> pandas.core.series.Series
20,543
pyratings.consolidate
get_best_ratings
Compute the best rating on a security level basis across rating agencies. Parameters ---------- ratings Dataframe consisting of clean ratings (i.e. stripped off of watches/outlooks) rating_provider_input Indicates rating providers within `ratings`. Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. If None, `rating_provider_input` will be inferred from the dataframe column names. rating_provider_output Indicates which rating scale will be used for output results. Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. tenor Should contain any valid tenor out of {"long-term", "short-term"} Returns ------- pd.Series Best ratings on a security level basis. Examples -------- >>> import pandas as pd >>> ratings_df = pd.DataFrame( ... data=( ... { ... "rating_S&P": ['AAA', 'AA-', 'AA+', 'BB-', 'C'], ... "rating_Moody's": ['Aa1', 'Aa3', 'Aa2', 'Ba3', 'Ca'], ... "rating_Fitch": ['AA-', 'AA-', 'AA-', 'B+', 'C'], ... } ... ) ... ) >>> get_best_ratings(ratings_df, rating_provider_input=["S&P", "Moody", "Fitch"]) 0 AAA 1 AA- 2 AA+ 3 BB- 4 CC Name: best_rtg, dtype: object
def get_best_ratings(
    ratings: pd.DataFrame,
    rating_provider_input: list[str] | None = None,
    rating_provider_output: Literal[
        "Fitch", "Moody", "S&P", "Bloomberg", "DBRS"
    ] = "S&P",
    tenor: Literal["long-term", "short-term"] = "long-term",
) -> pd.Series:
    """Compute the best rating on a security level basis across rating agencies.

    Parameters
    ----------
    ratings
        Dataframe consisting of clean ratings (i.e. stripped off of
        watches/outlooks)
    rating_provider_input
        Indicates rating providers within `ratings`. Should contain any valid
        rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg",
        "DBRS"}. If None, `rating_provider_input` will be inferred from the
        dataframe column names.
    rating_provider_output
        Indicates which rating scale will be used for output results.
        Should contain any valid rating provider out of {"Fitch", "Moody's",
        "S&P", "Bloomberg", "DBRS"}.
    tenor
        Should contain any valid tenor out of {"long-term", "short-term"}

    Returns
    -------
    pd.Series
        Best ratings on a security level basis.

    Examples
    --------
    >>> import pandas as pd
    >>> ratings_df = pd.DataFrame(
    ...     data=(
    ...         {
    ...             "rating_S&P": ['AAA', 'AA-', 'AA+', 'BB-', 'C'],
    ...             "rating_Moody's": ['Aa1', 'Aa3', 'Aa2', 'Ba3', 'Ca'],
    ...             "rating_Fitch": ['AA-', 'AA-', 'AA-', 'B+', 'C'],
    ...         }
    ...     )
    ... )
    >>> get_best_ratings(ratings_df, rating_provider_input=["S&P", "Moody", "Fitch"])
    0    AAA
    1    AA-
    2    AA+
    3    BB-
    4     CC
    Name: best_rtg, dtype: object
    """
    # Thin wrapper: delegates to the generic consolidation routine with
    # method="best".
    ratings_series = consolidate_ratings(
        method="best",
        ratings=ratings,
        rating_provider_input=rating_provider_input,
        rating_provider_output=rating_provider_output,
        tenor=tenor,
    )
    return ratings_series
(ratings: pandas.core.frame.DataFrame, rating_provider_input: Optional[list[str]] = None, rating_provider_output: Literal['Fitch', 'Moody', 'S&P', 'Bloomberg', 'DBRS'] = 'S&P', tenor: Literal['long-term', 'short-term'] = 'long-term') -> pandas.core.series.Series
20,544
pyratings.consolidate
get_best_scores
Compute the best rating scores on a security level basis across rating agencies. Parameters ---------- ratings Dataframe consisting of clean ratings (i.e. stripped off of watches/outlooks) rating_provider_input Indicates rating providers within `ratings`. Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. If None, `rating_provider_input` will be inferred from the dataframe column names. tenor Should contain any valid tenor out of {"long-term", "short-term"} Returns ------- pd.Series Best rating scores on a security level basis. Examples -------- >>> import pandas as pd >>> ratings_df = pd.DataFrame( ... data=( ... { ... "rating_S&P": ['AAA', 'AA-', 'AA+', 'BB-', 'C'], ... "rating_Moody's": ['Aa1', 'Aa3', 'Aa2', 'Ba3', 'Ca'], ... "rating_Fitch": ['AA-', 'AA-', 'AA-', 'B+', 'C'], ... } ... ) ... ) >>> get_best_scores( ... ratings=ratings_df, ... rating_provider_input=["S&P", "Moody", "Fitch"] ... ) 0 1 1 4 2 2 3 13 4 20 Name: best_scores, dtype: int64
def get_best_scores(
    ratings: pd.DataFrame,
    rating_provider_input: list[str] | None = None,
    tenor: Literal["long-term", "short-term"] = "long-term",
) -> pd.Series:
    """Compute the best rating scores on a security level basis across rating agencies.

    Parameters
    ----------
    ratings
        Dataframe consisting of clean ratings (i.e. stripped off of
        watches/outlooks)
    rating_provider_input
        Indicates rating providers within `ratings`. Should contain any valid
        rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg",
        "DBRS"}. If None, `rating_provider_input` will be inferred from the
        dataframe column names.
    tenor
        Should contain any valid tenor out of {"long-term", "short-term"}

    Returns
    -------
    pd.Series
        Best rating scores on a security level basis.

    Examples
    --------
    >>> import pandas as pd
    >>> ratings_df = pd.DataFrame(
    ...     data=(
    ...         {
    ...             "rating_S&P": ['AAA', 'AA-', 'AA+', 'BB-', 'C'],
    ...             "rating_Moody's": ['Aa1', 'Aa3', 'Aa2', 'Ba3', 'Ca'],
    ...             "rating_Fitch": ['AA-', 'AA-', 'AA-', 'B+', 'C'],
    ...         }
    ...     )
    ... )
    >>> get_best_scores(
    ...     ratings=ratings_df,
    ...     rating_provider_input=["S&P", "Moody", "Fitch"]
    ... )
    0     1
    1     4
    2     2
    3    13
    4    20
    Name: best_scores, dtype: int64
    """
    # Translate all ratings into numerical scores on a common scale.
    rating_scores_df = get_scores_from_ratings(
        ratings=ratings, rating_provider=rating_provider_input, tenor=tenor
    )
    # Row-wise minimum: the best rating carries the lowest score
    # (e.g. AAA -> 1 in the example above).
    rating_scores_series = rating_scores_df.min(axis=1)
    rating_scores_series.name = "best_scores"
    return rating_scores_series
(ratings: pandas.core.frame.DataFrame, rating_provider_input: Optional[list[str]] = None, tenor: Literal['long-term', 'short-term'] = 'long-term') -> pandas.core.series.Series
20,545
pyratings.clean
get_pure_ratings
Remove rating watches/outlooks and other non-actual-rating related information. Ratings may contain watch, such as 'AA- *+', 'BBB+ (CwNegative)'. Outlook/watch should be seperated by a blank from the actual rating. Also, ratings may also contain the letter 'u' (unsolicited) or be prefixed by '(P)' (public information only). This kind of information will be removed to retrieve the actual rating(s). Parameters ---------- ratings Uncleaned rating(s). Returns ------- Union[str, pd.Series, pd.DataFrame] Regular ratings stripped off of watches. The name of the resulting Series or the columns of the returning DataFrame will be suffixed with `_clean`. Examples -------- Cleaning a single rating: >>> get_pure_ratings("AA- *+") 'AA-' >>> get_pure_ratings("Au") 'A' >>> get_pure_ratings("(P)P-2") 'P-2' Cleaning a `pd.Series`: >>> import numpy as np >>> import pandas as pd >>> rating_series=pd.Series( ... data=[ ... "BB+ *-", ... "(P)BBB *+", ... np.nan, ... "AA- (Developing)", ... np.nan, ... "CCC+ (CwPositive)", ... "BB+u", ... ], ... name="rtg_SP", ... ) >>> get_pure_ratings(rating_series) 0 BB+ 1 BBB 2 NaN 3 AA- 4 NaN 5 CCC+ 6 BB+ Name: rtg_SP_clean, dtype: object Cleaning a `pd.DataFrame`: >>> rtg_df = pd.DataFrame( ... data={ ... "rtg_SP": [ ... "BB+ *-", ... "BBB *+", ... np.nan, ... "AA- (Developing)", ... np.nan, ... "CCC+ (CwPositive)", ... "BB+u", ... ], ... "rtg_Fitch": [ ... "BB+ *-", ... "BBB *+", ... pd.NA, ... "AA- (Developing)", ... np.nan, ... "CCC+ (CwPositive)", ... "BB+u", ... ], ... }, ... ) >>> get_pure_ratings(rtg_df) rtg_SP_clean rtg_Fitch_clean 0 BB+ BB+ 1 BBB BBB 2 NaN <NA> 3 AA- AA- 4 NaN NaN 5 CCC+ CCC+ 6 BB+ BB+
def get_pure_ratings( ratings: str | pd.Series | pd.DataFrame, ) -> str | pd.Series | pd.DataFrame: """Remove rating watches/outlooks and other non-actual-rating related information. Ratings may contain watch, such as 'AA- *+', 'BBB+ (CwNegative)'. Outlook/watch should be seperated by a blank from the actual rating. Also, ratings may also contain the letter 'u' (unsolicited) or be prefixed by '(P)' (public information only). This kind of information will be removed to retrieve the actual rating(s). Parameters ---------- ratings Uncleaned rating(s). Returns ------- Union[str, pd.Series, pd.DataFrame] Regular ratings stripped off of watches. The name of the resulting Series or the columns of the returning DataFrame will be suffixed with `_clean`. Examples -------- Cleaning a single rating: >>> get_pure_ratings("AA- *+") 'AA-' >>> get_pure_ratings("Au") 'A' >>> get_pure_ratings("(P)P-2") 'P-2' Cleaning a `pd.Series`: >>> import numpy as np >>> import pandas as pd >>> rating_series=pd.Series( ... data=[ ... "BB+ *-", ... "(P)BBB *+", ... np.nan, ... "AA- (Developing)", ... np.nan, ... "CCC+ (CwPositive)", ... "BB+u", ... ], ... name="rtg_SP", ... ) >>> get_pure_ratings(rating_series) 0 BB+ 1 BBB 2 NaN 3 AA- 4 NaN 5 CCC+ 6 BB+ Name: rtg_SP_clean, dtype: object Cleaning a `pd.DataFrame`: >>> rtg_df = pd.DataFrame( ... data={ ... "rtg_SP": [ ... "BB+ *-", ... "BBB *+", ... np.nan, ... "AA- (Developing)", ... np.nan, ... "CCC+ (CwPositive)", ... "BB+u", ... ], ... "rtg_Fitch": [ ... "BB+ *-", ... "BBB *+", ... pd.NA, ... "AA- (Developing)", ... np.nan, ... "CCC+ (CwPositive)", ... "BB+u", ... ], ... }, ... 
) >>> get_pure_ratings(rtg_df) rtg_SP_clean rtg_Fitch_clean 0 BB+ BB+ 1 BBB BBB 2 NaN <NA> 3 AA- AA- 4 NaN NaN 5 CCC+ CCC+ 6 BB+ BB+ """ if isinstance(ratings, str): ratings = ( ratings.split()[0].rstrip("uU").removeprefix("(p)").removeprefix("(P)") ) return ratings elif isinstance(ratings, pd.Series): # identify string occurrences isstring = ratings.apply(type).eq(str) # strip string after occurrence of very first blank and strip character 'u', # which has usually been added without a blank; # also remove suffix '(p)' and '(P)' ratings[isstring] = ratings[isstring].str.split().str[0] ratings[isstring] = ratings[isstring].str.rstrip("uU") ratings[isstring] = ratings[isstring].str.removeprefix("(p)") ratings[isstring] = ratings[isstring].str.removeprefix("(P)") ratings.name = f"{ratings.name}_clean" return ratings elif isinstance(ratings, pd.DataFrame): # Recursive call of `get_pure_ratings` return pd.concat( [get_pure_ratings(ratings=ratings[col]) for col in ratings.columns], axis=1 )
(ratings: str | pandas.core.series.Series | pandas.core.frame.DataFrame) -> str | pandas.core.series.Series | pandas.core.frame.DataFrame
20,547
pyratings.get_ratings
get_ratings_from_scores
Convert numerical rating scores into regular ratings. Parameters ---------- rating_scores Numerical rating score(s). rating_provider Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. If None, `rating_provider` will be inferred from the series name or dataframe column names. tenor Should contain any valid tenor out of {"long-term", "short-term"}. short_term_strategy Will only be used, if `tenor` is "short-term". Choose between three distinct strategies in order to translate a long-term rating score into a short-term rating. Must be in {"best", "base", "worst"}. Compare https://hsbc.github.io/pyratings/short-term-rating/#there's-one-more-catch... - Strategy 1 (best): Always choose the best possible short-term rating. That's the optimistic approach. - Strategy 2 (base-case): Always choose the short-term rating that a rating agency would usually assign if there aren't any special liquidity issues (positive or negative). That's the base-case approach. - Strategy 3 (worst): Always choose the worst possible short-term rating. That's the conservative approach. Returns ------- Union[str, pd.Series, pd.DataFrame] Regular ratings according to `rating_provider`'s rating scale. Raises ------ ValueError If providing a single rating score and `rating_provider` is None. Examples -------- Converting a single long-term rating score: >>> get_ratings_from_scores(rating_scores=9, rating_provider="Fitch") 'BBB' Converting a single short-term rating score with different `short_term_strategy` arguments: >>> get_ratings_from_scores( ... rating_scores=10, ... rating_provider="DBRS", ... tenor="short-term", ... short_term_strategy="best", ... ) 'R-2M' >>> get_ratings_from_scores( ... rating_scores=10, ... rating_provider="DBRS", ... tenor="short-term", ... short_term_strategy="base", ... ) 'R-3' >>> get_ratings_from_scores( ... rating_scores=10, ... rating_provider="DBRS", ... tenor="short-term", ... short_term_strategy="worst", ... 
) 'R-3' Converting a ``pd.Series`` with scores: >>> import pandas as pd >>> rating_scores_series = pd.Series(data=[5, 7, 1, np.nan, 22, pd.NA]) >>> get_ratings_from_scores( ... rating_scores=rating_scores_series, ... rating_provider="Moody's", ... tenor="long-term", ... ) 0 A1 1 A3 2 Aaa 3 NaN 4 D 5 NaN Name: rtg_Moody, dtype: object Providing a ``pd.Series`` without specifying a `rating_provider`: >>> rating_scores_series = pd.Series( ... data=[5, 7, 1, np.nan, 22, pd.NA], ... name="Moody", ... ) >>> get_ratings_from_scores(rating_scores=rating_scores_series) 0 A1 1 A3 2 Aaa 3 NaN 4 D 5 NaN Name: rtg_Moody, dtype: object Converting a ``pd.DataFrame`` with scores: >>> rating_scores_df = pd.DataFrame( ... data=[[11, 16, "foo"], [4, 2, 1], [22, "bar", 22]] ... ) >>> get_ratings_from_scores( ... rating_scores=rating_scores_df, ... rating_provider=["Fitch", "Bloomberg", "DBRS"], ... tenor="long-term", ... ) rtg_Fitch rtg_Bloomberg rtg_DBRS 0 BB+ B- NaN 1 AA- AA+ AAA 2 D NaN D When providing a ``pd.DataFrame`` without explicitly providing the `rating_provider`, they will be inferred by the dataframe's columns. >>> rating_scores_df = pd.DataFrame( ... data={ ... "rtg_fitch": [11, 4, 22], ... "rtg_Bloomberg": [16, 2, "foo"], ... "DBRS Ratings": ["bar", 1, 22], ... } ... ) >>> get_ratings_from_scores(rating_scores=rating_scores_df) rtg_Fitch rtg_Bloomberg rtg_DBRS 0 BB+ B- NaN 1 AA- AA+ AAA 2 D NaN D
def get_ratings_from_scores(
    rating_scores: int | float | pd.Series | pd.DataFrame,
    rating_provider: str | list[str] | None = None,
    tenor: str = "long-term",
    short_term_strategy: str | None = None,
) -> str | pd.Series | pd.DataFrame:
    """Convert numerical rating scores into regular ratings.

    Parameters
    ----------
    rating_scores
        Numerical rating score(s).
    rating_provider
        Should contain any valid rating provider out of
        {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}.

        If None, `rating_provider` will be inferred from the series name or
        dataframe column names.
    tenor
        Should contain any valid tenor out of {"long-term", "short-term"}.
    short_term_strategy
        Will only be used, if `tenor` is "short-term". Choose between three distinct
        strategies in order to translate a long-term rating score into a short-term
        rating. Must be in {"best", "base", "worst"}.
        Compare
        https://hsbc.github.io/pyratings/short-term-rating/#there's-one-more-catch...

        - Strategy 1 (best): Always choose the best possible short-term rating.
          That's the optimistic approach.
        - Strategy 2 (base-case): Always choose the short-term rating that a rating
          agency would usually assign if there aren't any special liquidity issues
          (positive or negative). That's the base-case approach.
        - Strategy 3 (worst): Always choose the worst possible short-term rating.
          That's the conservative approach.

    Returns
    -------
    Union[str, pd.Series, pd.DataFrame]
        Regular ratings according to `rating_provider`'s rating scale.

    Raises
    ------
    ValueError
        If providing a single rating score and `rating_provider` is None.

    Examples
    --------
    Converting a single long-term rating score:

    >>> get_ratings_from_scores(rating_scores=9, rating_provider="Fitch")
    'BBB'

    Converting a single short-term rating score with different
    `short_term_strategy` arguments:

    >>> get_ratings_from_scores(
    ...     rating_scores=10,
    ...     rating_provider="DBRS",
    ...     tenor="short-term",
    ...     short_term_strategy="best",
    ... )
    'R-2M'

    >>> get_ratings_from_scores(
    ...     rating_scores=10,
    ...     rating_provider="DBRS",
    ...     tenor="short-term",
    ...     short_term_strategy="base",
    ... )
    'R-3'

    >>> get_ratings_from_scores(
    ...     rating_scores=10,
    ...     rating_provider="DBRS",
    ...     tenor="short-term",
    ...     short_term_strategy="worst",
    ... )
    'R-3'

    Converting a ``pd.Series`` with scores:

    >>> import pandas as pd
    >>> rating_scores_series = pd.Series(data=[5, 7, 1, np.nan, 22, pd.NA])
    >>> get_ratings_from_scores(
    ...     rating_scores=rating_scores_series,
    ...     rating_provider="Moody's",
    ...     tenor="long-term",
    ... )
    0     A1
    1     A3
    2    Aaa
    3    NaN
    4      D
    5    NaN
    Name: rtg_Moody, dtype: object

    Providing a ``pd.Series`` without specifying a `rating_provider`:

    >>> rating_scores_series = pd.Series(
    ...     data=[5, 7, 1, np.nan, 22, pd.NA],
    ...     name="Moody",
    ... )
    >>> get_ratings_from_scores(rating_scores=rating_scores_series)
    0     A1
    1     A3
    2    Aaa
    3    NaN
    4      D
    5    NaN
    Name: rtg_Moody, dtype: object

    Converting a ``pd.DataFrame`` with scores:

    >>> rating_scores_df = pd.DataFrame(
    ...     data=[[11, 16, "foo"], [4, 2, 1], [22, "bar", 22]]
    ... )
    >>> get_ratings_from_scores(
    ...     rating_scores=rating_scores_df,
    ...     rating_provider=["Fitch", "Bloomberg", "DBRS"],
    ...     tenor="long-term",
    ... )
      rtg_Fitch rtg_Bloomberg rtg_DBRS
    0       BB+            B-      NaN
    1       AA-           AA+      AAA
    2         D           NaN        D

    When providing a ``pd.DataFrame`` without explicitly providing the
    `rating_provider`, they will be inferred by the dataframe's columns.

    >>> rating_scores_df = pd.DataFrame(
    ...     data={
    ...         "rtg_fitch": [11, 4, 22],
    ...         "rtg_Bloomberg": [16, 2, "foo"],
    ...         "DBRS Ratings": ["bar", 1, 22],
    ...     }
    ... )
    >>> get_ratings_from_scores(rating_scores=rating_scores_df)
      rtg_Fitch rtg_Bloomberg rtg_DBRS
    0       BB+            B-      NaN
    1       AA-           AA+      AAA
    2         D           NaN        D
    """
    # `short_term_strategy` only matters for short-term tenors; default to the
    # "base" (base-case) strategy when the caller did not choose one.
    if tenor == "short-term" and short_term_strategy is None:
        short_term_strategy = "base"
    if tenor == "short-term" and short_term_strategy not in ["best", "base", "worst"]:
        raise ValueError(
            "Invalid short_term_strategy. Must be in ['best', 'base', 'worst']."
        )

    # --- scalar input ---------------------------------------------------------
    if isinstance(rating_scores, (int, float, np.number)):
        if rating_provider is None:
            raise ValueError(VALUE_ERROR_PROVIDER_MANDATORY)
        rating_provider = _extract_rating_provider(
            rating_provider=rating_provider,
            valid_rtg_provider=valid_rtg_agncy[tenor],
        )
        rtg_dict = _get_translation_dict(
            "scores_to_rtg",
            rating_provider=rating_provider,
            tenor=tenor,
            st_rtg_strategy=short_term_strategy,
        )
        # NaN scores cannot be rounded/looked up; they fall through and the
        # function implicitly returns None in that case.
        if not np.isnan(rating_scores):
            # Round half-up via Decimal (np.round would round half-to-even).
            rating_scores = int(Decimal(f"{rating_scores}").quantize(0, ROUND_HALF_UP))
            if tenor == "long-term":
                # long-term: direct score -> rating lookup
                return rtg_dict.get(rating_scores, pd.NA)
            else:
                # short-term: `rtg_dict` is a table with Min/Max score ranges;
                # pick the rating whose range contains the score.
                try:
                    return rtg_dict.loc[
                        (rating_scores >= rtg_dict["MinScore"])
                        & (rating_scores <= rtg_dict["MaxScore"]),
                        "Rating",
                    ].iloc[0]
                except IndexError:
                    # score outside every range -> no rating
                    return np.nan

    # --- pd.Series input ------------------------------------------------------
    elif isinstance(rating_scores, pd.Series):
        # Infer the provider from the series name if not given explicitly.
        if rating_provider is None:
            rating_provider = _extract_rating_provider(
                rating_provider=rating_scores.name,
                valid_rtg_provider=valid_rtg_agncy[tenor],
            )
        else:
            rating_provider = _extract_rating_provider(
                rating_provider=rating_provider,
                valid_rtg_provider=valid_rtg_agncy[tenor],
            )
        rtg_dict = _get_translation_dict(
            "scores_to_rtg",
            rating_provider,
            tenor=tenor,
            st_rtg_strategy=short_term_strategy,
        )

        # round element to full integer, if element is number
        rating_scores = rating_scores.apply(
            lambda x: np.round(x, 0) if isinstance(x, (int, float, np.number)) else x
        )

        if tenor == "long-term":
            return pd.Series(
                data=rating_scores.map(rtg_dict), name=f"rtg_{rating_provider}"
            )
        else:
            # short-term: per-element range lookup; non-numeric / unmatched
            # entries become <NA> (TypeError covers e.g. NaN comparisons).
            out = []
            for score in rating_scores:
                try:
                    out.append(
                        rtg_dict.loc[
                            (score >= rtg_dict["MinScore"])
                            & (score <= rtg_dict["MaxScore"]),
                            "Rating",
                        ].iloc[0]
                    )
                except (IndexError, TypeError):
                    out.append(pd.NA)

            return pd.Series(data=out, name=f"rtg_{rating_provider}")

    # --- pd.DataFrame input ---------------------------------------------------
    elif isinstance(rating_scores, pd.DataFrame):
        # Infer one provider per column if not given explicitly.
        if rating_provider is None:
            rating_provider = _extract_rating_provider(
                rating_provider=rating_scores.columns.to_list(),
                valid_rtg_provider=valid_rtg_agncy[tenor],
            )
        else:
            rating_provider = _extract_rating_provider(
                rating_provider=rating_provider,
                valid_rtg_provider=valid_rtg_agncy[tenor],
            )

        # Recursive call of 'get_ratings_from_score' for every column in dataframe
        return pd.concat(
            [
                get_ratings_from_scores(
                    rating_scores=rating_scores[col],
                    rating_provider=provider,
                    tenor=tenor,
                    short_term_strategy=short_term_strategy,
                )
                for col, provider in zip(  # noqa: B905
                    rating_scores.columns, rating_provider
                )
            ],
            axis=1,
        )
(rating_scores: int | float | pandas.core.series.Series | pandas.core.frame.DataFrame, rating_provider: Union[str, list[str], NoneType] = None, tenor: str = 'long-term', short_term_strategy: Optional[str] = None) -> str | pandas.core.series.Series | pandas.core.frame.DataFrame
20,548
pyratings.get_ratings
get_ratings_from_warf
Convert WARFs into regular ratings. Parameters ---------- warf Numerical WARF(s). rating_provider Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. Returns ------- Union[str, pd.Series, pd.DataFrame] Regular rating(s) according to `rating_provider`'s rating scale. Examples -------- Converting a single WARF: >>> get_ratings_from_warf(warf=610, rating_provider="DBRS") 'BBBL' >>> get_ratings_from_warf(warf=1234.5678, rating_provider="SP") 'BB' Converting a ``pd.Series`` with WARFs: >>> import pandas as pd >>> warf_series = pd.Series(data=[90, 218.999, 1, np.nan, 10000, pd.NA]) >>> get_ratings_from_warf( ... warf=warf_series, ... rating_provider="Moody's", ... ) 0 A1 1 A3 2 Aaa 3 NaN 4 D 5 NaN Name: rtg_Moody, dtype: object Converting a ``pd.DataFrame`` with WARFs: >>> warf_df = pd.DataFrame( ... data=[[940, 4000, "foo"], [54, 13.5, 1], [10000, "bar", 9999]] ... ) >>> get_ratings_from_warf( ... warf=warf_df, ... rating_provider=["Fitch", "Bloomberg", "DBRS"], ... ) rtg_Fitch rtg_Bloomberg rtg_DBRS 0 BB+ B- NaN 1 AA- AA+ AAA 2 D NaN C
def get_ratings_from_warf(
    warf: int | float | pd.Series | pd.DataFrame,
    rating_provider: str | list[str] | None = None,
) -> str | pd.Series | pd.DataFrame:
    """Convert WARFs into regular ratings.

    The conversion is a two-step pipeline: WARF -> rating score -> rating,
    always on the long-term scale.

    Parameters
    ----------
    warf
        Numerical WARF(s).
    rating_provider
        Should contain any valid rating provider out of
        {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}.

    Returns
    -------
    Union[str, pd.Series, pd.DataFrame]
        Regular rating(s) according to `rating_provider`'s rating scale.

    Examples
    --------
    Converting a single WARF:

    >>> get_ratings_from_warf(warf=610, rating_provider="DBRS")
    'BBBL'

    >>> get_ratings_from_warf(warf=1234.5678, rating_provider="SP")
    'BB'

    Converting a ``pd.Series`` with WARFs:

    >>> import pandas as pd
    >>> warf_series = pd.Series(data=[90, 218.999, 1, np.nan, 10000, pd.NA])
    >>> get_ratings_from_warf(
    ...     warf=warf_series,
    ...     rating_provider="Moody's",
    ... )
    0     A1
    1     A3
    2    Aaa
    3    NaN
    4      D
    5    NaN
    Name: rtg_Moody, dtype: object

    Converting a ``pd.DataFrame`` with WARFs:

    >>> warf_df = pd.DataFrame(
    ...     data=[[940, 4000, "foo"], [54, 13.5, 1], [10000, "bar", 9999]]
    ... )
    >>> get_ratings_from_warf(
    ...     warf=warf_df,
    ...     rating_provider=["Fitch", "Bloomberg", "DBRS"],
    ... )
      rtg_Fitch rtg_Bloomberg rtg_DBRS
    0       BB+            B-      NaN
    1       AA-           AA+      AAA
    2         D           NaN        C
    """
    if isinstance(warf, (int, float, np.number)):
        # A scalar WARF needs an explicit provider; validate it up front.
        if rating_provider is None:
            raise ValueError(VALUE_ERROR_PROVIDER_MANDATORY)

        provider = _extract_rating_provider(
            rating_provider=rating_provider,
            valid_rtg_provider=valid_rtg_agncy["long-term"],
        )
        return get_ratings_from_scores(
            rating_scores=get_scores_from_warf(warf=warf),
            rating_provider=provider,
            tenor="long-term",
        )

    if isinstance(warf, (pd.Series, pd.DataFrame)):
        # Series/DataFrame: provider inference is delegated to
        # `get_ratings_from_scores`.
        return get_ratings_from_scores(
            rating_scores=get_scores_from_warf(warf=warf),
            rating_provider=rating_provider,
            tenor="long-term",
        )
(warf: int | float | pandas.core.series.Series | pandas.core.frame.DataFrame, rating_provider: Union[str, list[str], NoneType] = None) -> str | pandas.core.series.Series | pandas.core.frame.DataFrame
20,550
pyratings.get_scores
get_scores_from_ratings
Convert regular ratings into numerical rating scores. Parameters ---------- ratings Rating(s) to be translated into rating score(s). rating_provider Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. If None, `rating_provider` will be inferred from the series name or dataframe column names. tenor Should contain any valid tenor out of {"long-term", "short-term"} Returns ------- Union[int, pd.Series, pd.DataFrame] Numerical rating score(s) If returns a ``pd.Series``, the series name will be `rtg_score` suffixed by `ratings.name`. If return a ``pd.DataFrame``, the column names will be `rtg_score` suffixed by the respective `ratings.columns`. Raises ------ ValueError If providing a single rating and `rating_provider` is None. Examples -------- Converting a single long-term rating: >>> get_scores_from_ratings( ... ratings="BBB-", rating_provider="S&P", tenor="long-term" ... ) 10 Converting a single short-term rating score: >>> get_scores_from_ratings( ... ratings="P-1", rating_provider="Moody", tenor="short-term" ... ) 3.5 Converting a ``pd.Series`` of ratings: >>> import pandas as pd >>> ratings_series = pd.Series( ... data=["Baa1", "C", "NR", "WD", "D", "B1", "SD"], name='Moody' ... ) >>> get_scores_from_ratings( ... ratings=ratings_series, rating_provider="Moody's", tenor="long-term" ... ) 0 8.0 1 21.0 2 NaN 3 NaN 4 22.0 5 14.0 6 22.0 Name: rtg_score_Moody, dtype: float64 Providing a ``pd.Series`` without specifying a `rating_provider`: >>> ratings_series = pd.Series( ... data=["Baa1", "C", "NR", "WD", "D", "B1", "SD"], name="Moody" ... ) >>> get_scores_from_ratings(ratings=ratings_series) 0 8.0 1 21.0 2 NaN 3 NaN 4 22.0 5 14.0 6 22.0 Name: rtg_score_Moody, dtype: float64 Converting a ``pd.DataFrame`` with ratings: >>> ratings_df = pd.DataFrame( ... data=[["BB+", "B3", "BBB-"], ["AA-", "Aa1", "AAA"], ["D", "NR", "D"]], ... columns=["SP", "Moody", "DBRS"], ... ) >>> get_scores_from_ratings( ... ratings=ratings_df, ... 
rating_provider=["S&P", "Moody's", "DBRS"], ... tenor="long-term", ... ) rtg_score_SP rtg_score_Moody rtg_score_DBRS 0 11 16.0 NaN 1 4 2.0 1.0 2 22 NaN 22.0 When providing a ``pd.DataFrame`` without explicitly providing the `rating_provider`, they will be inferred from the dataframe's columns. >>> ratings_df = pd.DataFrame( ... data={ ... "rtg_fitch": ["BB+", "AA-", "D"], ... "rtg_Bloomberg": ["B-", "AA+", "NR"], ... "DBRS Ratings": ["BBB-", "AAA", "D"], ... } ... ) >>> get_scores_from_ratings(ratings=ratings_df) rtg_score_rtg_fitch rtg_score_rtg_Bloomberg rtg_score_DBRS Ratings 0 11 16.0 NaN 1 4 2.0 1.0 2 22 NaN 22.0
def get_scores_from_ratings(
    ratings: str | pd.Series | pd.DataFrame,
    rating_provider: str | list[str] | None = None,
    tenor: str = "long-term",
) -> int | pd.Series | pd.DataFrame:
    """Convert regular ratings into numerical rating scores.

    Parameters
    ----------
    ratings
        Rating(s) to be translated into rating score(s).
    rating_provider
        Should contain any valid rating provider out of
        {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}.

        If None, `rating_provider` will be inferred from the series name or
        dataframe column names.
    tenor
        Should contain any valid tenor out of {"long-term", "short-term"}

    Returns
    -------
    Union[int, pd.Series, pd.DataFrame]
        Numerical rating score(s)

        If returns a ``pd.Series``, the series name will be `rtg_score`
        suffixed by `ratings.name`.

        If return a ``pd.DataFrame``, the column names will be `rtg_score`
        suffixed by the respective `ratings.columns`.

    Raises
    ------
    ValueError
        If providing a single rating and `rating_provider` is None.

    Examples
    --------
    Converting a single long-term rating:

    >>> get_scores_from_ratings(
    ...     ratings="BBB-", rating_provider="S&P", tenor="long-term"
    ... )
    10

    Converting a single short-term rating score:

    >>> get_scores_from_ratings(
    ...     ratings="P-1", rating_provider="Moody", tenor="short-term"
    ... )
    3.5

    Converting a ``pd.Series`` of ratings:

    >>> import pandas as pd
    >>> ratings_series = pd.Series(
    ...     data=["Baa1", "C", "NR", "WD", "D", "B1", "SD"], name='Moody'
    ... )
    >>> get_scores_from_ratings(
    ...     ratings=ratings_series, rating_provider="Moody's", tenor="long-term"
    ... )
    0     8.0
    1    21.0
    2     NaN
    3     NaN
    4    22.0
    5    14.0
    6    22.0
    Name: rtg_score_Moody, dtype: float64

    Providing a ``pd.Series`` without specifying a `rating_provider`:

    >>> ratings_series = pd.Series(
    ...     data=["Baa1", "C", "NR", "WD", "D", "B1", "SD"], name="Moody"
    ... )
    >>> get_scores_from_ratings(ratings=ratings_series)
    0     8.0
    1    21.0
    2     NaN
    3     NaN
    4    22.0
    5    14.0
    6    22.0
    Name: rtg_score_Moody, dtype: float64

    Converting a ``pd.DataFrame`` with ratings:

    >>> ratings_df = pd.DataFrame(
    ...     data=[["BB+", "B3", "BBB-"], ["AA-", "Aa1", "AAA"], ["D", "NR", "D"]],
    ...     columns=["SP", "Moody", "DBRS"],
    ... )
    >>> get_scores_from_ratings(
    ...     ratings=ratings_df,
    ...     rating_provider=["S&P", "Moody's", "DBRS"],
    ...     tenor="long-term",
    ... )
       rtg_score_SP  rtg_score_Moody  rtg_score_DBRS
    0            11             16.0             NaN
    1             4              2.0             1.0
    2            22              NaN            22.0

    When providing a ``pd.DataFrame`` without explicitly providing the
    `rating_provider`, they will be inferred from the dataframe's columns.

    >>> ratings_df = pd.DataFrame(
    ...     data={
    ...         "rtg_fitch": ["BB+", "AA-", "D"],
    ...         "rtg_Bloomberg": ["B-", "AA+", "NR"],
    ...         "DBRS Ratings": ["BBB-", "AAA", "D"],
    ...     }
    ... )
    >>> get_scores_from_ratings(ratings=ratings_df)
       rtg_score_rtg_fitch  rtg_score_rtg_Bloomberg  rtg_score_DBRS Ratings
    0                   11                     16.0                     NaN
    1                    4                      2.0                     1.0
    2                   22                      NaN                    22.0
    """
    # --- scalar (str) input ---------------------------------------------------
    if isinstance(ratings, str):
        # A single rating string carries no provider information, so the
        # provider must be given explicitly.
        if rating_provider is None:
            raise ValueError(VALUE_ERROR_PROVIDER_MANDATORY)

        rating_provider = _extract_rating_provider(
            rating_provider=rating_provider,
            valid_rtg_provider=valid_rtg_agncy[tenor],
        )

        # NOTE(review): st_rtg_strategy is fixed to "base" for the
        # rating -> score direction; presumably scores are strategy-independent
        # here — confirm against `_get_translation_dict`.
        rtg_dict = _get_translation_dict(
            "rtg_to_scores",
            rating_provider,
            tenor=tenor,
            st_rtg_strategy="base",
        )
        # Unknown ratings (e.g. "NR", "WD") are not in the dict -> <NA>.
        return rtg_dict.get(ratings, pd.NA)

    # --- pd.Series input ------------------------------------------------------
    elif isinstance(ratings, pd.Series):
        # Infer the provider from the series name if not given explicitly.
        if rating_provider is None:
            rating_provider = _extract_rating_provider(
                rating_provider=ratings.name,
                valid_rtg_provider=valid_rtg_agncy[tenor],
            )
        else:
            rating_provider = _extract_rating_provider(
                rating_provider=rating_provider,
                valid_rtg_provider=valid_rtg_agncy[tenor],
            )

        rtg_dict = _get_translation_dict(
            "rtg_to_scores",
            rating_provider,
            tenor=tenor,
            st_rtg_strategy="base",
        )
        # Element-wise translation; unmapped ratings become NaN.
        return pd.Series(data=ratings.map(rtg_dict), name=f"rtg_score_{ratings.name}")

    # --- pd.DataFrame input ---------------------------------------------------
    elif isinstance(ratings, pd.DataFrame):
        # Infer one provider per column if not given explicitly.
        if rating_provider is None:
            rating_provider = _extract_rating_provider(
                rating_provider=ratings.columns.to_list(),
                valid_rtg_provider=valid_rtg_agncy[tenor],
            )
        else:
            rating_provider = _extract_rating_provider(
                rating_provider=rating_provider,
                valid_rtg_provider=valid_rtg_agncy[tenor],
            )

        # Recursive call of `get_scores_from_ratings`
        return pd.concat(
            [
                get_scores_from_ratings(
                    ratings=ratings[col],
                    rating_provider=provider,
                    tenor=tenor,
                )
                for col, provider in zip(ratings.columns, rating_provider)  # noqa: B905
            ],
            axis=1,
        )
(ratings: str | pandas.core.series.Series | pandas.core.frame.DataFrame, rating_provider: Union[str, list[str], NoneType] = None, tenor: str = 'long-term') -> int | pandas.core.series.Series | pandas.core.frame.DataFrame
20,551
pyratings.get_scores
get_scores_from_warf
Convert weighted average rating factors (WARFs) into numerical rating scores. Parameters ---------- warf Weighted average rating factor (WARF). Returns ------- Union[int, float, pd.Series, pd.DataFrame] Numerical rating score(s). Examples -------- Converting a single WARF: >>> get_scores_from_warf(500) 10 >>> get_scores_from_warf(1992.9999) 13 Converting a ``pd.Series`` of WARFs: >>> import numpy as np >>> import pandas as pd >>> warf_series = pd.Series(data=[260, 9999.49, np.nan, 10000, 2469.99, 2470]) >>> get_scores_from_warf(warf=warf_series) 0 8.0 1 21.0 2 NaN 3 22.0 4 14.0 5 15.0 Name: rtg_score, dtype: float64 Converting a ``pd.DataFrame`` of WARFs: >>> warf_df = pd.DataFrame( ... data={ ... "provider1": [900, 40, 10000], ... "provider2": [3000, 10, np.nan], ... "provider3": [610, 1, 9999.49], ... } ... ) >>> get_scores_from_warf(warf=warf_df) rtg_score_provider1 rtg_score_provider2 rtg_score_provider3 0 11 15.0 10 1 4 2.0 1 2 22 NaN 21
def get_scores_from_warf(
    warf: int | float | pd.Series | pd.DataFrame,
) -> int | float | pd.Series | pd.DataFrame:
    """Convert weighted average rating factors (WARFs) into numerical rating scores.

    Parameters
    ----------
    warf
        Weighted average rating factor (WARF).

    Returns
    -------
    Union[int, float, pd.Series, pd.DataFrame]
        Numerical rating score(s).

    Examples
    --------
    Converting a single WARF:

    >>> get_scores_from_warf(500)
    10

    >>> get_scores_from_warf(1992.9999)
    13

    Converting a ``pd.Series`` of WARFs:

    >>> import numpy as np
    >>> import pandas as pd
    >>> warf_series = pd.Series(data=[260, 9999.49, np.nan, 10000, 2469.99, 2470])
    >>> get_scores_from_warf(warf=warf_series)
    0     8.0
    1    21.0
    2     NaN
    3    22.0
    4    14.0
    5    15.0
    Name: rtg_score, dtype: float64

    Converting a ``pd.DataFrame`` of WARFs:

    >>> warf_df = pd.DataFrame(
    ...     data={
    ...         "provider1": [900, 40, 10000],
    ...         "provider2": [3000, 10, np.nan],
    ...         "provider3": [610, 1, 9999.49],
    ...     }
    ... )
    >>> get_scores_from_warf(warf=warf_df)
       rtg_score_provider1  rtg_score_provider2  rtg_score_provider3
    0                   11                 15.0                   10
    1                    4                  2.0                    1
    2                   22                  NaN                   21
    """

    def _get_scores_from_warf_db(
        wrf: int | float | pd.Series | pd.DataFrame,
    ) -> int | float:
        # Translate a single WARF via the WARFs lookup table in the ratings DB.
        #
        # BUGFIX: the previous guard read
        # `not isinstance(wrf, (int, float, np.number) or np.isnan(wrf))` —
        # the `or` was inside the isinstance type argument, so np.isnan was
        # never evaluated (NaN was only rejected by accident because
        # `1 <= nan` is False). The short-circuiting below also keeps
        # non-numeric values (e.g. pd.NA) from ever reaching np.isnan.
        if (
            not isinstance(wrf, (int, float, np.number))
            or np.isnan(wrf)
            or not 1 <= wrf <= 10_000
        ):
            return np.nan

        if wrf == 10_000:
            # Upper bound maps to the worst score (D); handled explicitly
            # because MaxWARF is an exclusive bound in the lookup table.
            return 22

        # connect to database (one short-lived connection per value)
        connection = sqlite3.connect(RATINGS_DB)
        try:
            cursor = connection.cursor()

            # create SQL query; MinWARF is inclusive, MaxWARF exclusive
            sql_query = "SELECT RatingScore FROM WARFs WHERE ? >= MinWARF and ? < MaxWARF"

            # execute SQL query
            cursor.execute(sql_query, (wrf, wrf))
            rtg_score = cursor.fetchall()
        finally:
            # close database connection even if the query raises
            connection.close()

        return rtg_score[0][0]

    if isinstance(warf, (int, float, np.number)):
        return _get_scores_from_warf_db(warf)

    elif isinstance(warf, pd.Series):
        rating_scores = warf.apply(_get_scores_from_warf_db)
        rating_scores.name = "rtg_score"
        return rating_scores

    elif isinstance(warf, pd.DataFrame):
        # element-wise translation; column names get the 'rtg_score_' prefix
        return warf.applymap(_get_scores_from_warf_db).add_prefix("rtg_score_")
(warf: int | float | pandas.core.series.Series | pandas.core.frame.DataFrame) -> int | float | pandas.core.series.Series | pandas.core.frame.DataFrame
20,552
pyratings.consolidate
get_second_best_ratings
Compute the second-best rating on a security level basis across rating agencies. Parameters ---------- ratings Dataframe consisting of clean ratings (i.e. stripped off of watches/outlooks) rating_provider_input Indicates rating providers within `ratings`. Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. If None, `rating_provider_input` will be inferred from the dataframe column names. rating_provider_output Indicates which rating scale will be used for output results. Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. tenor Should contain any valid tenor out of {"long-term", "short-term"} Returns ------- pd.Series Second-best ratings on a security level basis. Examples -------- >>> import pandas as pd >>> ratings_df = pd.DataFrame( ... data=( ... { ... "rating_S&P": ['AAA', 'AA-', 'AA+', 'BB-', 'C'], ... "rating_Moody's": ['Aa1', 'Aa3', 'Aa2', 'Ba3', 'Ca'], ... "rating_Fitch": ['AA-', 'AA-', 'AA-', 'B+', 'C'], ... } ... ) ... ) >>> get_second_best_ratings( ... ratings_df, rating_provider_input=["S&P", "Moody", "Fitch"] ... ) 0 AA+ 1 AA- 2 AA 3 BB- 4 C Name: second_best_rtg, dtype: object
def get_second_best_ratings(
    ratings: pd.DataFrame,
    rating_provider_input: list[str] = None,
    rating_provider_output: Literal[
        "Fitch", "Moody", "S&P", "Bloomberg", "DBRS"
    ] = "S&P",
    tenor: Literal["long-term", "short-term"] = "long-term",
) -> pd.Series:
    """Determine, per security, the second-best rating across rating agencies.

    This is a thin convenience wrapper around `consolidate_ratings` with
    ``method="second_best"``.

    Parameters
    ----------
    ratings
        Clean ratings (i.e. stripped off of watches/outlooks), one column per
        rating agency.
    rating_provider_input
        Rating providers corresponding to the columns of `ratings`. Should
        contain any valid rating provider out of
        {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}.
        If None, providers are inferred from the dataframe column names.
    rating_provider_output
        Rating scale used for the output. Should contain any valid rating
        provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}.
    tenor
        Should contain any valid tenor out of {"long-term", "short-term"}.

    Returns
    -------
    pd.Series
        Second-best ratings on a security level basis.

    Examples
    --------
    >>> import pandas as pd
    >>> ratings_df = pd.DataFrame(
    ...     data=(
    ...         {
    ...             "rating_S&P": ['AAA', 'AA-', 'AA+', 'BB-', 'C'],
    ...             "rating_Moody's": ['Aa1', 'Aa3', 'Aa2', 'Ba3', 'Ca'],
    ...             "rating_Fitch": ['AA-', 'AA-', 'AA-', 'B+', 'C'],
    ...         }
    ...     )
    ... )
    >>> get_second_best_ratings(
    ...     ratings_df, rating_provider_input=["S&P", "Moody", "Fitch"]
    ... )
    0    AA+
    1    AA-
    2     AA
    3    BB-
    4      C
    Name: second_best_rtg, dtype: object
    """
    # Delegate the heavy lifting to the generic consolidation routine.
    return consolidate_ratings(
        method="second_best",
        ratings=ratings,
        rating_provider_input=rating_provider_input,
        rating_provider_output=rating_provider_output,
        tenor=tenor,
    )
(ratings: pandas.core.frame.DataFrame, rating_provider_input: Optional[list[str]] = None, rating_provider_output: Literal['Fitch', 'Moody', 'S&P', 'Bloomberg', 'DBRS'] = 'S&P', tenor: Literal['long-term', 'short-term'] = 'long-term') -> pandas.core.series.Series
20,553
pyratings.consolidate
get_second_best_scores
Compute the second-best scores on a security level basis across rating agencies. Parameters ---------- ratings Dataframe consisting of clean ratings (i.e. stripped off of watches/outlooks) rating_provider_input Indicates rating providers within `ratings`. Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. If None, `rating_provider_input` will be inferred from the dataframe column names. tenor Should contain any valid tenor out of {"long-term", "short-term"} Returns ------- pd.Series Second-best scores on a security level basis. Examples -------- >>> import pandas as pd >>> ratings_df = pd.DataFrame( ... data=( ... { ... "rating_S&P": ['AAA', 'AA-', 'AA+', 'BB-', 'C'], ... "rating_Moody's": ['Aa1', 'Aa3', 'Aa2', 'Ba3', 'Ca'], ... "rating_Fitch": ['AA-', 'AA-', 'AA-', 'B+', 'C'], ... } ... ) ... ) >>> get_second_best_scores( ... ratings_df, rating_provider_input=["S&P", "Moody", "Fitch"] ... ) 0 2.0 1 4.0 2 3.0 3 13.0 4 21.0 Name: second_best_scores, dtype: float64
def get_second_best_scores(
    ratings: pd.DataFrame,
    rating_provider_input: list[str] = None,
    tenor: Literal["long-term", "short-term"] = "long-term",
) -> pd.Series:
    """Determine, per security, the second-best score across rating agencies.

    Parameters
    ----------
    ratings
        Dataframe of clean ratings (i.e. stripped off of watches/outlooks).
    rating_provider_input
        Rating providers corresponding to the columns of `ratings`. Should
        contain any valid rating provider out of
        {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}.
        If None, providers will be inferred from the dataframe column names.
    tenor
        Should contain any valid tenor out of {"long-term", "short-term"}.

    Returns
    -------
    pd.Series
        Second-best scores on a security level basis.

    Examples
    --------
    >>> import pandas as pd
    >>> ratings_df = pd.DataFrame(
    ...     data=(
    ...         {
    ...             "rating_S&P": ['AAA', 'AA-', 'AA+', 'BB-', 'C'],
    ...             "rating_Moody's": ['Aa1', 'Aa3', 'Aa2', 'Ba3', 'Ca'],
    ...             "rating_Fitch": ['AA-', 'AA-', 'AA-', 'B+', 'C'],
    ...         }
    ...     )
    ... )
    >>> get_second_best_scores(
    ...     ratings_df, rating_provider_input=["S&P", "Moody", "Fitch"]
    ... )
    0     2.0
    1     4.0
    2     3.0
    3    13.0
    4    21.0
    Name: second_best_scores, dtype: float64
    """
    scores_df = get_scores_from_ratings(
        ratings=ratings, rating_provider=rating_provider_input, tenor=tenor
    )

    # Rank scores ascending within each row (axis=1); rank 1 is the best
    # (lowest) score, rank 2 the second-best.
    ranks_df = scores_df.rank(axis=1, method="first", numeric_only=False)

    # Keep the two best-ranked scores per row and take their maximum: that is
    # the second-best score, or the only score when just one rating exists.
    second_best = scores_df[ranks_df <= 2].max(axis=1)
    second_best.name = "second_best_scores"

    return second_best
(ratings: pandas.core.frame.DataFrame, rating_provider_input: Optional[list[str]] = None, tenor: Literal['long-term', 'short-term'] = 'long-term') -> pandas.core.series.Series
20,555
pyratings.warf
get_warf_buffer
Compute WARF buffer. The WARF buffer is the distance from current WARF to the next maxWARF level. It determines the room until a further rating downgrade. Parameters ---------- warf Numerical WARF. Returns ------- Union[float, int] WARF buffer. Examples -------- >>> get_warf_buffer(warf=480) 5.0 >>> get_warf_buffer(warf=54) 1.0
def get_warf_buffer(warf: float | int) -> float | int:
    """Compute WARF buffer.

    The WARF buffer is the distance from current WARF to the next maxWARF
    level. It determines the room until a further rating downgrade.

    Parameters
    ----------
    warf
        Numerical WARF.

    Returns
    -------
    Union[float, int]
        WARF buffer.

    Raises
    ------
    IndexError
        If `warf` does not fall into any [MinWARF, MaxWARF) interval stored
        in the database.

    Examples
    --------
    >>> get_warf_buffer(warf=480)
    5.0

    >>> get_warf_buffer(warf=54)
    1.0
    """
    # Look up the interval [MinWARF, MaxWARF) that contains `warf`;
    # parameterized query, so `warf` is never interpolated into the SQL text.
    sql_query = "SELECT MaxWARF FROM WARFs WHERE ? >= MinWARF and ? < MaxWARF"

    connection = sqlite3.connect(RATINGS_DB)
    try:
        cursor = connection.cursor()
        cursor.execute(sql_query, (warf, warf))
        max_warf = cursor.fetchall()
    finally:
        # Always close the connection, even if the query raises.
        # (The previous version leaked the connection on error.)
        connection.close()

    return max_warf[0][0] - warf
(warf: float | int) -> float | int
20,556
pyratings.get_warf
get_warf_from_ratings
Convert regular rating(s) to numerical WARF(s). Parameters ---------- ratings Regular rating(s) to be translated into WARF(s). rating_provider Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. If None, `rating_provider` will be inferred from the series name or dataframe column names. Returns ------- Union[int, pd.Series, pd.DataFrame] Numerical WARF. If returns a ``pd.Series``, the series name will be `warf` suffixed by `ratings.name`. If return a ``pd.DataFrame``, the column names will be `warf` suffixed by the respective `ratings.columns`. Examples -------- Converting a single rating: >>> get_warf_from_ratings(ratings="BB-", rating_provider="Fitch") 1766 Converting a ``pd.Series`` with ratings: >>> import numpy as np >>> import pandas as pd >>> ratings_series = pd.Series(data=["A1", "A3", "Aaa", np.nan, "D", pd.NA]) >>> get_warf_from_ratings( ... ratings=ratings_series, rating_provider="Moody's" ... ) 0 70.0 1 180.0 2 1.0 3 NaN 4 10000.0 5 NaN Name: warf, dtype: float64 Providing a ``pd.Series`` without specifying a `rating_provider`: >>> ratings_series = pd.Series( ... data=["A1", "A3", "Aaa", np.nan, "D", pd.NA], ... name="Moody's" ... ) >>> get_warf_from_ratings(ratings=ratings_series) 0 70.0 1 180.0 2 1.0 3 NaN 4 10000.0 5 NaN Name: warf_Moody's, dtype: float64 Converting a ``pd.DataFrame`` with ratings: >>> ratings_df = pd.DataFrame( ... data=[["BB+", "B-", "foo"], ["AA-", "AA+", "AAA"], ["D", "bar", "C"]], ... columns=["Fitch", "Bloomberg", "DBRS"], ... ) >>> get_warf_from_ratings( ... ratings= ratings_df, rating_provider=["Fitch", "Bloomberg", "DBRS"] ... ) warf_Fitch warf_Bloomberg warf_DBRS 0 940 3490.0 NaN 1 40 10.0 1.0 2 10000 NaN 9999.0 When providing a ``pd.DataFrame`` without explicitly providing the `rating_provider`, they will be inferred by the dataframe's columns. >>> ratings_df = pd.DataFrame( ... data={ ... "rtg_fitch": ["BB+", "AA-", "D"], ... "rtg_Bloomberg": ["B-", "AA+", "bar"], ... 
"DBRS Ratings": ["foo", "AAA", "C"] ... } ... ) >>> get_warf_from_ratings(ratings=ratings_df) warf_rtg_fitch warf_rtg_Bloomberg warf_DBRS Ratings 0 940 3490.0 NaN 1 40 10.0 1.0 2 10000 NaN 9999.0
def get_warf_from_ratings(
    ratings: str | pd.Series | pd.DataFrame,
    rating_provider: str | list[str] | None = None,
) -> int | pd.Series | pd.DataFrame:
    """Convert regular rating(s) to numerical WARF(s).

    Parameters
    ----------
    ratings
        Regular rating(s) to be translated into WARF(s).
    rating_provider
        Should contain any valid rating provider out of
        {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}.
        If None, `rating_provider` will be inferred from the series name or
        dataframe column names.

    Returns
    -------
    Union[int, pd.Series, pd.DataFrame]
        Numerical WARF.
        If returns a ``pd.Series``, the series name will be `warf` suffixed
        by `ratings.name`.
        If return a ``pd.DataFrame``, the column names will be `warf` suffixed
        by the respective `ratings.columns`.

    Examples
    --------
    Converting a single rating:

    >>> get_warf_from_ratings(ratings="BB-", rating_provider="Fitch")
    1766

    Converting a ``pd.Series`` with ratings:

    >>> import numpy as np
    >>> import pandas as pd
    >>> ratings_series = pd.Series(data=["A1", "A3", "Aaa", np.nan, "D", pd.NA])
    >>> get_warf_from_ratings(
    ...     ratings=ratings_series, rating_provider="Moody's"
    ... )
    0       70.0
    1      180.0
    2        1.0
    3        NaN
    4    10000.0
    5        NaN
    Name: warf, dtype: float64

    Converting a ``pd.DataFrame`` with ratings:

    >>> ratings_df = pd.DataFrame(
    ...     data=[["BB+", "B-", "foo"], ["AA-", "AA+", "AAA"], ["D", "bar", "C"]],
    ...     columns=["Fitch", "Bloomberg", "DBRS"],
    ... )
    >>> get_warf_from_ratings(
    ...     ratings= ratings_df, rating_provider=["Fitch", "Bloomberg", "DBRS"]
    ... )
       warf_Fitch  warf_Bloomberg  warf_DBRS
    0         940          3490.0        NaN
    1          40            10.0        1.0
    2       10000             NaN     9999.0

    When providing a ``pd.DataFrame`` without explicitly providing the
    `rating_provider`, they will be inferred by the dataframe's columns.
    """
    # Normalize the caller-supplied provider spec to the canonical provider
    # name(s); WARFs only exist for long-term ratings, hence the long-term
    # validity list.
    if rating_provider is not None:
        rating_provider = _extract_rating_provider(
            rating_provider=rating_provider,
            valid_rtg_provider=valid_rtg_agncy["long-term"],
        )

    # Ratings are first translated into numerical scores, then scores into
    # WARFs via this translation table.
    warf_dict = _get_translation_dict("scores_to_warf")
    if isinstance(ratings, str):
        rating_scores = get_scores_from_ratings(
            ratings=ratings, rating_provider=rating_provider, tenor="long-term"
        )
        # Unknown ratings produce a score missing from the dict -> NaN.
        return warf_dict.get(rating_scores, np.nan)

    elif isinstance(ratings, (pd.Series, pd.DataFrame)):
        rating_scores = get_scores_from_ratings(
            ratings=ratings, rating_provider=rating_provider, tenor="long-term"
        )
        # Carry the original name/columns over so get_warf_from_scores can
        # derive its "warf_"-prefixed labels from them.
        if isinstance(ratings, pd.Series):
            rating_scores.name = ratings.name
        elif isinstance(ratings, pd.DataFrame):
            rating_scores.columns = ratings.columns

        return get_warf_from_scores(rating_scores=rating_scores)
    # NOTE(review): any other input type falls through and returns None
    # implicitly — presumably unintended; confirm before relying on it.
(ratings: str | pandas.core.series.Series | pandas.core.frame.DataFrame, rating_provider: Union[str, list[str], NoneType] = None) -> int | pandas.core.series.Series | pandas.core.frame.DataFrame
20,557
pyratings.get_warf
get_warf_from_scores
Convert numerical rating score(s) to numerical WARF(s). Parameters ---------- rating_scores Numerical rating score(s). Returns ------- Union[int, pd.Series, pd.DataFrame] Numerical WARF(s). If returns a ``pd.Series``, the series name will be `warf` suffixed by `rating_scores.name`. If return a ``pd.DataFrame``, the column names will be `warf` suffixed by the respective `rating_scores.columns`. Examples -------- Converting a single rating score: >>> get_warf_from_scores(10) 610 Converting a ``pd.Series`` with rating scores: >>> import pandas as pd >>> rating_scores_series = pd.Series(data=[5, 7, 1, np.nan, 22, pd.NA]) >>> get_warf_from_scores(rating_scores=rating_scores_series) 0 70.0 1 180.0 2 1.0 3 NaN 4 10000.0 5 NaN Name: warf, dtype: float64 Converting a ``pd.DataFrame`` with rating scores: >>> rating_scores_df = pd.DataFrame( ... data=[[11, 16, "foo"], [4, 2, 1], [22, "bar", 22]], ... columns=["provider1", "provider2", "provider3"], ... ) >>> get_warf_from_scores(rating_scores=rating_scores_df) warf_provider1 warf_provider2 warf_provider3 0 940 3490.0 NaN 1 40 10.0 1.0 2 10000 NaN 10000.0
def get_warf_from_scores(
    rating_scores: int | float | pd.Series | pd.DataFrame,
) -> int | pd.Series | pd.DataFrame:
    """Convert numerical rating score(s) to numerical WARF(s).

    Parameters
    ----------
    rating_scores
        Numerical rating score(s).

    Returns
    -------
    Union[int, pd.Series, pd.DataFrame]
        Numerical WARF(s).
        If returns a ``pd.Series``, the series name will be `warf` suffixed
        by `rating_scores.name`.
        If return a ``pd.DataFrame``, the column names will be `warf` suffixed
        by the respective `rating_scores.columns`.

    Examples
    --------
    Converting a single rating score:

    >>> get_warf_from_scores(10)
    610

    Converting a ``pd.Series`` with rating scores:

    >>> import pandas as pd
    >>> rating_scores_series = pd.Series(data=[5, 7, 1, np.nan, 22, pd.NA])
    >>> get_warf_from_scores(rating_scores=rating_scores_series)
    0       70.0
    1      180.0
    2        1.0
    3        NaN
    4    10000.0
    5        NaN
    Name: warf, dtype: float64

    Converting a ``pd.DataFrame`` with rating scores:

    >>> rating_scores_df = pd.DataFrame(
    ...     data=[[11, 16, "foo"], [4, 2, 1], [22, "bar", 22]],
    ...     columns=["provider1", "provider2", "provider3"],
    ... )
    >>> get_warf_from_scores(rating_scores=rating_scores_df)
       warf_provider1  warf_provider2  warf_provider3
    0             940          3490.0             NaN
    1              40            10.0             1.0
    2           10000             NaN         10000.0
    """
    score_to_warf = _get_translation_dict("scores_to_warf")

    # Scalar input: direct dictionary lookup; unmapped scores become NaN.
    if isinstance(rating_scores, (int, float, np.number)):
        return score_to_warf.get(rating_scores, np.nan)

    if isinstance(rating_scores, pd.Series):
        warf_series = pd.Series(data=rating_scores.map(score_to_warf))
        # Derive the output name from the input series name when present.
        if rating_scores.name is None:
            warf_series.name = "warf"
        else:
            warf_series.name = "warf_" + str(rating_scores.name)
        return warf_series

    if isinstance(rating_scores, pd.DataFrame):
        # Map every column independently and prefix the resulting columns.
        return rating_scores.apply(
            lambda column: column.map(score_to_warf)
        ).add_prefix("warf_")
(rating_scores: int | float | pandas.core.series.Series | pandas.core.frame.DataFrame) -> int | pandas.core.series.Series | pandas.core.frame.DataFrame
20,558
pyratings.aggregate
get_weighted_average
Compute weighted average. Parameters ---------- data Contains numerical values. weights Contains weights (between 0 and 1) with respect to `data`. Returns ------- float Weighted average data. Notes ----- Computing the weighted average is simply the sumproduct of `data` and `weights`. ``nan`` in `data` will be excluded from calculating the weighted average. All corresponding weights will be ignored. As a matter of fact, the remaining weights will be upscaled so that the weights of all ``non-nan`` rows in `data` will sum up to 1 (100%). Examples -------- >>> import numpy as np >>> import pandas as pd >>> rtg_scores = pd.Series(data=[5, 7, 9]) >>> wgt = pd.Series(data=[0.5, 0.3, 0.2]) >>> get_weighted_average(data=rtg_scores, weights=wgt) 6.4 >>> warf = pd.Series(data=[500, 735, np.nan, 93, np.nan]) >>> wgt = pd.Series(data=[0.4, 0.1, 0.1, 0.2, 0.2]) >>> get_weighted_average(data=warf, weights=wgt) 417.29
def get_weighted_average(data: pd.Series, weights: pd.Series) -> float:
    """Compute weighted average.

    Parameters
    ----------
    data
        Contains numerical values.
    weights
        Contains weights (between 0 and 1) with respect to `data`.

    Returns
    -------
    float
        Weighted average data.

    Notes
    -----
    Computing the weighted average is simply the sumproduct of `data` and
    `weights`. ``nan`` in `data` will be excluded from calculating the
    weighted average. All corresponding weights will be ignored. As a matter
    of fact, the remaining weights will be upscaled so that the weights of
    all ``non-nan`` rows in `data` will sum up to 1 (100%).

    Examples
    --------
    >>> import numpy as np
    >>> import pandas as pd

    >>> rtg_scores = pd.Series(data=[5, 7, 9])
    >>> wgt = pd.Series(data=[0.5, 0.3, 0.2])
    >>> get_weighted_average(data=rtg_scores, weights=wgt)
    6.4

    >>> warf = pd.Series(data=[500, 735, np.nan, 93, np.nan])
    >>> wgt = pd.Series(data=[0.4, 0.1, 0.1, 0.2, 0.2])
    >>> get_weighted_average(data=warf, weights=wgt)
    417.29
    """
    # Rows whose data is NaN contribute neither value nor weight.
    nan_idx = data[pd.isna(data)].index

    # Total weight that belongs to rows with an actual value ...
    usable_weight = 1 - sum(weights.loc[nan_idx])

    # ... and rescale so the usable weights sum to 1 (100%).
    rescaled_weights = weights / usable_weight

    # NaN rows are zeroed out, so they drop out of the sumproduct.
    return data.fillna(0).dot(rescaled_weights)
(data: pandas.core.series.Series, weights: pandas.core.series.Series) -> float
20,559
pyratings.consolidate
get_worst_ratings
Compute the worst rating on a security level basis across rating agencies. Parameters ---------- ratings Dataframe consisting of clean ratings (i.e. stripped off of watches/outlooks) rating_provider_input Indicates rating providers within `ratings`. Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. If None, `rating_provider_input` will be inferred from the dataframe column names. rating_provider_output Indicates which rating scale will be used for output results. Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. tenor Should contain any valid tenor out of {"long-term", "short-term"} Returns ------- pd.Series Worst ratings on a security level basis. Examples -------- >>> import pandas as pd >>> ratings_df = pd.DataFrame( ... data=( ... { ... "rating_S&P": ['AAA', 'AA-', 'AA+', 'BB-', 'C'], ... "rating_Moody's": ['Aa1', 'Aa3', 'Aa2', 'Ba3', 'Ca'], ... "rating_Fitch": ['AA-', 'AA-', 'AA-', 'B+', 'C'], ... } ... ) ... ) >>> get_worst_ratings(ratings_df, rating_provider_input=["S&P", "Moody", "Fitch"]) 0 AA- 1 AA- 2 AA- 3 B+ 4 C Name: worst_rtg, dtype: object
def get_worst_ratings(
    ratings: pd.DataFrame,
    rating_provider_input: list[str] = None,
    rating_provider_output: Literal[
        "Fitch", "Moody", "S&P", "Bloomberg", "DBRS"
    ] = "S&P",
    tenor: Literal["long-term", "short-term"] = "long-term",
) -> pd.Series:
    """Determine, per security, the worst rating across rating agencies.

    Parameters
    ----------
    ratings
        Dataframe of clean ratings (i.e. stripped off of watches/outlooks).
    rating_provider_input
        Rating providers corresponding to the columns of `ratings`. Should
        contain any valid rating provider out of
        {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}.
        If None, providers will be inferred from the dataframe column names.
    rating_provider_output
        Rating scale used for the output. Should contain any valid rating
        provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}.
    tenor
        Should contain any valid tenor out of {"long-term", "short-term"}.

    Returns
    -------
    pd.Series
        Worst ratings on a security level basis.

    Examples
    --------
    >>> import pandas as pd
    >>> ratings_df = pd.DataFrame(
    ...     data=(
    ...         {
    ...             "rating_S&P": ['AAA', 'AA-', 'AA+', 'BB-', 'C'],
    ...             "rating_Moody's": ['Aa1', 'Aa3', 'Aa2', 'Ba3', 'Ca'],
    ...             "rating_Fitch": ['AA-', 'AA-', 'AA-', 'B+', 'C'],
    ...         }
    ...     )
    ... )
    >>> get_worst_ratings(ratings_df, rating_provider_input=["S&P", "Moody", "Fitch"])
    0    AA-
    1    AA-
    2    AA-
    3     B+
    4      C
    Name: worst_rtg, dtype: object
    """
    # Delegate to the shared consolidation routine with the "worst" method.
    return consolidate_ratings(
        method="worst",
        ratings=ratings,
        rating_provider_input=rating_provider_input,
        rating_provider_output=rating_provider_output,
        tenor=tenor,
    )
(ratings: pandas.core.frame.DataFrame, rating_provider_input: Optional[list[str]] = None, rating_provider_output: Literal['Fitch', 'Moody', 'S&P', 'Bloomberg', 'DBRS'] = 'S&P', tenor: Literal['long-term', 'short-term'] = 'long-term') -> pandas.core.series.Series
20,560
pyratings.consolidate
get_worst_scores
Compute the worst scores on a security level basis across rating agencies. Parameters ---------- ratings Dataframe consisting of clean ratings (i.e. stripped off of watches/outlooks) rating_provider_input Indicates rating providers within `ratings`. Should contain any valid rating provider out of {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}. If None, `rating_provider_input` will be inferred from the dataframe column names. tenor Should contain any valid tenor out of {"long-term", "short-term"} Returns ------- pd.Series Worst scores on a security level basis. Examples -------- >>> import pandas as pd >>> ratings_df = pd.DataFrame( ... data=( ... { ... "rating_S&P": ['AAA', 'AA-', 'AA+', 'BB-', 'C'], ... "rating_Moody's": ['Aa1', 'Aa3', 'Aa2', 'Ba3', 'Ca'], ... "rating_Fitch": ['AA-', 'AA-', 'AA-', 'B+', 'C'], ... } ... ) ... ) >>> get_worst_scores(ratings_df, rating_provider_input=["S&P", "Moody", "Fitch"]) 0 4 1 4 2 4 3 14 4 21 Name: worst_scores, dtype: int64
def get_worst_scores(
    ratings: pd.DataFrame,
    rating_provider_input: list[str] = None,
    tenor: Literal["long-term", "short-term"] = "long-term",
) -> pd.Series:
    """Determine, per security, the worst score across rating agencies.

    Parameters
    ----------
    ratings
        Dataframe of clean ratings (i.e. stripped off of watches/outlooks).
    rating_provider_input
        Rating providers corresponding to the columns of `ratings`. Should
        contain any valid rating provider out of
        {"Fitch", "Moody's", "S&P", "Bloomberg", "DBRS"}.
        If None, providers will be inferred from the dataframe column names.
    tenor
        Should contain any valid tenor out of {"long-term", "short-term"}.

    Returns
    -------
    pd.Series
        Worst scores on a security level basis.

    Examples
    --------
    >>> import pandas as pd
    >>> ratings_df = pd.DataFrame(
    ...     data=(
    ...         {
    ...             "rating_S&P": ['AAA', 'AA-', 'AA+', 'BB-', 'C'],
    ...             "rating_Moody's": ['Aa1', 'Aa3', 'Aa2', 'Ba3', 'Ca'],
    ...             "rating_Fitch": ['AA-', 'AA-', 'AA-', 'B+', 'C'],
    ...         }
    ...     )
    ... )
    >>> get_worst_scores(ratings_df, rating_provider_input=["S&P", "Moody", "Fitch"])
    0     4
    1     4
    2     4
    3    14
    4    21
    Name: worst_scores, dtype: int64
    """
    scores_df = get_scores_from_ratings(
        ratings=ratings, rating_provider=rating_provider_input, tenor=tenor
    )

    # Higher score = worse rating, so the row-wise maximum is the worst score.
    worst = scores_df.max(axis=1)
    worst.name = "worst_scores"

    return worst
(ratings: pandas.core.frame.DataFrame, rating_provider_input: Optional[list[str]] = None, tenor: Literal['long-term', 'short-term'] = 'long-term') -> pandas.core.series.Series
20,564
tabledata.error
DataError
Exception raised when data is invalid as tabular data.
class DataError(ValueError):
    """Raised when data is not valid as tabular data."""
null
20,565
tabledata.error
InvalidHeaderNameError
Exception raised when a table header name is invalid.
class InvalidHeaderNameError(NameValidationError):
    """Raised when a table header name fails validation."""
null
20,566
tabledata.error
InvalidTableNameError
Exception raised when a table name is invalid.
class InvalidTableNameError(NameValidationError):
    """Raised when a table name fails validation."""
null
20,567
tabledata.error
NameValidationError
Exception raised when a name is invalid.
class NameValidationError(ValueError):
    """Raised when a name fails validation."""
null
20,568
tabledata._constant
PatternMatch
An enumeration.
class PatternMatch(enum.Enum):
    """How multiple pattern matches are combined when filtering columns.

    ``OR`` keeps a column when any pattern matches its header;
    ``AND`` requires every pattern to match.
    """

    OR = 0
    AND = 1
(value, names=None, *, module=None, qualname=None, type=None, start=1)
20,569
tabledata._core
TableData
Class to represent a table data structure. :param table_name: Name of the table. :param headers: Table header names. :param rows: Data of the table.
class TableData:
    """
    Class to represent a table data structure.

    :param table_name: Name of the table.
    :param headers: Table header names.
    :param rows: Data of the table.
    :param dp_extractor: Extractor used to convert raw cells into
        DataProperty instances; deep-copied so the caller's instance is
        never mutated. A fresh extractor is created when None.
    :param type_hints: Optional per-column type hints for the extractor.
    :param max_workers: Worker count forwarded to the extractor.
    :param max_precision: Numeric precision used when a fresh extractor
        is created (ignored when ``dp_extractor`` is given).
    """

    def __init__(
        self,
        table_name: Optional[str],
        headers: Sequence[str],
        rows: Sequence,
        dp_extractor: Optional[dp.DataPropertyExtractor] = None,
        type_hints: Optional[Sequence[Union[str, TypeHint]]] = None,
        max_workers: Optional[int] = None,
        max_precision: Optional[int] = None,
    ) -> None:
        self.__table_name = table_name
        # Lazily-built caches; populated on first access of value_matrix /
        # value_dp_matrix.
        self.__value_matrix: List[List[Any]] = []
        self.__value_dp_matrix: Optional[DataPropertyMatrix] = None

        # Normalize falsy rows (None, empty) to an empty list.
        if rows:
            self.__rows = rows
        else:
            self.__rows = []

        # Deep copy so the caller's extractor is not mutated below.
        if dp_extractor:
            self.__dp_extractor = copy.deepcopy(dp_extractor)
        else:
            self.__dp_extractor = dp.DataPropertyExtractor(max_precision=max_precision)

        if type_hints:
            self.__dp_extractor.column_type_hints = type_hints

        self.__dp_extractor.strip_str_header = '"'

        if max_workers:
            self.__dp_extractor.max_workers = max_workers

        if not headers:
            self.__dp_extractor.headers = []
        else:
            self.__dp_extractor.headers = headers

    def __repr__(self) -> str:
        element_list = [f"table_name={self.table_name}"]

        try:
            element_list.append("headers=[{}]".format(", ".join(self.headers)))
        except TypeError:
            # headers may not be join-able (e.g. None).
            element_list.append("headers=None")

        element_list.extend([f"cols={self.num_columns}", f"rows={self.num_rows}"])

        return ", ".join(element_list)

    def __eq__(self, other: Any) -> bool:
        # Equality is raw-value based (cmp_by_dp=False), not DataProperty based.
        if not isinstance(other, TableData):
            return False

        return self.equals(other, cmp_by_dp=False)

    def __ne__(self, other: Any) -> bool:
        if not isinstance(other, TableData):
            return True

        return not self.equals(other, cmp_by_dp=False)

    @property
    def table_name(self) -> Optional[str]:
        """str: Name of the table."""
        return self.__table_name

    @table_name.setter
    def table_name(self, value: Optional[str]) -> None:
        self.__table_name = value

    @property
    def headers(self) -> Sequence[str]:
        """Sequence[str]: Table header names."""
        return self.__dp_extractor.headers

    @property
    def rows(self) -> Sequence:
        """Sequence: Original rows of tabular data."""
        return self.__rows

    @property
    def value_matrix(self) -> DataPropertyMatrix:
        """DataPropertyMatrix: Converted rows of tabular data."""
        # Cached after the first computation.
        if self.__value_matrix:
            return self.__value_matrix

        self.__value_matrix = [
            [value_dp.data for value_dp in value_dp_list]
            for value_dp_list in self.value_dp_matrix
        ]

        return self.__value_matrix

    @property
    def has_value_dp_matrix(self) -> bool:
        """bool: |True| if the DataProperty matrix is already computed."""
        return self.__value_dp_matrix is not None

    @property
    def max_workers(self) -> int:
        """int: Worker count used by the underlying extractor."""
        return self.__dp_extractor.max_workers

    @max_workers.setter
    def max_workers(self, value: Optional[int]) -> None:
        self.__dp_extractor.max_workers = value

    @property
    def num_rows(self) -> Optional[int]:
        """Optional[int]: Number of rows in the tabular data.

        |None| if the ``rows`` is neither list nor tuple.
        """
        try:
            return len(self.rows)
        except TypeError:
            return None

    @property
    def num_columns(self) -> Optional[int]:
        """Optional[int]: Number of columns, derived from headers or rows."""
        if typepy.is_not_empty_sequence(self.headers):
            return len(self.headers)

        try:
            # Fall back to the width of the first row.
            return len(self.rows[0])
        except TypeError:
            return None
        except IndexError:
            # No rows at all.
            return 0

    @property
    def value_dp_matrix(self) -> DataPropertyMatrix:
        """DataPropertyMatrix: DataProperty for table data."""
        # Computed lazily and cached.
        if self.__value_dp_matrix is None:
            self.__value_dp_matrix = self.__dp_extractor.to_dp_matrix(
                to_value_matrix(self.headers, self.rows)
            )

        return self.__value_dp_matrix

    @property
    def header_dp_list(self) -> List[dp.DataProperty]:
        """List[dp.DataProperty]: DataProperty instances of the headers."""
        return self.__dp_extractor.to_header_dp_list()

    @property
    def column_dp_list(self) -> List[dp.ColumnDataProperty]:
        """List[dp.ColumnDataProperty]: Per-column DataProperty summaries."""
        return self.__dp_extractor.to_column_dp_list(self.value_dp_matrix)

    @property
    def dp_extractor(self) -> dp.DataPropertyExtractor:
        """dp.DataPropertyExtractor: The underlying extractor instance."""
        return self.__dp_extractor

    def is_empty_header(self) -> bool:
        """bool: |True| if the data :py:attr:`.headers` is empty."""
        return typepy.is_empty_sequence(self.headers)

    def is_empty_rows(self) -> bool:
        """
        :return: |True| if the tabular data has no rows.
        :rtype: bool
        """
        return self.num_rows == 0

    def is_empty(self) -> bool:
        """
        :return:
            |True| if the data :py:attr:`.headers` or
            :py:attr:`.value_matrix` is empty.
        :rtype: bool
        """
        return any([self.is_empty_header(), self.is_empty_rows()])

    def equals(self, other: "TableData", cmp_by_dp: bool = True) -> bool:
        """Compare with ``other``.

        :param other: Instance to compare against.
        :param cmp_by_dp: When |True|, compare via DataProperty values;
            otherwise compare raw header/row values (NaN cells skipped).
        :rtype: bool
        """
        if cmp_by_dp:
            return self.__equals_dp(other)

        return self.__equals_raw(other)

    def __equals_base(self, other: "TableData") -> bool:
        # Shared precondition for both comparison modes: table names match,
        # and row counts match when the row count is known.
        compare_item_list = [self.table_name == other.table_name]

        if self.num_rows is not None:
            compare_item_list.append(self.num_rows == other.num_rows)

        return all(compare_item_list)

    def __equals_raw(self, other: "TableData") -> bool:
        # Raw-value comparison; NaN != NaN, so cells where either side is NaN
        # are excluded from the equality test.
        if not self.__equals_base(other):
            return False

        if self.headers != other.headers:
            return False

        for lhs_row, rhs_row in zip(self.rows, other.rows):
            if len(lhs_row) != len(rhs_row):
                return False

            if not all(
                [
                    lhs == rhs
                    for lhs, rhs in zip(lhs_row, rhs_row)
                    if not Nan(lhs).is_type() and not Nan(rhs).is_type()
                ]
            ):
                return False

        return True

    def __equals_dp(self, other: "TableData") -> bool:
        # DataProperty-based comparison (type-aware).
        if not self.__equals_base(other):
            return False

        if self.header_dp_list != other.header_dp_list:
            return False

        if self.value_dp_matrix is None or other.value_dp_matrix is None:
            return False

        for lhs_list, rhs_list in zip(self.value_dp_matrix, other.value_dp_matrix):
            if len(lhs_list) != len(rhs_list):
                return False

            if any([lhs != rhs for lhs, rhs in zip(lhs_list, rhs_list)]):
                return False

        return True

    def in_tabledata_list(self, other: Sequence["TableData"], cmp_by_dp: bool = True) -> bool:
        """:return: |True| if any element of ``other`` equals this instance."""
        for table_data in other:
            if self.equals(table_data, cmp_by_dp=cmp_by_dp):
                return True

        return False

    def validate_rows(self) -> None:
        """Check that every row is consistent with the headers.

        list/tuple rows must have the same length as the headers;
        dict rows must contain every header as a key.

        :raises ValueError: If any row is inconsistent with the headers.
        """
        invalid_row_idx_list = []

        for row_idx, row in enumerate(self.rows):
            if isinstance(row, (list, tuple)) and len(self.headers) != len(row):
                invalid_row_idx_list.append(row_idx)

            if isinstance(row, dict):
                if not all([header in row for header in self.headers]):
                    invalid_row_idx_list.append(row_idx)

        if not invalid_row_idx_list:
            return

        for invalid_row_idx in invalid_row_idx_list:
            logger.debug(f"invalid row (line={invalid_row_idx}): {self.rows[invalid_row_idx]}")

        # NOTE(review): "ouf of" is a typo in the user-facing message;
        # left untouched here since this edit only adds documentation.
        raise ValueError(
            "table header length and row length are mismatch:\n"
            + f" header(len={len(self.headers)}): {self.headers}\n"
            + " # of miss match rows: {} ouf of {}\n".format(
                len(invalid_row_idx_list), self.num_rows
            )
        )

    def as_dict(self, default_key: str = "table") -> Dict[str, List["OrderedDict[str, Any]"]]:
        """
        Args:
            default_key:
                Key of a returning dictionary when the ``table_name`` is empty.

        Returns:
            dict: Table data as a |dict| instance.

        Sample Code:
            .. code:: python

                from tabledata import TableData

                TableData(
                    "sample",
                    ["a", "b"],
                    [[1, 2], [3.3, 4.4]]
                ).as_dict()

        Output:
            .. code:: json

                {'sample': [OrderedDict([('a', 1), ('b', 2)]), OrderedDict([('a', 3.3), ('b', 4.4)])]}
        """  # noqa
        dict_body = []
        for row in self.value_matrix:
            if not row:
                continue

            # Cells whose value is None are dropped from the row dict.
            values = [
                (header, value) for header, value in zip(self.headers, row) if value is not None
            ]
            if not values:
                continue

            dict_body.append(OrderedDict(values))

        table_name = self.table_name
        if not table_name:
            table_name = default_key

        return {table_name: dict_body}

    def as_tuple(self) -> Iterator[Tuple]:
        """
        :return: Rows of the tuple.
        :rtype: list of |namedtuple|

        :Sample Code:
            .. code:: python

                from tabledata import TableData

                records = TableData(
                    "sample",
                    ["a", "b"],
                    [[1, 2], [3.3, 4.4]]
                ).as_tuple()
                for record in records:
                    print(record)

        :Output:
            .. code-block:: none

                Row(a=1, b=2)
                Row(a=Decimal('3.3'), b=Decimal('4.4'))
        """
        # NOTE: headers must be valid namedtuple field names for this to work.
        Row = namedtuple("Row", self.headers)  # type: ignore

        for value_dp_list in self.value_dp_matrix:
            if typepy.is_empty_sequence(value_dp_list):
                continue

            row = Row(*(value_dp.data for value_dp in value_dp_list))

            yield row

    def as_dataframe(self) -> "pandas.DataFrame":
        """
        :return: Table data as a ``pandas.DataFrame`` instance.
        :rtype: pandas.DataFrame

        :Sample Code:
            .. code-block:: python

                from tabledata import TableData

                TableData(
                    "sample",
                    ["a", "b"],
                    [[1, 2], [3.3, 4.4]]
                ).as_dataframe()

        :Output:
            .. code-block:: none

                     a    b
                0    1    2
                1  3.3  4.4

        :Dependency Packages:
            - `pandas <https://pandas.pydata.org/>`__
        """
        # pandas is an optional dependency; imported lazily here.
        try:
            from pandas import DataFrame
        except ImportError:
            raise RuntimeError("required 'pandas' package to execute as_dataframe method")

        dataframe = DataFrame(self.value_matrix)
        if not self.is_empty_header():
            dataframe.columns = self.headers

        return dataframe

    def transpose(self) -> "TableData":
        """Return a new TableData with rows and columns swapped."""
        return TableData(
            self.table_name,
            self.headers,
            [row for row in zip(*self.rows)],
            max_workers=self.max_workers,
        )

    def filter_column(
        self,
        patterns: Optional[str] = None,
        is_invert_match: bool = False,
        is_re_match: bool = False,
        pattern_match: PatternMatch = PatternMatch.OR,
    ) -> "TableData":
        """Return a new TableData keeping only columns whose header matches.

        :param patterns: Patterns compared against each header
            (iterated per pattern).
        :param is_invert_match: Keep columns that do NOT match.
        :param is_re_match: Treat patterns as regular expressions
            (``re.search``) instead of exact matches.
        :param pattern_match: ``PatternMatch.OR`` keeps a column when any
            pattern matches; ``PatternMatch.AND`` requires all to match.
        :rtype: TableData
        """
        logger.debug(
            "filter_column: patterns={}, is_invert_match={}, "
            "is_re_match={}, pattern_match={}".format(
                patterns, is_invert_match, is_re_match, pattern_match
            )
        )

        # No patterns: nothing to filter, return self unchanged.
        if not patterns:
            return self

        match_header_list = []
        match_column_matrix = []

        if pattern_match == PatternMatch.OR:
            match_method = any
        elif pattern_match == PatternMatch.AND:
            match_method = all
        else:
            raise ValueError(f"unknown matching: {pattern_match}")

        for header, column in zip(self.headers, zip(*self.rows)):
            is_match_list = []
            for pattern in patterns:
                is_match = self.__is_match(header, pattern, is_re_match)

                # XOR of match result and invert flag.
                is_match_list.append(
                    any([is_match and not is_invert_match, not is_match and is_invert_match])
                )

            if match_method(is_match_list):
                match_header_list.append(header)
                match_column_matrix.append(column)

        logger.debug(
            "filter_column: table={}, match_header_list={}".format(
                self.table_name, match_header_list
            )
        )

        return TableData(
            self.table_name,
            match_header_list,
            list(zip(*match_column_matrix)),
            max_workers=self.max_workers,
        )

    @staticmethod
    def from_dataframe(
        dataframe: "pandas.DataFrame",
        table_name: str = "",
        type_hints: Optional[Sequence[TypeHint]] = None,
        max_workers: Optional[int] = None,
    ) -> "TableData":
        """
        Initialize TableData instance from a pandas.DataFrame instance.

        :param pandas.DataFrame dataframe:
        :param str table_name: Table name to create.
        """
        return TableData(
            table_name,
            list(dataframe.columns.values),
            dataframe.values.tolist(),
            type_hints=type_hints,
            max_workers=max_workers,
        )

    @staticmethod
    def __is_match(header: str, pattern: str, is_re_match: bool) -> bool:
        # Regex search anywhere in the header, or exact string equality.
        if is_re_match:
            return re.search(pattern, header) is not None

        return header == pattern
(table_name: Optional[str], headers: Sequence[str], rows: Sequence, dp_extractor: Optional[dataproperty._extractor.DataPropertyExtractor] = None, type_hints: Optional[Sequence[Union[str, Type[typepy.type._base.AbstractType], NoneType]]] = None, max_workers: Optional[int] = None, max_precision: Optional[int] = None) -> None
20,570
tabledata._core
__equals_base
null
def __equals_base(self, other: "TableData") -> bool:
    # Shared precondition for both comparison modes: table names must match;
    # row counts must match too, but only when this instance's row count is
    # actually known (num_rows can be None).
    if self.table_name != other.table_name:
        return False

    if self.num_rows is None:
        return True

    return self.num_rows == other.num_rows
(self, other: tabledata._core.TableData) -> bool
20,571
tabledata._core
__equals_dp
null
def __equals_dp(self, other: "TableData") -> bool: if not self.__equals_base(other): return False if self.header_dp_list != other.header_dp_list: return False if self.value_dp_matrix is None or other.value_dp_matrix is None: return False for lhs_list, rhs_list in zip(self.value_dp_matrix, other.value_dp_matrix): if len(lhs_list) != len(rhs_list): return False if any([lhs != rhs for lhs, rhs in zip(lhs_list, rhs_list)]): return False return True
(self, other: tabledata._core.TableData) -> bool
20,572
tabledata._core
__equals_raw
null
def __equals_raw(self, other: "TableData") -> bool: if not self.__equals_base(other): return False if self.headers != other.headers: return False for lhs_row, rhs_row in zip(self.rows, other.rows): if len(lhs_row) != len(rhs_row): return False if not all( [ lhs == rhs for lhs, rhs in zip(lhs_row, rhs_row) if not Nan(lhs).is_type() and not Nan(rhs).is_type() ] ): return False return True
(self, other: tabledata._core.TableData) -> bool
20,573
tabledata._core
__is_match
null
@staticmethod def __is_match(header: str, pattern: str, is_re_match: bool) -> bool: if is_re_match: return re.search(pattern, header) is not None return header == pattern
(header: str, pattern: str, is_re_match: bool) -> bool
20,574
tabledata._core
__eq__
null
def __eq__(self, other: Any) -> bool: if not isinstance(other, TableData): return False return self.equals(other, cmp_by_dp=False)
(self, other: Any) -> bool
20,575
tabledata._core
__init__
null
def __init__( self, table_name: Optional[str], headers: Sequence[str], rows: Sequence, dp_extractor: Optional[dp.DataPropertyExtractor] = None, type_hints: Optional[Sequence[Union[str, TypeHint]]] = None, max_workers: Optional[int] = None, max_precision: Optional[int] = None, ) -> None: self.__table_name = table_name self.__value_matrix: List[List[Any]] = [] self.__value_dp_matrix: Optional[DataPropertyMatrix] = None if rows: self.__rows = rows else: self.__rows = [] if dp_extractor: self.__dp_extractor = copy.deepcopy(dp_extractor) else: self.__dp_extractor = dp.DataPropertyExtractor(max_precision=max_precision) if type_hints: self.__dp_extractor.column_type_hints = type_hints self.__dp_extractor.strip_str_header = '"' if max_workers: self.__dp_extractor.max_workers = max_workers if not headers: self.__dp_extractor.headers = [] else: self.__dp_extractor.headers = headers
(self, table_name: Optional[str], headers: Sequence[str], rows: Sequence, dp_extractor: Optional[dataproperty._extractor.DataPropertyExtractor] = None, type_hints: Optional[Sequence[Union[str, Type[typepy.type._base.AbstractType], NoneType]]] = None, max_workers: Optional[int] = None, max_precision: Optional[int] = None) -> NoneType
20,576
tabledata._core
__ne__
null
def __ne__(self, other: Any) -> bool: if not isinstance(other, TableData): return True return not self.equals(other, cmp_by_dp=False)
(self, other: Any) -> bool
20,577
tabledata._core
__repr__
null
def __repr__(self) -> str: element_list = [f"table_name={self.table_name}"] try: element_list.append("headers=[{}]".format(", ".join(self.headers))) except TypeError: element_list.append("headers=None") element_list.extend([f"cols={self.num_columns}", f"rows={self.num_rows}"]) return ", ".join(element_list)
(self) -> str
20,578
tabledata._core
as_dataframe
:return: Table data as a ``pandas.DataFrame`` instance. :rtype: pandas.DataFrame :Sample Code: .. code-block:: python from tabledata import TableData TableData( "sample", ["a", "b"], [[1, 2], [3.3, 4.4]] ).as_dataframe() :Output: .. code-block:: none a b 0 1 2 1 3.3 4.4 :Dependency Packages: - `pandas <https://pandas.pydata.org/>`__
def as_dataframe(self) -> "pandas.DataFrame": """ :return: Table data as a ``pandas.DataFrame`` instance. :rtype: pandas.DataFrame :Sample Code: .. code-block:: python from tabledata import TableData TableData( "sample", ["a", "b"], [[1, 2], [3.3, 4.4]] ).as_dataframe() :Output: .. code-block:: none a b 0 1 2 1 3.3 4.4 :Dependency Packages: - `pandas <https://pandas.pydata.org/>`__ """ try: from pandas import DataFrame except ImportError: raise RuntimeError("required 'pandas' package to execute as_dataframe method") dataframe = DataFrame(self.value_matrix) if not self.is_empty_header(): dataframe.columns = self.headers return dataframe
(self) -> 'pandas.DataFrame'
20,579
tabledata._core
as_dict
Args: default_key: Key of a returning dictionary when the ``table_name`` is empty. Returns: dict: Table data as a |dict| instance. Sample Code: .. code:: python from tabledata import TableData TableData( "sample", ["a", "b"], [[1, 2], [3.3, 4.4]] ).as_dict() Output: .. code:: json {'sample': [OrderedDict([('a', 1), ('b', 2)]), OrderedDict([('a', 3.3), ('b', 4.4)])]}
def as_dict(self, default_key: str = "table") -> Dict[str, List["OrderedDict[str, Any]"]]: """ Args: default_key: Key of a returning dictionary when the ``table_name`` is empty. Returns: dict: Table data as a |dict| instance. Sample Code: .. code:: python from tabledata import TableData TableData( "sample", ["a", "b"], [[1, 2], [3.3, 4.4]] ).as_dict() Output: .. code:: json {'sample': [OrderedDict([('a', 1), ('b', 2)]), OrderedDict([('a', 3.3), ('b', 4.4)])]} """ # noqa dict_body = [] for row in self.value_matrix: if not row: continue values = [ (header, value) for header, value in zip(self.headers, row) if value is not None ] if not values: continue dict_body.append(OrderedDict(values)) table_name = self.table_name if not table_name: table_name = default_key return {table_name: dict_body}
(self, default_key: str = 'table') -> Dict[str, List[collections.OrderedDict[str, Any]]]
20,580
tabledata._core
as_tuple
:return: Rows of the tuple. :rtype: list of |namedtuple| :Sample Code: .. code:: python from tabledata import TableData records = TableData( "sample", ["a", "b"], [[1, 2], [3.3, 4.4]] ).as_tuple() for record in records: print(record) :Output: .. code-block:: none Row(a=1, b=2) Row(a=Decimal('3.3'), b=Decimal('4.4'))
def as_tuple(self) -> Iterator[Tuple]: """ :return: Rows of the tuple. :rtype: list of |namedtuple| :Sample Code: .. code:: python from tabledata import TableData records = TableData( "sample", ["a", "b"], [[1, 2], [3.3, 4.4]] ).as_tuple() for record in records: print(record) :Output: .. code-block:: none Row(a=1, b=2) Row(a=Decimal('3.3'), b=Decimal('4.4')) """ Row = namedtuple("Row", self.headers) # type: ignore for value_dp_list in self.value_dp_matrix: if typepy.is_empty_sequence(value_dp_list): continue row = Row(*(value_dp.data for value_dp in value_dp_list)) yield row
(self) -> Iterator[Tuple]
20,581
tabledata._core
equals
null
def equals(self, other: "TableData", cmp_by_dp: bool = True) -> bool: if cmp_by_dp: return self.__equals_dp(other) return self.__equals_raw(other)
(self, other: tabledata._core.TableData, cmp_by_dp: bool = True) -> bool
20,582
tabledata._core
filter_column
null
def filter_column( self, patterns: Optional[str] = None, is_invert_match: bool = False, is_re_match: bool = False, pattern_match: PatternMatch = PatternMatch.OR, ) -> "TableData": logger.debug( "filter_column: patterns={}, is_invert_match={}, " "is_re_match={}, pattern_match={}".format( patterns, is_invert_match, is_re_match, pattern_match ) ) if not patterns: return self match_header_list = [] match_column_matrix = [] if pattern_match == PatternMatch.OR: match_method = any elif pattern_match == PatternMatch.AND: match_method = all else: raise ValueError(f"unknown matching: {pattern_match}") for header, column in zip(self.headers, zip(*self.rows)): is_match_list = [] for pattern in patterns: is_match = self.__is_match(header, pattern, is_re_match) is_match_list.append( any([is_match and not is_invert_match, not is_match and is_invert_match]) ) if match_method(is_match_list): match_header_list.append(header) match_column_matrix.append(column) logger.debug( "filter_column: table={}, match_header_list={}".format( self.table_name, match_header_list ) ) return TableData( self.table_name, match_header_list, list(zip(*match_column_matrix)), max_workers=self.max_workers, )
(self, patterns: Optional[str] = None, is_invert_match: bool = False, is_re_match: bool = False, pattern_match: tabledata._constant.PatternMatch = <PatternMatch.OR: 0>) -> tabledata._core.TableData
20,583
tabledata._core
from_dataframe
Initialize TableData instance from a pandas.DataFrame instance. :param pandas.DataFrame dataframe: :param str table_name: Table name to create.
@staticmethod def from_dataframe( dataframe: "pandas.DataFrame", table_name: str = "", type_hints: Optional[Sequence[TypeHint]] = None, max_workers: Optional[int] = None, ) -> "TableData": """ Initialize TableData instance from a pandas.DataFrame instance. :param pandas.DataFrame dataframe: :param str table_name: Table name to create. """ return TableData( table_name, list(dataframe.columns.values), dataframe.values.tolist(), type_hints=type_hints, max_workers=max_workers, )
(dataframe: 'pandas.DataFrame', table_name: str = '', type_hints: Optional[Sequence[Optional[Type[typepy.type._base.AbstractType]]]] = None, max_workers: Optional[int] = None) -> 'TableData'
20,584
tabledata._core
in_tabledata_list
null
def in_tabledata_list(self, other: Sequence["TableData"], cmp_by_dp: bool = True) -> bool: for table_data in other: if self.equals(table_data, cmp_by_dp=cmp_by_dp): return True return False
(self, other: Sequence[tabledata._core.TableData], cmp_by_dp: bool = True) -> bool
20,585
tabledata._core
is_empty
:return: |True| if the data :py:attr:`.headers` or :py:attr:`.value_matrix` is empty. :rtype: bool
def is_empty(self) -> bool: """ :return: |True| if the data :py:attr:`.headers` or :py:attr:`.value_matrix` is empty. :rtype: bool """ return any([self.is_empty_header(), self.is_empty_rows()])
(self) -> bool
20,586
tabledata._core
is_empty_header
bool: |True| if the data :py:attr:`.headers` is empty.
def is_empty_header(self) -> bool: """bool: |True| if the data :py:attr:`.headers` is empty.""" return typepy.is_empty_sequence(self.headers)
(self) -> bool
20,587
tabledata._core
is_empty_rows
:return: |True| if the tabular data has no rows. :rtype: bool
def is_empty_rows(self) -> bool: """ :return: |True| if the tabular data has no rows. :rtype: bool """ return self.num_rows == 0
(self) -> bool
20,588
tabledata._core
transpose
null
def transpose(self) -> "TableData": return TableData( self.table_name, self.headers, [row for row in zip(*self.rows)], max_workers=self.max_workers, )
(self) -> tabledata._core.TableData
20,589
tabledata._core
validate_rows
:raises ValueError:
def validate_rows(self) -> None: """ :raises ValueError: """ invalid_row_idx_list = [] for row_idx, row in enumerate(self.rows): if isinstance(row, (list, tuple)) and len(self.headers) != len(row): invalid_row_idx_list.append(row_idx) if isinstance(row, dict): if not all([header in row for header in self.headers]): invalid_row_idx_list.append(row_idx) if not invalid_row_idx_list: return for invalid_row_idx in invalid_row_idx_list: logger.debug(f"invalid row (line={invalid_row_idx}): {self.rows[invalid_row_idx]}") raise ValueError( "table header length and row length are mismatch:\n" + f" header(len={len(self.headers)}): {self.headers}\n" + " # of miss match rows: {} ouf of {}\n".format( len(invalid_row_idx_list), self.num_rows ) )
(self) -> NoneType
20,595
tabledata._common
convert_idx_to_alphabet
null
def convert_idx_to_alphabet(idx: int) -> str: if idx < 26: return chr(65 + idx) div, mod = divmod(idx, 26) return convert_idx_to_alphabet(div - 1) + convert_idx_to_alphabet(mod)
(idx: int) -> str
20,597
tabledata._logger._logger
set_log_level
null
def set_log_level(log_level): # type: ignore warnings.warn( "'set_log_level' method is deprecated and will be removed in the future. ", DeprecationWarning, ) return
(log_level)
20,598
tabledata._logger._logger
set_logger
null
def set_logger(is_enable: bool, propagation_depth: int = 1) -> None: if is_enable: logger.enable(MODULE_NAME) else: logger.disable(MODULE_NAME) if propagation_depth <= 0: return dataproperty.set_logger(is_enable, propagation_depth - 1)
(is_enable: bool, propagation_depth: int = 1) -> NoneType
20,599
tabledata._converter
to_value_matrix
null
def to_value_matrix(headers: Sequence[str], value_matrix: Sequence[Any]) -> List[Row]: if not value_matrix: return [] return [_to_row(headers, values, row_idx)[1] for row_idx, values in enumerate(value_matrix)]
(headers: Sequence[str], value_matrix: Sequence[Any]) -> List[Tuple[int, Any]]
20,602
amaze_dict.amaze_dict
wrap_value
null
def wrap_value(value): return LeafBase(value)
(value)
20,603
http.client
HTTPConnection
null
class HTTPConnection: _http_vsn = 11 _http_vsn_str = 'HTTP/1.1' response_class = HTTPResponse default_port = HTTP_PORT auto_open = 1 debuglevel = 0 @staticmethod def _is_textIO(stream): """Test whether a file-like object is a text or a binary stream. """ return isinstance(stream, io.TextIOBase) @staticmethod def _get_content_length(body, method): """Get the content-length based on the body. If the body is None, we set Content-Length: 0 for methods that expect a body (RFC 7230, Section 3.3.2). We also set the Content-Length for any method if the body is a str or bytes-like object and not a file. """ if body is None: # do an explicit check for not None here to distinguish # between unset and set but empty if method.upper() in _METHODS_EXPECTING_BODY: return 0 else: return None if hasattr(body, 'read'): # file-like object. return None try: # does it implement the buffer protocol (bytes, bytearray, array)? mv = memoryview(body) return mv.nbytes except TypeError: pass if isinstance(body, str): return len(body) return None def __init__(self, host, port=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None, blocksize=8192): self.timeout = timeout self.source_address = source_address self.blocksize = blocksize self.sock = None self._buffer = [] self.__response = None self.__state = _CS_IDLE self._method = None self._tunnel_host = None self._tunnel_port = None self._tunnel_headers = {} (self.host, self.port) = self._get_hostport(host, port) self._validate_host(self.host) # This is stored as an instance variable to allow unit # tests to replace it with a suitable mockup self._create_connection = socket.create_connection def set_tunnel(self, host, port=None, headers=None): """Set up host and port for HTTP CONNECT tunnelling. In a connection that uses HTTP CONNECT tunneling, the host passed to the constructor is used as a proxy server that relays all communication to the endpoint passed to `set_tunnel`. 
This done by sending an HTTP CONNECT request to the proxy server when the connection is established. This method must be called before the HTTP connection has been established. The headers argument should be a mapping of extra HTTP headers to send with the CONNECT request. """ if self.sock: raise RuntimeError("Can't set up tunnel for established connection") self._tunnel_host, self._tunnel_port = self._get_hostport(host, port) if headers: self._tunnel_headers = headers else: self._tunnel_headers.clear() def _get_hostport(self, host, port): if port is None: i = host.rfind(':') j = host.rfind(']') # ipv6 addresses have [...] if i > j: try: port = int(host[i+1:]) except ValueError: if host[i+1:] == "": # http://foo.com:/ == http://foo.com/ port = self.default_port else: raise InvalidURL("nonnumeric port: '%s'" % host[i+1:]) host = host[:i] else: port = self.default_port if host and host[0] == '[' and host[-1] == ']': host = host[1:-1] return (host, port) def set_debuglevel(self, level): self.debuglevel = level def _tunnel(self): connect = b"CONNECT %s:%d HTTP/1.0\r\n" % ( self._tunnel_host.encode("ascii"), self._tunnel_port) headers = [connect] for header, value in self._tunnel_headers.items(): headers.append(f"{header}: {value}\r\n".encode("latin-1")) headers.append(b"\r\n") # Making a single send() call instead of one per line encourages # the host OS to use a more optimal packet size instead of # potentially emitting a series of small packets. 
self.send(b"".join(headers)) del headers response = self.response_class(self.sock, method=self._method) (version, code, message) = response._read_status() if code != http.HTTPStatus.OK: self.close() raise OSError(f"Tunnel connection failed: {code} {message.strip()}") while True: line = response.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: raise LineTooLong("header line") if not line: # for sites which EOF without sending a trailer break if line in (b'\r\n', b'\n', b''): break if self.debuglevel > 0: print('header:', line.decode()) def connect(self): """Connect to the host and port specified in __init__.""" sys.audit("http.client.connect", self, self.host, self.port) self.sock = self._create_connection( (self.host,self.port), self.timeout, self.source_address) # Might fail in OSs that don't implement TCP_NODELAY try: self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) except OSError as e: if e.errno != errno.ENOPROTOOPT: raise if self._tunnel_host: self._tunnel() def close(self): """Close the connection to the HTTP server.""" self.__state = _CS_IDLE try: sock = self.sock if sock: self.sock = None sock.close() # close it manually... there may be other refs finally: response = self.__response if response: self.__response = None response.close() def send(self, data): """Send `data' to the server. ``data`` can be a string object, a bytes object, an array object, a file-like object that supports a .read() method, or an iterable object. 
""" if self.sock is None: if self.auto_open: self.connect() else: raise NotConnected() if self.debuglevel > 0: print("send:", repr(data)) if hasattr(data, "read") : if self.debuglevel > 0: print("sendIng a read()able") encode = self._is_textIO(data) if encode and self.debuglevel > 0: print("encoding file using iso-8859-1") while 1: datablock = data.read(self.blocksize) if not datablock: break if encode: datablock = datablock.encode("iso-8859-1") sys.audit("http.client.send", self, datablock) self.sock.sendall(datablock) return sys.audit("http.client.send", self, data) try: self.sock.sendall(data) except TypeError: if isinstance(data, collections.abc.Iterable): for d in data: self.sock.sendall(d) else: raise TypeError("data should be a bytes-like object " "or an iterable, got %r" % type(data)) def _output(self, s): """Add a line of output to the current request buffer. Assumes that the line does *not* end with \\r\\n. """ self._buffer.append(s) def _read_readable(self, readable): if self.debuglevel > 0: print("sendIng a read()able") encode = self._is_textIO(readable) if encode and self.debuglevel > 0: print("encoding file using iso-8859-1") while True: datablock = readable.read(self.blocksize) if not datablock: break if encode: datablock = datablock.encode("iso-8859-1") yield datablock def _send_output(self, message_body=None, encode_chunked=False): """Send the currently buffered request and clear the buffer. Appends an extra \\r\\n to the buffer. A message_body may be specified, to be appended to the request. """ self._buffer.extend((b"", b"")) msg = b"\r\n".join(self._buffer) del self._buffer[:] self.send(msg) if message_body is not None: # create a consistent interface to message_body if hasattr(message_body, 'read'): # Let file-like take precedence over byte-like. This # is needed to allow the current position of mmap'ed # files to be taken into account. 
chunks = self._read_readable(message_body) else: try: # this is solely to check to see if message_body # implements the buffer API. it /would/ be easier # to capture if PyObject_CheckBuffer was exposed # to Python. memoryview(message_body) except TypeError: try: chunks = iter(message_body) except TypeError: raise TypeError("message_body should be a bytes-like " "object or an iterable, got %r" % type(message_body)) else: # the object implements the buffer interface and # can be passed directly into socket methods chunks = (message_body,) for chunk in chunks: if not chunk: if self.debuglevel > 0: print('Zero length chunk ignored') continue if encode_chunked and self._http_vsn == 11: # chunked encoding chunk = f'{len(chunk):X}\r\n'.encode('ascii') + chunk \ + b'\r\n' self.send(chunk) if encode_chunked and self._http_vsn == 11: # end chunked transfer self.send(b'0\r\n\r\n') def putrequest(self, method, url, skip_host=False, skip_accept_encoding=False): """Send a request to the server. `method' specifies an HTTP request method, e.g. 'GET'. `url' specifies the object being requested, e.g. '/index.html'. `skip_host' if True does not add automatically a 'Host:' header `skip_accept_encoding' if True does not add automatically an 'Accept-Encoding:' header """ # if a prior response has been completed, then forget about it. if self.__response and self.__response.isclosed(): self.__response = None # in certain cases, we cannot issue another request on this connection. # this occurs when: # 1) we are in the process of sending a request. (_CS_REQ_STARTED) # 2) a response to a previous request has signalled that it is going # to close the connection upon completion. # 3) the headers for the previous response have not been read, thus # we cannot determine whether point (2) is true. (_CS_REQ_SENT) # # if there is no prior response, then we can request at will. 
# # if point (2) is true, then we will have passed the socket to the # response (effectively meaning, "there is no prior response"), and # will open a new one when a new request is made. # # Note: if a prior response exists, then we *can* start a new request. # We are not allowed to begin fetching the response to this new # request, however, until that prior response is complete. # if self.__state == _CS_IDLE: self.__state = _CS_REQ_STARTED else: raise CannotSendRequest(self.__state) self._validate_method(method) # Save the method for use later in the response phase self._method = method url = url or '/' self._validate_path(url) request = '%s %s %s' % (method, url, self._http_vsn_str) self._output(self._encode_request(request)) if self._http_vsn == 11: # Issue some standard headers for better HTTP/1.1 compliance if not skip_host: # this header is issued *only* for HTTP/1.1 # connections. more specifically, this means it is # only issued when the client uses the new # HTTPConnection() class. backwards-compat clients # will be using HTTP/1.0 and those clients may be # issuing this header themselves. we should NOT issue # it twice; some web servers (such as Apache) barf # when they see two Host: headers # If we need a non-standard port,include it in the # header. If the request is going through a proxy, # but the host of the actual URL, not the host of the # proxy. 
netloc = '' if url.startswith('http'): nil, netloc, nil, nil, nil = urlsplit(url) if netloc: try: netloc_enc = netloc.encode("ascii") except UnicodeEncodeError: netloc_enc = netloc.encode("idna") self.putheader('Host', netloc_enc) else: if self._tunnel_host: host = self._tunnel_host port = self._tunnel_port else: host = self.host port = self.port try: host_enc = host.encode("ascii") except UnicodeEncodeError: host_enc = host.encode("idna") # As per RFC 273, IPv6 address should be wrapped with [] # when used as Host header if host.find(':') >= 0: host_enc = b'[' + host_enc + b']' if port == self.default_port: self.putheader('Host', host_enc) else: host_enc = host_enc.decode("ascii") self.putheader('Host', "%s:%s" % (host_enc, port)) # note: we are assuming that clients will not attempt to set these # headers since *this* library must deal with the # consequences. this also means that when the supporting # libraries are updated to recognize other forms, then this # code should be changed (removed or updated). # we only want a Content-Encoding of "identity" since we don't # support encodings such as x-gzip or x-deflate. if not skip_accept_encoding: self.putheader('Accept-Encoding', 'identity') # we can accept "chunked" Transfer-Encodings, but no others # NOTE: no TE header implies *only* "chunked" #self.putheader('TE', 'chunked') # if TE is supplied in the header, then it must appear in a # Connection header. #self.putheader('Connection', 'TE') else: # For HTTP/1.0, the server will assume "not chunked" pass def _encode_request(self, request): # ASCII also helps prevent CVE-2019-9740. return request.encode('ascii') def _validate_method(self, method): """Validate a method name for putrequest.""" # prevent http header injection match = _contains_disallowed_method_pchar_re.search(method) if match: raise ValueError( f"method can't contain control characters. 
{method!r} " f"(found at least {match.group()!r})") def _validate_path(self, url): """Validate a url for putrequest.""" # Prevent CVE-2019-9740. match = _contains_disallowed_url_pchar_re.search(url) if match: raise InvalidURL(f"URL can't contain control characters. {url!r} " f"(found at least {match.group()!r})") def _validate_host(self, host): """Validate a host so it doesn't contain control characters.""" # Prevent CVE-2019-18348. match = _contains_disallowed_url_pchar_re.search(host) if match: raise InvalidURL(f"URL can't contain control characters. {host!r} " f"(found at least {match.group()!r})") def putheader(self, header, *values): """Send a request header line to the server. For example: h.putheader('Accept', 'text/html') """ if self.__state != _CS_REQ_STARTED: raise CannotSendHeader() if hasattr(header, 'encode'): header = header.encode('ascii') if not _is_legal_header_name(header): raise ValueError('Invalid header name %r' % (header,)) values = list(values) for i, one_value in enumerate(values): if hasattr(one_value, 'encode'): values[i] = one_value.encode('latin-1') elif isinstance(one_value, int): values[i] = str(one_value).encode('ascii') if _is_illegal_header_value(values[i]): raise ValueError('Invalid header value %r' % (values[i],)) value = b'\r\n\t'.join(values) header = header + b': ' + value self._output(header) def endheaders(self, message_body=None, *, encode_chunked=False): """Indicate that the last header line has been sent to the server. This method sends the request to the server. The optional message_body argument can be used to pass a message body associated with the request. 
""" if self.__state == _CS_REQ_STARTED: self.__state = _CS_REQ_SENT else: raise CannotSendHeader() self._send_output(message_body, encode_chunked=encode_chunked) def request(self, method, url, body=None, headers={}, *, encode_chunked=False): """Send a complete request to the server.""" self._send_request(method, url, body, headers, encode_chunked) def _send_request(self, method, url, body, headers, encode_chunked): # Honor explicitly requested Host: and Accept-Encoding: headers. header_names = frozenset(k.lower() for k in headers) skips = {} if 'host' in header_names: skips['skip_host'] = 1 if 'accept-encoding' in header_names: skips['skip_accept_encoding'] = 1 self.putrequest(method, url, **skips) # chunked encoding will happen if HTTP/1.1 is used and either # the caller passes encode_chunked=True or the following # conditions hold: # 1. content-length has not been explicitly set # 2. the body is a file or iterable, but not a str or bytes-like # 3. Transfer-Encoding has NOT been explicitly set by the caller if 'content-length' not in header_names: # only chunk body if not explicitly set for backwards # compatibility, assuming the client code is already handling the # chunking if 'transfer-encoding' not in header_names: # if content-length cannot be automatically determined, fall # back to chunked encoding encode_chunked = False content_length = self._get_content_length(body, method) if content_length is None: if body is not None: if self.debuglevel > 0: print('Unable to determine size of %r' % body) encode_chunked = True self.putheader('Transfer-Encoding', 'chunked') else: self.putheader('Content-Length', str(content_length)) else: encode_chunked = False for hdr, value in headers.items(): self.putheader(hdr, value) if isinstance(body, str): # RFC 2616 Section 3.7.1 says that text default has a # default charset of iso-8859-1. 
body = _encode(body, 'body') self.endheaders(body, encode_chunked=encode_chunked) def getresponse(self): """Get the response from the server. If the HTTPConnection is in the correct state, returns an instance of HTTPResponse or of whatever object is returned by the response_class variable. If a request has not been sent or if a previous response has not be handled, ResponseNotReady is raised. If the HTTP response indicates that the connection should be closed, then it will be closed before the response is returned. When the connection is closed, the underlying socket is closed. """ # if a prior response has been completed, then forget about it. if self.__response and self.__response.isclosed(): self.__response = None # if a prior response exists, then it must be completed (otherwise, we # cannot read this response's header to determine the connection-close # behavior) # # note: if a prior response existed, but was connection-close, then the # socket and response were made independent of this HTTPConnection # object since a new request requires that we open a whole new # connection # # this means the prior response had one of two states: # 1) will_close: this connection was reset and the prior socket and # response operate independently # 2) persistent: the response was retained and we await its # isclosed() status to become true. # if self.__state != _CS_REQ_SENT or self.__response: raise ResponseNotReady(self.__state) if self.debuglevel > 0: response = self.response_class(self.sock, self.debuglevel, method=self._method) else: response = self.response_class(self.sock, method=self._method) try: try: response.begin() except ConnectionError: self.close() raise assert response.will_close != _UNKNOWN self.__state = _CS_IDLE if response.will_close: # this effectively passes the connection to the response self.close() else: # remember this, so we can tell when it is complete self.__response = response return response except: response.close() raise
(host, port=None, timeout=<object object at 0x7fbc93ea0e50>, source_address=None, blocksize=8192)
20,604
http.client
__init__
null
def __init__(self, host, port=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None, blocksize=8192): self.timeout = timeout self.source_address = source_address self.blocksize = blocksize self.sock = None self._buffer = [] self.__response = None self.__state = _CS_IDLE self._method = None self._tunnel_host = None self._tunnel_port = None self._tunnel_headers = {} (self.host, self.port) = self._get_hostport(host, port) self._validate_host(self.host) # This is stored as an instance variable to allow unit # tests to replace it with a suitable mockup self._create_connection = socket.create_connection
(self, host, port=None, timeout=<object object at 0x7fbc93ea0e50>, source_address=None, blocksize=8192)
20,605
http.client
_encode_request
null
def _encode_request(self, request): # ASCII also helps prevent CVE-2019-9740. return request.encode('ascii')
(self, request)
20,606
http.client
_get_content_length
Get the content-length based on the body. If the body is None, we set Content-Length: 0 for methods that expect a body (RFC 7230, Section 3.3.2). We also set the Content-Length for any method if the body is a str or bytes-like object and not a file.
@staticmethod def _get_content_length(body, method): """Get the content-length based on the body. If the body is None, we set Content-Length: 0 for methods that expect a body (RFC 7230, Section 3.3.2). We also set the Content-Length for any method if the body is a str or bytes-like object and not a file. """ if body is None: # do an explicit check for not None here to distinguish # between unset and set but empty if method.upper() in _METHODS_EXPECTING_BODY: return 0 else: return None if hasattr(body, 'read'): # file-like object. return None try: # does it implement the buffer protocol (bytes, bytearray, array)? mv = memoryview(body) return mv.nbytes except TypeError: pass if isinstance(body, str): return len(body) return None
(body, method)
20,607
http.client
_get_hostport
null
def _get_hostport(self, host, port): if port is None: i = host.rfind(':') j = host.rfind(']') # ipv6 addresses have [...] if i > j: try: port = int(host[i+1:]) except ValueError: if host[i+1:] == "": # http://foo.com:/ == http://foo.com/ port = self.default_port else: raise InvalidURL("nonnumeric port: '%s'" % host[i+1:]) host = host[:i] else: port = self.default_port if host and host[0] == '[' and host[-1] == ']': host = host[1:-1] return (host, port)
(self, host, port)
20,608
http.client
_is_textIO
Test whether a file-like object is a text or a binary stream.
@staticmethod def _is_textIO(stream): """Test whether a file-like object is a text or a binary stream. """ return isinstance(stream, io.TextIOBase)
(stream)
20,609
http.client
_output
Add a line of output to the current request buffer. Assumes that the line does *not* end with \r\n.
def _output(self, s): """Add a line of output to the current request buffer. Assumes that the line does *not* end with \\r\\n. """ self._buffer.append(s)
(self, s)
20,610
http.client
_read_readable
null
def _read_readable(self, readable): if self.debuglevel > 0: print("sendIng a read()able") encode = self._is_textIO(readable) if encode and self.debuglevel > 0: print("encoding file using iso-8859-1") while True: datablock = readable.read(self.blocksize) if not datablock: break if encode: datablock = datablock.encode("iso-8859-1") yield datablock
(self, readable)
20,611
http.client
_send_output
Send the currently buffered request and clear the buffer. Appends an extra \r\n to the buffer. A message_body may be specified, to be appended to the request.
def _send_output(self, message_body=None, encode_chunked=False): """Send the currently buffered request and clear the buffer. Appends an extra \\r\\n to the buffer. A message_body may be specified, to be appended to the request. """ self._buffer.extend((b"", b"")) msg = b"\r\n".join(self._buffer) del self._buffer[:] self.send(msg) if message_body is not None: # create a consistent interface to message_body if hasattr(message_body, 'read'): # Let file-like take precedence over byte-like. This # is needed to allow the current position of mmap'ed # files to be taken into account. chunks = self._read_readable(message_body) else: try: # this is solely to check to see if message_body # implements the buffer API. it /would/ be easier # to capture if PyObject_CheckBuffer was exposed # to Python. memoryview(message_body) except TypeError: try: chunks = iter(message_body) except TypeError: raise TypeError("message_body should be a bytes-like " "object or an iterable, got %r" % type(message_body)) else: # the object implements the buffer interface and # can be passed directly into socket methods chunks = (message_body,) for chunk in chunks: if not chunk: if self.debuglevel > 0: print('Zero length chunk ignored') continue if encode_chunked and self._http_vsn == 11: # chunked encoding chunk = f'{len(chunk):X}\r\n'.encode('ascii') + chunk \ + b'\r\n' self.send(chunk) if encode_chunked and self._http_vsn == 11: # end chunked transfer self.send(b'0\r\n\r\n')
(self, message_body=None, encode_chunked=False)
20,612
http.client
_send_request
null
def _send_request(self, method, url, body, headers, encode_chunked): # Honor explicitly requested Host: and Accept-Encoding: headers. header_names = frozenset(k.lower() for k in headers) skips = {} if 'host' in header_names: skips['skip_host'] = 1 if 'accept-encoding' in header_names: skips['skip_accept_encoding'] = 1 self.putrequest(method, url, **skips) # chunked encoding will happen if HTTP/1.1 is used and either # the caller passes encode_chunked=True or the following # conditions hold: # 1. content-length has not been explicitly set # 2. the body is a file or iterable, but not a str or bytes-like # 3. Transfer-Encoding has NOT been explicitly set by the caller if 'content-length' not in header_names: # only chunk body if not explicitly set for backwards # compatibility, assuming the client code is already handling the # chunking if 'transfer-encoding' not in header_names: # if content-length cannot be automatically determined, fall # back to chunked encoding encode_chunked = False content_length = self._get_content_length(body, method) if content_length is None: if body is not None: if self.debuglevel > 0: print('Unable to determine size of %r' % body) encode_chunked = True self.putheader('Transfer-Encoding', 'chunked') else: self.putheader('Content-Length', str(content_length)) else: encode_chunked = False for hdr, value in headers.items(): self.putheader(hdr, value) if isinstance(body, str): # RFC 2616 Section 3.7.1 says that text default has a # default charset of iso-8859-1. body = _encode(body, 'body') self.endheaders(body, encode_chunked=encode_chunked)
(self, method, url, body, headers, encode_chunked)
20,613
http.client
_tunnel
null
def _tunnel(self): connect = b"CONNECT %s:%d HTTP/1.0\r\n" % ( self._tunnel_host.encode("ascii"), self._tunnel_port) headers = [connect] for header, value in self._tunnel_headers.items(): headers.append(f"{header}: {value}\r\n".encode("latin-1")) headers.append(b"\r\n") # Making a single send() call instead of one per line encourages # the host OS to use a more optimal packet size instead of # potentially emitting a series of small packets. self.send(b"".join(headers)) del headers response = self.response_class(self.sock, method=self._method) (version, code, message) = response._read_status() if code != http.HTTPStatus.OK: self.close() raise OSError(f"Tunnel connection failed: {code} {message.strip()}") while True: line = response.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: raise LineTooLong("header line") if not line: # for sites which EOF without sending a trailer break if line in (b'\r\n', b'\n', b''): break if self.debuglevel > 0: print('header:', line.decode())
(self)
20,614
http.client
_validate_host
Validate a host so it doesn't contain control characters.
def _validate_host(self, host): """Validate a host so it doesn't contain control characters.""" # Prevent CVE-2019-18348. match = _contains_disallowed_url_pchar_re.search(host) if match: raise InvalidURL(f"URL can't contain control characters. {host!r} " f"(found at least {match.group()!r})")
(self, host)
20,615
http.client
_validate_method
Validate a method name for putrequest.
def _validate_method(self, method): """Validate a method name for putrequest.""" # prevent http header injection match = _contains_disallowed_method_pchar_re.search(method) if match: raise ValueError( f"method can't contain control characters. {method!r} " f"(found at least {match.group()!r})")
(self, method)
20,616
http.client
_validate_path
Validate a url for putrequest.
def _validate_path(self, url): """Validate a url for putrequest.""" # Prevent CVE-2019-9740. match = _contains_disallowed_url_pchar_re.search(url) if match: raise InvalidURL(f"URL can't contain control characters. {url!r} " f"(found at least {match.group()!r})")
(self, url)
20,617
http.client
close
Close the connection to the HTTP server.
def close(self): """Close the connection to the HTTP server.""" self.__state = _CS_IDLE try: sock = self.sock if sock: self.sock = None sock.close() # close it manually... there may be other refs finally: response = self.__response if response: self.__response = None response.close()
(self)
20,618
http.client
connect
Connect to the host and port specified in __init__.
def connect(self): """Connect to the host and port specified in __init__.""" sys.audit("http.client.connect", self, self.host, self.port) self.sock = self._create_connection( (self.host,self.port), self.timeout, self.source_address) # Might fail in OSs that don't implement TCP_NODELAY try: self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) except OSError as e: if e.errno != errno.ENOPROTOOPT: raise if self._tunnel_host: self._tunnel()
(self)
20,619
http.client
endheaders
Indicate that the last header line has been sent to the server. This method sends the request to the server. The optional message_body argument can be used to pass a message body associated with the request.
def endheaders(self, message_body=None, *, encode_chunked=False): """Indicate that the last header line has been sent to the server. This method sends the request to the server. The optional message_body argument can be used to pass a message body associated with the request. """ if self.__state == _CS_REQ_STARTED: self.__state = _CS_REQ_SENT else: raise CannotSendHeader() self._send_output(message_body, encode_chunked=encode_chunked)
(self, message_body=None, *, encode_chunked=False)
20,620
http.client
getresponse
Get the response from the server. If the HTTPConnection is in the correct state, returns an instance of HTTPResponse or of whatever object is returned by the response_class variable. If a request has not been sent or if a previous response has not be handled, ResponseNotReady is raised. If the HTTP response indicates that the connection should be closed, then it will be closed before the response is returned. When the connection is closed, the underlying socket is closed.
def getresponse(self): """Get the response from the server. If the HTTPConnection is in the correct state, returns an instance of HTTPResponse or of whatever object is returned by the response_class variable. If a request has not been sent or if a previous response has not be handled, ResponseNotReady is raised. If the HTTP response indicates that the connection should be closed, then it will be closed before the response is returned. When the connection is closed, the underlying socket is closed. """ # if a prior response has been completed, then forget about it. if self.__response and self.__response.isclosed(): self.__response = None # if a prior response exists, then it must be completed (otherwise, we # cannot read this response's header to determine the connection-close # behavior) # # note: if a prior response existed, but was connection-close, then the # socket and response were made independent of this HTTPConnection # object since a new request requires that we open a whole new # connection # # this means the prior response had one of two states: # 1) will_close: this connection was reset and the prior socket and # response operate independently # 2) persistent: the response was retained and we await its # isclosed() status to become true. # if self.__state != _CS_REQ_SENT or self.__response: raise ResponseNotReady(self.__state) if self.debuglevel > 0: response = self.response_class(self.sock, self.debuglevel, method=self._method) else: response = self.response_class(self.sock, method=self._method) try: try: response.begin() except ConnectionError: self.close() raise assert response.will_close != _UNKNOWN self.__state = _CS_IDLE if response.will_close: # this effectively passes the connection to the response self.close() else: # remember this, so we can tell when it is complete self.__response = response return response except: response.close() raise
(self)
20,621
http.client
putheader
Send a request header line to the server. For example: h.putheader('Accept', 'text/html')
def putheader(self, header, *values): """Send a request header line to the server. For example: h.putheader('Accept', 'text/html') """ if self.__state != _CS_REQ_STARTED: raise CannotSendHeader() if hasattr(header, 'encode'): header = header.encode('ascii') if not _is_legal_header_name(header): raise ValueError('Invalid header name %r' % (header,)) values = list(values) for i, one_value in enumerate(values): if hasattr(one_value, 'encode'): values[i] = one_value.encode('latin-1') elif isinstance(one_value, int): values[i] = str(one_value).encode('ascii') if _is_illegal_header_value(values[i]): raise ValueError('Invalid header value %r' % (values[i],)) value = b'\r\n\t'.join(values) header = header + b': ' + value self._output(header)
(self, header, *values)
20,622
http.client
putrequest
Send a request to the server. `method' specifies an HTTP request method, e.g. 'GET'. `url' specifies the object being requested, e.g. '/index.html'. `skip_host' if True does not add automatically a 'Host:' header `skip_accept_encoding' if True does not add automatically an 'Accept-Encoding:' header
def putrequest(self, method, url, skip_host=False, skip_accept_encoding=False): """Send a request to the server. `method' specifies an HTTP request method, e.g. 'GET'. `url' specifies the object being requested, e.g. '/index.html'. `skip_host' if True does not add automatically a 'Host:' header `skip_accept_encoding' if True does not add automatically an 'Accept-Encoding:' header """ # if a prior response has been completed, then forget about it. if self.__response and self.__response.isclosed(): self.__response = None # in certain cases, we cannot issue another request on this connection. # this occurs when: # 1) we are in the process of sending a request. (_CS_REQ_STARTED) # 2) a response to a previous request has signalled that it is going # to close the connection upon completion. # 3) the headers for the previous response have not been read, thus # we cannot determine whether point (2) is true. (_CS_REQ_SENT) # # if there is no prior response, then we can request at will. # # if point (2) is true, then we will have passed the socket to the # response (effectively meaning, "there is no prior response"), and # will open a new one when a new request is made. # # Note: if a prior response exists, then we *can* start a new request. # We are not allowed to begin fetching the response to this new # request, however, until that prior response is complete. # if self.__state == _CS_IDLE: self.__state = _CS_REQ_STARTED else: raise CannotSendRequest(self.__state) self._validate_method(method) # Save the method for use later in the response phase self._method = method url = url or '/' self._validate_path(url) request = '%s %s %s' % (method, url, self._http_vsn_str) self._output(self._encode_request(request)) if self._http_vsn == 11: # Issue some standard headers for better HTTP/1.1 compliance if not skip_host: # this header is issued *only* for HTTP/1.1 # connections. more specifically, this means it is # only issued when the client uses the new # HTTPConnection() class. 
backwards-compat clients # will be using HTTP/1.0 and those clients may be # issuing this header themselves. we should NOT issue # it twice; some web servers (such as Apache) barf # when they see two Host: headers # If we need a non-standard port,include it in the # header. If the request is going through a proxy, # but the host of the actual URL, not the host of the # proxy. netloc = '' if url.startswith('http'): nil, netloc, nil, nil, nil = urlsplit(url) if netloc: try: netloc_enc = netloc.encode("ascii") except UnicodeEncodeError: netloc_enc = netloc.encode("idna") self.putheader('Host', netloc_enc) else: if self._tunnel_host: host = self._tunnel_host port = self._tunnel_port else: host = self.host port = self.port try: host_enc = host.encode("ascii") except UnicodeEncodeError: host_enc = host.encode("idna") # As per RFC 273, IPv6 address should be wrapped with [] # when used as Host header if host.find(':') >= 0: host_enc = b'[' + host_enc + b']' if port == self.default_port: self.putheader('Host', host_enc) else: host_enc = host_enc.decode("ascii") self.putheader('Host', "%s:%s" % (host_enc, port)) # note: we are assuming that clients will not attempt to set these # headers since *this* library must deal with the # consequences. this also means that when the supporting # libraries are updated to recognize other forms, then this # code should be changed (removed or updated). # we only want a Content-Encoding of "identity" since we don't # support encodings such as x-gzip or x-deflate. if not skip_accept_encoding: self.putheader('Accept-Encoding', 'identity') # we can accept "chunked" Transfer-Encodings, but no others # NOTE: no TE header implies *only* "chunked" #self.putheader('TE', 'chunked') # if TE is supplied in the header, then it must appear in a # Connection header. #self.putheader('Connection', 'TE') else: # For HTTP/1.0, the server will assume "not chunked" pass
(self, method, url, skip_host=False, skip_accept_encoding=False)
20,623
http.client
request
Send a complete request to the server.
def request(self, method, url, body=None, headers={}, *, encode_chunked=False): """Send a complete request to the server.""" self._send_request(method, url, body, headers, encode_chunked)
(self, method, url, body=None, headers={}, *, encode_chunked=False)
20,624
http.client
send
Send `data' to the server. ``data`` can be a string object, a bytes object, an array object, a file-like object that supports a .read() method, or an iterable object.
def send(self, data): """Send `data' to the server. ``data`` can be a string object, a bytes object, an array object, a file-like object that supports a .read() method, or an iterable object. """ if self.sock is None: if self.auto_open: self.connect() else: raise NotConnected() if self.debuglevel > 0: print("send:", repr(data)) if hasattr(data, "read") : if self.debuglevel > 0: print("sendIng a read()able") encode = self._is_textIO(data) if encode and self.debuglevel > 0: print("encoding file using iso-8859-1") while 1: datablock = data.read(self.blocksize) if not datablock: break if encode: datablock = datablock.encode("iso-8859-1") sys.audit("http.client.send", self, datablock) self.sock.sendall(datablock) return sys.audit("http.client.send", self, data) try: self.sock.sendall(data) except TypeError: if isinstance(data, collections.abc.Iterable): for d in data: self.sock.sendall(d) else: raise TypeError("data should be a bytes-like object " "or an iterable, got %r" % type(data))
(self, data)
20,625
http.client
set_debuglevel
null
def set_debuglevel(self, level): self.debuglevel = level
(self, level)
20,626
http.client
set_tunnel
Set up host and port for HTTP CONNECT tunnelling. In a connection that uses HTTP CONNECT tunneling, the host passed to the constructor is used as a proxy server that relays all communication to the endpoint passed to `set_tunnel`. This done by sending an HTTP CONNECT request to the proxy server when the connection is established. This method must be called before the HTTP connection has been established. The headers argument should be a mapping of extra HTTP headers to send with the CONNECT request.
def set_tunnel(self, host, port=None, headers=None): """Set up host and port for HTTP CONNECT tunnelling. In a connection that uses HTTP CONNECT tunneling, the host passed to the constructor is used as a proxy server that relays all communication to the endpoint passed to `set_tunnel`. This done by sending an HTTP CONNECT request to the proxy server when the connection is established. This method must be called before the HTTP connection has been established. The headers argument should be a mapping of extra HTTP headers to send with the CONNECT request. """ if self.sock: raise RuntimeError("Can't set up tunnel for established connection") self._tunnel_host, self._tunnel_port = self._get_hostport(host, port) if headers: self._tunnel_headers = headers else: self._tunnel_headers.clear()
(self, host, port=None, headers=None)
20,627
mureq
HTTPErrorStatus
HTTPErrorStatus is raised by Response.raise_for_status() to indicate an HTTP error code (a 40x or a 50x). Note that a well-formed response with an error code does not result in an exception unless raise_for_status() is called explicitly.
class HTTPErrorStatus(HTTPException): """HTTPErrorStatus is raised by Response.raise_for_status() to indicate an HTTP error code (a 40x or a 50x). Note that a well-formed response with an error code does not result in an exception unless raise_for_status() is called explicitly. """ def __init__(self, status_code): self.status_code = status_code def __str__(self): return f"HTTP response returned error code {self.status_code:d}"
(status_code)
20,628
mureq
__init__
null
def __init__(self, status_code): self.status_code = status_code
(self, status_code)
20,629
mureq
__str__
null
def __str__(self): return f"HTTP response returned error code {self.status_code:d}"
(self)
20,630
http.client
HTTPException
null
class HTTPException(Exception): # Subclasses that define an __init__ must call Exception.__init__ # or define self.args. Otherwise, str() will fail. pass
null
20,631
http.client
HTTPMessage
null
class HTTPMessage(email.message.Message): # XXX The only usage of this method is in # http.server.CGIHTTPRequestHandler. Maybe move the code there so # that it doesn't need to be part of the public API. The API has # never been defined so this could cause backwards compatibility # issues. def getallmatchingheaders(self, name): """Find all header lines matching a given header name. Look through the list of headers and find all lines matching a given header name (and their continuation lines). A list of the lines is returned, without interpretation. If the header does not occur, an empty list is returned. If the header occurs multiple times, all occurrences are returned. Case is not important in the header name. """ name = name.lower() + ':' n = len(name) lst = [] hit = 0 for line in self.keys(): if line[:n].lower() == name: hit = 1 elif not line[:1].isspace(): hit = 0 if hit: lst.append(line) return lst
(policy=Compat32())
20,632
email.message
__bytes__
Return the entire formatted message as a bytes object.
def __bytes__(self): """Return the entire formatted message as a bytes object. """ return self.as_bytes()
(self)
20,633
email.message
__contains__
null
def __contains__(self, name): return name.lower() in [k.lower() for k, v in self._headers]
(self, name)
20,634
email.message
__delitem__
Delete all occurrences of a header, if present. Does not raise an exception if the header is missing.
def __delitem__(self, name): """Delete all occurrences of a header, if present. Does not raise an exception if the header is missing. """ name = name.lower() newheaders = [] for k, v in self._headers: if k.lower() != name: newheaders.append((k, v)) self._headers = newheaders
(self, name)
20,635
email.message
__getitem__
Get a header value. Return None if the header is missing instead of raising an exception. Note that if the header appeared multiple times, exactly which occurrence gets returned is undefined. Use get_all() to get all the values matching a header field name.
def __getitem__(self, name): """Get a header value. Return None if the header is missing instead of raising an exception. Note that if the header appeared multiple times, exactly which occurrence gets returned is undefined. Use get_all() to get all the values matching a header field name. """ return self.get(name)
(self, name)
20,636
email.message
__init__
null
def __init__(self, policy=compat32): self.policy = policy self._headers = [] self._unixfrom = None self._payload = None self._charset = None # Defaults for multipart messages self.preamble = self.epilogue = None self.defects = [] # Default content type self._default_type = 'text/plain'
(self, policy=Compat32())
20,637
email.message
__iter__
null
def __iter__(self): for field, value in self._headers: yield field
(self)
20,638
email.message
__len__
Return the total number of headers, including duplicates.
def __len__(self): """Return the total number of headers, including duplicates.""" return len(self._headers)
(self)
20,639
email.message
__setitem__
Set the value of a header. Note: this does not overwrite an existing header with the same field name. Use __delitem__() first to delete any existing headers.
def __setitem__(self, name, val): """Set the value of a header. Note: this does not overwrite an existing header with the same field name. Use __delitem__() first to delete any existing headers. """ max_count = self.policy.header_max_count(name) if max_count: lname = name.lower() found = 0 for k, v in self._headers: if k.lower() == lname: found += 1 if found >= max_count: raise ValueError("There may be at most {} {} headers " "in a message".format(max_count, name)) self._headers.append(self.policy.header_store_parse(name, val))
(self, name, val)
20,640
email.message
__str__
Return the entire formatted message as a string.
def __str__(self): """Return the entire formatted message as a string. """ return self.as_string()
(self)
20,641
email.message
_get_params_preserve
null
def _get_params_preserve(self, failobj, header): # Like get_params() but preserves the quoting of values. BAW: # should this be part of the public interface? missing = object() value = self.get(header, missing) if value is missing: return failobj params = [] for p in _parseparam(value): try: name, val = p.split('=', 1) name = name.strip() val = val.strip() except ValueError: # Must have been a bare attribute name = p.strip() val = '' params.append((name, val)) params = utils.decode_params(params) return params
(self, failobj, header)
20,642
email.message
add_header
Extended header setting. name is the header field to add. keyword arguments can be used to set additional parameters for the header field, with underscores converted to dashes. Normally the parameter will be added as key="value" unless value is None, in which case only the key will be added. If a parameter value contains non-ASCII characters it can be specified as a three-tuple of (charset, language, value), in which case it will be encoded according to RFC2231 rules. Otherwise it will be encoded using the utf-8 charset and a language of ''. Examples: msg.add_header('content-disposition', 'attachment', filename='bud.gif') msg.add_header('content-disposition', 'attachment', filename=('utf-8', '', Fußballer.ppt')) msg.add_header('content-disposition', 'attachment', filename='Fußballer.ppt'))
def add_header(self, _name, _value, **_params): """Extended header setting. name is the header field to add. keyword arguments can be used to set additional parameters for the header field, with underscores converted to dashes. Normally the parameter will be added as key="value" unless value is None, in which case only the key will be added. If a parameter value contains non-ASCII characters it can be specified as a three-tuple of (charset, language, value), in which case it will be encoded according to RFC2231 rules. Otherwise it will be encoded using the utf-8 charset and a language of ''. Examples: msg.add_header('content-disposition', 'attachment', filename='bud.gif') msg.add_header('content-disposition', 'attachment', filename=('utf-8', '', Fußballer.ppt')) msg.add_header('content-disposition', 'attachment', filename='Fußballer.ppt')) """ parts = [] for k, v in _params.items(): if v is None: parts.append(k.replace('_', '-')) else: parts.append(_formatparam(k.replace('_', '-'), v)) if _value is not None: parts.insert(0, _value) self[_name] = SEMISPACE.join(parts)
(self, _name, _value, **_params)
20,643
email.message
as_bytes
Return the entire formatted message as a bytes object. Optional 'unixfrom', when true, means include the Unix From_ envelope header. 'policy' is passed to the BytesGenerator instance used to serialize the message; if not specified the policy associated with the message instance is used.
def as_bytes(self, unixfrom=False, policy=None): """Return the entire formatted message as a bytes object. Optional 'unixfrom', when true, means include the Unix From_ envelope header. 'policy' is passed to the BytesGenerator instance used to serialize the message; if not specified the policy associated with the message instance is used. """ from email.generator import BytesGenerator policy = self.policy if policy is None else policy fp = BytesIO() g = BytesGenerator(fp, mangle_from_=False, policy=policy) g.flatten(self, unixfrom=unixfrom) return fp.getvalue()
(self, unixfrom=False, policy=None)
20,644
email.message
as_string
Return the entire formatted message as a string. Optional 'unixfrom', when true, means include the Unix From_ envelope header. For backward compatibility reasons, if maxheaderlen is not specified it defaults to 0, so you must override it explicitly if you want a different maxheaderlen. 'policy' is passed to the Generator instance used to serialize the message; if it is not specified the policy associated with the message instance is used. If the message object contains binary data that is not encoded according to RFC standards, the non-compliant data will be replaced by unicode "unknown character" code points.
def as_string(self, unixfrom=False, maxheaderlen=0, policy=None): """Return the entire formatted message as a string. Optional 'unixfrom', when true, means include the Unix From_ envelope header. For backward compatibility reasons, if maxheaderlen is not specified it defaults to 0, so you must override it explicitly if you want a different maxheaderlen. 'policy' is passed to the Generator instance used to serialize the message; if it is not specified the policy associated with the message instance is used. If the message object contains binary data that is not encoded according to RFC standards, the non-compliant data will be replaced by unicode "unknown character" code points. """ from email.generator import Generator policy = self.policy if policy is None else policy fp = StringIO() g = Generator(fp, mangle_from_=False, maxheaderlen=maxheaderlen, policy=policy) g.flatten(self, unixfrom=unixfrom) return fp.getvalue()
(self, unixfrom=False, maxheaderlen=0, policy=None)
20,645
email.message
attach
Add the given payload to the current payload. The current payload will always be a list of objects after this method is called. If you want to set the payload to a scalar object, use set_payload() instead.
def attach(self, payload): """Add the given payload to the current payload. The current payload will always be a list of objects after this method is called. If you want to set the payload to a scalar object, use set_payload() instead. """ if self._payload is None: self._payload = [payload] else: try: self._payload.append(payload) except AttributeError: raise TypeError("Attach is not valid on a message with a" " non-multipart payload")
(self, payload)
20,646
email.message
del_param
Remove the given parameter completely from the Content-Type header. The header will be re-written in place without the parameter or its value. All values will be quoted as necessary unless requote is False. Optional header specifies an alternative to the Content-Type header.
def del_param(self, param, header='content-type', requote=True): """Remove the given parameter completely from the Content-Type header. The header will be re-written in place without the parameter or its value. All values will be quoted as necessary unless requote is False. Optional header specifies an alternative to the Content-Type header. """ if header not in self: return new_ctype = '' for p, v in self.get_params(header=header, unquote=requote): if p.lower() != param.lower(): if not new_ctype: new_ctype = _formatparam(p, v, requote) else: new_ctype = SEMISPACE.join([new_ctype, _formatparam(p, v, requote)]) if new_ctype != self.get(header): del self[header] self[header] = new_ctype
(self, param, header='content-type', requote=True)