inputs        stringlengths    312 to 52k
targets       stringlengths    1 to 3.1k
block_type    stringclasses    11 values
scenario      stringclasses    7 values
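
The four columns above describe a fill-in-the-middle (FIM) code-completion dataset over the searcharray library: each inputs value is a single string built from the <filename>, <fim_prefix>, <fim_suffix>, and <fim_middle> markers visible in the rows below; targets holds the text that belongs in the <fim_middle> slot; block_type and scenario label the completion (for example STATEMENT and complete_current_header_inner_block_completion). Below is a minimal sketch of splitting one row back into its parts. The helper name split_fim_row and the assumption that each marker appears exactly once, in order, are mine, not part of the dataset.

    def split_fim_row(inputs: str, target: str):
        """Split a row's inputs string into (filename, prefix, suffix) plus its target.

        Assumes the markers appear exactly once, in the order
        <filename> ... <fim_prefix> ... <fim_suffix> ... <fim_middle>.
        """
        _, rest = inputs.split("<filename>", 1)
        filename, rest = rest.split("<fim_prefix>", 1)
        prefix, rest = rest.split("<fim_suffix>", 1)
        suffix, _ = rest.split("<fim_middle>", 1)
        return filename.strip(), prefix, suffix, target

Given a dict-like row (an assumption about how the rows are loaded), filename, prefix, suffix, target = split_fim_row(row["inputs"], row["targets"]) recovers the pieces; the completion task is to produce target given prefix and suffix.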
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take import json from collections import Counter import warnings import logging from typing import List, Union, Optional, Iterable import numpy as np from searcharray.phrase.scan_merge import scan_merge_ins from searcharray.phrase.posn_diffs import compute_phrase_freqs from searcharray.phrase.middle_out import PosnBitArray from searcharray.similarity import Similarity, default_bm25 from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list from searcharray.term_dict import TermMissingError logger = logging.getLogger(__name__) # When running in pytest import sys # noqa handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.ERROR) formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.ERROR) class Terms: """An indexed search doc - a single bag of tokenized words and positions.""" def __init__(self, postings, doc_len: int = 0, posns: Optional[dict] = None, encoded=False): self.postings = postings self.posns = None self.encoded = encoded self.doc_len = doc_len self.posns = posns def _validate_posns(self): # (For testing/assertions) - Confirm every term in positions also in postings if self.posns is None: return for term in self.posns: if term not in self.postings: raise ValueError(f"Term {term} in positions but not in postings. ") def termfreq(self, token): return self.postings[token] def terms(self): return self.postings.items() def positions(self, term=None): if self.posns is None: return {} if term is None: posns = self.posns.items() else: posns = self.posns[term] return posns def raw_positions(self, term_dict, term=None): if self.posns is None: return {} if term is None: posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()] else: posns = [(term_dict.get_term_id(term), self.posns[term])] return posns def tf_to_dense(self, term_dict): """Convert to a dense vector of term frequencies.""" dense = np.zeros(len(term_dict)) for term, freq in self.terms(): dense[term_dict.get_term_id(term)] = freq return dense def __len__(self): return len(self.postings) def __repr__(self): posting_keys = set(self.postings.keys()) rval = f"Terms({posting_keys})" return rval def __str__(self): return repr(self) def __eq__(self, other): # Flip to the other implementation if we're comparing to a SearchArray # to get a boolean array back if isinstance(other, SearchArray): return other == self same_postings = isinstance(other, Terms) and self.postings == other.postings if same_postings and self.doc_len == other.doc_len: return True def __lt__(self, other): # return isinstance(other, Terms) and hash(self) < hash(other) keys_both = set(self.postings.keys()).union(set(other.postings.keys())) # Sort lexically keys_both = sorted(keys_both) # Iterate as if these are two vectors of the same large dimensional vector sparse for key in keys_both: lhs_val = 0 rhs_val = 0 try: lhs_val = self.postings[key] except KeyError: pass try: rhs_val = other.postings[key] except KeyError: pass if lhs_val < rhs_val: return True elif lhs_val > rhs_val: return False else: continue return False def __le__(self, other): return self < other or self == 
other def __gt__(self, other): return not (self < other) and self != other def __hash__(self): return hash(json.dumps(self.postings, sort_keys=True)) class TermsDtype(ExtensionDtype): """Pandas dtype for terms.""" name = 'tokenized_text' type = Terms kind = 'O' @classmethod def construct_from_string(cls, string): if not isinstance(string, str): raise TypeError( "'construct_from_string' expects a string, got {}".format(type(string)) ) elif string == cls.name: return cls() else: raise TypeError( "Cannot construct a '{}' from '{}'".format(cls.__name__, string) ) @classmethod def construct_array_type(cls): return SearchArray def __repr__(self): return 'TermsDtype()' @property def na_value(self): return Terms({}) def valid_value(self, value): return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms) register_extension_dtype(TermsDtype) def ws_tokenizer(string): if pd.isna(string): return [] if not isinstance(string, str): raise ValueError("Expected a string") return string.split() def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray): tfs = {} labeled_posns = {} for term_idx in row.cols: term = term_dict.get_term(term_idx) tfs[term] = 1 enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id) labeled_posns[term] = enc_term_posns result = Terms(tfs, posns=labeled_posns, doc_len=doc_len, encoded=True) return result class SearchArray(ExtensionArray): """An array of tokenized text (Termss).""" dtype = TermsDtype() def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True): # Check dtype, raise TypeError if not is_list_like(postings): raise TypeError("Expected list-like object, got {}".format(type(postings))) self.avoid_copies = avoid_copies self.tokenizer = tokenizer self.term_mat, self.posns, \ self.term_dict, self.avg_doc_length, \ self.doc_lens = build_index_from_terms_list(postings, Terms) @classmethod def index(cls, array: Iterable, tokenizer=ws_tokenizer, truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray': """Index an array of strings using tokenizer.""" if not is_list_like(array): raise TypeError("Expected list-like object, got {}".format(type(array))) term_mat, posns, term_dict, avg_doc_length, doc_lens =\ build_index_from_tokenizer(array, tokenizer, batch_size=batch_size, truncate=truncate) postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies) postings.term_mat = term_mat postings.posns = posns postings.term_dict = term_dict postings.avg_doc_length = avg_doc_length postings.doc_lens = doc_lens return postings @classmethod def _from_sequence(cls, scalars, dtype=None, copy=False): """Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into).""" if dtype is not None: if not isinstance(dtype, TermsDtype): return scalars if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype(): return cls(scalars) # String types elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US': return cls(scalars) # Other objects elif isinstance(scalars, np.ndarray) and scalars.dtype != object: return scalars return cls(scalars) def memory_usage(self, deep=False): """Return memory usage of this array in bytes.""" return self.nbytes @property def nbytes(self): return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes def __getitem__(self, key): key = pd.api.indexers.check_array_indexer(self, key) # Want to take rows of term freqs if isinstance(key, numbers.Integral): try: rows = self.term_mat[key] doc_len = self.doc_lens[key] doc_id = 
key if doc_id < 0: doc_id += len(self) return _row_to_postings_row(doc_id, rows[0], doc_len, self.term_dict, self.posns) except IndexError: raise IndexError("index out of bounds") else: # Construct a sliced view of this array sliced_tfs = self.term_mat.slice(key) sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns arr = SearchArray([], tokenizer=self.tokenizer) arr.term_mat = sliced_tfs arr.doc_lens = self.doc_lens[key] arr.posns = sliced_posns arr.term_dict = self.term_dict arr.avg_doc_length = self.avg_doc_length return arr def __setitem__(self, key, value): """Set an item in the array.""" key = pd.api.indexers.check_array_indexer(self, key) if isinstance(value, pd.Series): value = value.values if isinstance(value, pd.DataFrame): value = value.values.flatten() if isinstance(value, SearchArray): value = value.to_numpy() if isinstance(value, list): value = np.asarray(value, dtype=object) if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value): raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}") # Cant set a single value to an array if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray): raise ValueError("Cannot set a single value to an array") try: is_encoded = False posns = None term_mat = np.asarray([]) doc_lens = np.asarray([]) if isinstance(value, float): term_mat = np.asarray([value]) doc_lens = np.asarray([0]) elif isinstance(value, Terms): term_mat = np.asarray([value.tf_to_dense(self.term_dict)]) doc_lens = np.asarray([value.doc_len]) is_encoded = value.encoded posns = [value.raw_positions(self.term_dict)] elif isinstance(value, np.ndarray): term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value]) doc_lens = np.asarray([x.doc_len for x in value]) is_encoded = value[0].encoded if len(value) > 0 else False posns = [x.raw_positions(self.term_dict) for x in value] np.nan_to_num(term_mat, copy=False, nan=0) self.term_mat[key] = term_mat self.doc_lens[key] = doc_lens if posns is not None: self.posns.insert(key, posns, is_encoded) # Assume we have a positions for each term, doc pair. We can just update it. # Otherwise we would have added new terms except TermMissingError: self._add_new_terms(key, value) def _add_new_terms(self, key, value): msg = """Adding new terms! This might not be good if you tokenized this new text with a different tokenizer. Also. 
This is slow.""" warnings.warn(msg) scan_value = value if isinstance(value, Terms): scan_value = np.asarray([value]) for row in scan_value: for term in row.terms(): self.term_dict.add_term(term[0]) self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict))) # Ensure posns_lookup has at least max self.posns self[key] = value def value_counts( self, dropna: bool = True, ): if dropna: counts = Counter(self[:]) counts.pop(Terms({}), None) else: counts = Counter(self[:]) return pd.Series(counts) def __len__(self): len_rval = len(self.term_mat.rows) return len_rval def __ne__(self, other): if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index): return NotImplemented return ~(self == other) def __eq__(self, other): """Return a boolean numpy array indicating elementwise equality.""" # When other is a dataframe or series, not implemented if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index): return NotImplemented # When other is an ExtensionArray if isinstance(other, SearchArray): if len(self) != len(other): return False elif len(other) == 0: return np.array([], dtype=bool) else: # Compatible term dicts, and same term freqs # (not looking at positions, maybe we should?) if self.term_dict.compatible(other.term_dict): return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens) else: return np.zeros(len(self), dtype=bool) # return np.array(self[:]) == np.array(other[:]) # When other is a scalar value elif isinstance(other, Terms): other = SearchArray([other], tokenizer=self.tokenizer) warnings.warn("Comparing a scalar value to a SearchArray. This is slow.") return np.array(self[:]) == np.array(other[:]) # When other is a sequence but not an ExtensionArray # its an array of dicts elif is_list_like(other): if len(self) != len(other): return False elif len(other) == 0: return np.array([], dtype=bool) # We actually don't know how it was tokenized other = SearchArray(other, tokenizer=self.tokenizer) return np.array(self[:]) == np.array(other[:]) # Return False where 'other' is neither the same length nor a scalar else: return np.full(len(self), False) def isna(self): # Every row with all 0s empties = self.doc_lens == 0 return empties def take(self, indices, allow_fill=False, fill_value=None): # Want to take rows of term freqs row_indices = np.arange(len(self.term_mat.rows)) # Take within the row indices themselves result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1) if allow_fill and -1 in result_indices: if fill_value is None or pd.isna(fill_value): fill_value = Terms({}, encoded=True) to_fill_mask = result_indices == -1 # This is slow as it rebuilds all the term dictionaries # on the subsequent assignment lines # However, this case tends to be the exception for # most dataframe operations taken = SearchArray([fill_value] * len(result_indices)) taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy() return taken else: taken = self[result_indices].copy() return taken def copy(self): postings_arr = SearchArray([], tokenizer=self.tokenizer) postings_arr.doc_lens = self.doc_lens.copy() postings_arr.term_mat = self.term_mat.copy() postings_arr.posns = self.posns postings_arr.term_dict = self.term_dict postings_arr.avg_doc_length = self.avg_doc_length if not self.avoid_copies: postings_arr.posns = self.posns.copy() postings_arr.term_dict = self.term_dict.copy() return postings_arr @classmethod def _concat_same_type(cls, to_concat): concatenated_data = 
np.concatenate([ea[:] for ea in to_concat]) return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer) @classmethod def _from_factorized(cls, values, original): return cls(values) def _values_for_factorize(self): """Return an array and missing value suitable for factorization (ie grouping).""" arr = np.asarray(self[:], dtype=object) return arr, Terms({}) def _check_token_arg(self, token): if isinstance(token, str): return token elif isinstance(token, list) and len(token) == 1: return token[0] elif isinstance(token, list): return token else: raise TypeError("Expected a string or list of strings for phrases") # *********************************************************** # Search functionality # *********************************************************** def termfreqs(self, token: Union[List[str], str]) -> np.ndarray: token = self._check_token_arg(token) if isinstance(token, list): return self.phrase_freq(token) try: term_id = self.term_dict.get_term_id(token) matches = np.zeros(len(self), dtype=int) slice_of_rows = None if self.term_mat.subset: slice_of_rows = self.term_mat.rows doc_ids, termfreqs = self.posns.termfreqs(term_id, doc_ids=slice_of_rows) mask = np.isin(self.term_mat.rows, doc_ids) matches[mask] = termfreqs return matches else: doc_ids, termfreqs = self.posns.termfreqs(term_id, doc_ids=slice_of_rows) matches[doc_ids] = termfreqs return matches except TermMissingError: return np.zeros(len(self), dtype=int) def docfreq(self, token: str) -> int: if not isinstance(token, str): raise TypeError("Expected a string") # Count number of rows where the term appears try: return self.posns.docfreq(self.term_dict.get_term_id(token)) except TermMissingError: return 0 def doclengths(self) -> np.ndarray: return self.doc_lens def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray: """Return a boolean numpy array indicating which elements contain the given term.""" token = self._check_token_arg(token) if isinstance(token, list): term_freq = self.phrase_freq(token) else: term_freq = self.termfreqs(token) return term_freq > 0 def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray: """Score each doc using a similarity function. Parameters ---------- token : str or list of str of what to search (already tokenized) similarity : How to score the documents. Default is BM25. 
""" # Get term freqs per token token = self._check_token_arg(token) # For expensive toknes, we compute doc freq first, so we # cache them in the DF cache, to let TF cache know it should be cached tokens_l = [token] if isinstance(token, str) else token all_dfs = np.asarray([self.docfreq(token) for token in tokens_l]) tfs = self.termfreqs(token) token = self._check_token_arg(token) doc_lens = self.doclengths() scores = similarity(term_freqs=tfs, doc_freqs=all_dfs, doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length, num_docs=len(self)) return scores def positions(self, token: str, key=None) -> List[np.ndarray]: """Return a list of lists of positions of the given term.""" term_id = self.term_dict.get_term_id(token) key = self.term_mat.rows[key] if key is not None else self.term_mat.rows posns = self.posns.positions(term_id, doc_ids=key) return posns def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray: """Return a mask on the postings array indicating which elements contain all terms.""" masks = [self.match(term) for term in tokens] mask = np.ones(len(self), dtype=bool) for curr_mask in masks: mask <fim_suffix> & curr_mask return mask def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray: """Return a mask on the postings array indicating which elements contain all terms.""" masks = [self.match(term) for term in tokens] mask = np.sum(masks, axis=0) >= min_should_match return mask def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray: if slop == 1 and len(tokens) == len(set(tokens)): phrase_freqs = np.zeros(len(self)) try: doc_ids = self.term_mat.rows term_ids = [self.term_dict.get_term_id(token) for token in tokens] return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids, phrase_freqs=phrase_freqs) except TermMissingError: return phrase_freqs else: return self.phrase_freq_every_diff(tokens, slop=slop) def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray: if mask is None: mask = self.and_query(tokens) if np.sum(mask) == 0: return mask # Gather positions posns = [self.positions(token, mask) for token in tokens] phrase_freqs = np.zeros(len(self)) phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop) return phrase_freqs def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray: phrase_freqs = -np.ones(len(self)) mask = self.and_query(tokens) phrase_freqs[~mask] = 0 if np.sum(mask) == 0: return phrase_freqs term_posns = [self.positions(term, mask) for term in tokens] for width in [10, 20, 30, 40]: phrase_freqs[mask] = compute_phrase_freqs(term_posns, phrase_freqs[mask], slop=slop, width=width) remaining_mask = phrase_freqs == -1 if np.any(remaining_mask): remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop) phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask] return phrase_freqs <fim_middle>= mask
= mask
STATEMENT
complete_current_header_inner_block_completion
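
Reading the first row: the target is the text that fills the gap between <fim_prefix> and <fim_suffix>. Spliced back in, the completed statement sits inside SearchArray.and_query in postings.py. The snippet below is reconstructed from the row itself, with indentation restored by hand:

    def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
        """Return a mask on the postings array indicating which elements contain all terms."""
        masks = [self.match(term) for term in tokens]
        mask = np.ones(len(self), dtype=bool)
        for curr_mask in masks:
            mask = mask & curr_mask  # target "= mask" spliced between "mask" and "& curr_mask"
        return mask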
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse_min_should_match(num_clauses: int, spec: str) -> int: """Parse Solr's min should match (ie mm) spec. See this ChatGPT translation of mm code from Solr's Java code for parsing this https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb Parameters ---------- num_clauses : int spec : str Returns ------- int : the number of clauses that must match """ def checked_parse_int(value, error_message): try: return int(value) except ValueError: raise ValueError(error_message) result = num_clauses spec = spec.strip() if '<' in spec: # we have conditional spec(s) space_around_less_than_pattern = re.compile(r'\s*<\s*') spec = space_around_less_than_pattern.sub('<', spec) for s in spec.split(): parts = s.split('<', 1) if len(parts) < 2: raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'") upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.") if num_clauses <= upper_bound: return result else: result = parse_min_should_match(num_clauses, parts[1]) return result # otherwise, simple expression if '%' in spec: # percentage - assume the % was the last char. If not, let int() fail. spec = spec[:-1] percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") calc = (result * percent) * (1 / 100) result = result + int(calc) if calc < 0 else int(calc) else: calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") result = result + calc if calc < 0 else calc return min(num_clauses, max(result, 0)) def parse_field_boosts(field_lists: List[str]) -> dict: """Parse Solr's qf, pf, pf2, pf3 field boosts.""" if not field_lists: return {} out = {} carat_pattern = re.compile(r'\^') for field in field_lists: parts = carat_pattern.split(field) out[parts[0]] = None if len(parts) == 1 else float(parts[1]) return out def get_field(frame, field) -> SearchArray: if field not in frame.columns: raise ValueError(f"Field {field} not in dataframe") if not isinstance(frame[field].array, SearchArray): raise ValueError(f"Field {field} is not a searcharray field") return frame[field].array def parse_query_terms(frame: pd.DataFrame, query: str, query_fields: List[str]): search_terms: Dict[str, List[str]] = {} num_search_terms = 0 term_centric = True for field in query_fields: arr = get_field(frame, field) tokenizer = arr.tokenizer search_terms[field] = [] field_num_search_terms = 0 for posn, term in enumerate(tokenizer(query)): search_terms[field].append(term) field_num_search_terms += 1 if num_search_terms == 0: num_search_terms = field_num_search_terms elif field_num_search_terms != num_search_terms: term_centric = False return num_search_terms, search_terms, term_centric def _edismax_term_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity) -> Tuple[np.ndarray, str]: explain = [] term_scores <fim_suffix> [] for term_posn in range(num_search_terms): max_scores = np.zeros(len(frame)) term_explain = [] for field, boost in query_fields.items(): term = search_terms[field][term_posn] post_arr = get_field(frame, field) field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost) 
boost_exp = f"{boost}" if boost is not None else "1" term_explain.append(f"{field}:{term}^{boost_exp}") max_scores = np.maximum(max_scores, field_term_score) term_scores.append(max_scores) explain.append("(" + " | ".join(term_explain) + ")") min_should_match = parse_min_should_match(num_search_terms, spec=mm) qf_scores = np.asarray(term_scores) matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match qf_scores = np.sum(term_scores, axis=0) qf_scores[~matches_gt_mm] = 0 return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}" def _edismax_field_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: field_scores = [] explain = [] for field, boost in query_fields.items(): post_arr = get_field(frame, field) term_scores = np.array([post_arr.score(term, similarity=similarity) for term in search_terms[field]]) min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm) exp = " ".join([f"{field}:{term}" for term in search_terms[field]]) boost_exp = f"{boost}" if boost is not None else "1" exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}" exp = "(" + exp + f")^{boost_exp}" matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field])) sum_terms_bm25 = np.sum(term_scores, axis=0) sum_terms_bm25[~matches_gt_mm] = 0 field_scores.append(sum_terms_bm25 * (1 if boost is None else boost)) explain.append(exp) # Take maximum field scores as qf qf_scores = np.asarray(field_scores) qf_scores = np.max(qf_scores, axis=0) return qf_scores, " | ".join(explain) def edismax(frame: pd.DataFrame, q: str, qf: List[str], mm: Optional[str] = None, pf: Optional[List[str]] = None, pf2: Optional[List[str]] = None, pf3: Optional[List[str]] = None, q_op: str = "OR", similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: """Run edismax search over dataframe with searcharray fields. 
Parameters ---------- q : str The query string mm : str The minimum should match spec qf : list The fields to search pf : list The fields to search for phrase matches pf2 : list The fields to search for bigram matches pf3 : list The fields to search for trigram matches q_op : str, optional The default operator, by default "OR" Returns ------- np.ndarray The search results """ def listify(x): return x if isinstance(x, list) else [x] query_fields = parse_field_boosts(listify(qf)) phrase_fields = parse_field_boosts(listify(pf)) if pf else {} if mm is None: mm = "1" if q_op == "AND": mm = "100%" # bigram_fields = parse_field_boosts(pf2) if pf2 else {} # trigram_fields = parse_field_boosts(pf3) if pf3 else {} num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys())) if term_centric: qf_scores, explain = _edismax_term_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) else: qf_scores, explain = _edismax_field_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) phrase_scores = [] for field, boost in phrase_fields.items(): arr = get_field(frame, field) terms = search_terms[field] field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost) boost_exp = f"{boost}" if boost is not None else "1" explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}" phrase_scores.append(field_phrase_score) if len(phrase_scores) > 0: phrase_scores = np.sum(phrase_scores, axis=0) # Add where term_scores > 0 term_match_idx = np.where(qf_scores)[0] qf_scores[term_match_idx] += phrase_scores[term_match_idx] return qf_scores, explain <fim_middle>=
=
STATEMENT
complete_current_header_inner_block_completion
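
The second row's target is a bare "=". Spliced in, it completes the list initializer at the top of _edismax_term_centric in solr.py (reconstructed from the row, abbreviated, indentation restored):

    # inside _edismax_term_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity)
    explain = []
    term_scores = []  # target "=" spliced between "term_scores" and "[]"
    for term_posn in range(num_search_terms):
        max_scores = np.zeros(len(frame))
        term_explain = []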
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array. See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm """ import numpy as np import sortednp as snp import logging import numbers from typing import Optional, Tuple, List, Union logger = logging.getLogger(__name__) # When running in pytest import sys # noqa handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.ERROR) formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.ERROR) DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000) DEFAULT_KEY_BITS = np.uint64(28) DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000) DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18) DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF) DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18) # To not constantly type coerce _64 = np.uint64(64) _2 = np.uint64(2) _1 = np.uint64(1) _0 = np.uint64(0) _neg1 = np.int64(-1) _algorithm = snp.GALLOPING_SEARCH def n_msb_mask(n: np.uint64) -> np.uint64: """Return the n most significant bits of num.""" return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1) def sorted_unique(arr: np.ndarray) -> np.ndarray: return snp.intersect(arr, arr, duplicates=snp.DROP) class RoaringishEncoder: """An encoder for key->integer sets as a numpy array. Each returned array represents a single term, with key as MSBS, ie: | 32 MSBs | 16 LSBs | 16 LSBs | key | bits msbs | payload (different number of MSBs / payload bits can be specified) """ def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS): payload_bits = _64 - key_bits self.payload_msb_bits = payload_bits // _2 self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits) self.key_bits = key_bits assert self.key_bits.dtype == np.uint64 # key bits MSB of 64 bits self.key_mask = n_msb_mask(key_bits) self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}" assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}" self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1) assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}" assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}" if key_bits == DEFAULT_KEY_BITS: assert self.key_mask == DEFAULT_KEY_MASK assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK self.max_payload = np.uint64(2**self.payload_lsb_bits - 1) def validate_payload(self, payload: np.ndarray): """Optional validation of payload.""" if np.any(payload > self.max_payload): raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}") def encode(self, payload: np.ndarray, keys: Optional[np.ndarray] = None, boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: """Pack a sorted array of integers into compact bit numpy array. 
each returned array represents a single term, with key as MSBS, ie: | 32 MSBs | 16 LSBs | 16 LSBs | key | bits msbs| payload for later easy intersection of 32+16 msbs, then checking for adjacent positions If boundaries are provided, then we consider multiple distinct payloads being encoded simultaneously, and we return the boundaries of each """ cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use cols <<= self.payload_msb_bits if keys is not None: cols |= keys.astype(np.uint64) << (_64 - self.key_bits) values = payload % self.payload_lsb_bits # Value to encode change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1 change_indices_one_doc = np.concatenate([[0], change_indices_one_doc]) if boundaries is not None: change_indices = snp.merge(change_indices_one_doc, boundaries, duplicates=snp.DROP) new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1] new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]]) else: change_indices = change_indices_one_doc new_boundaries = None # 0 as a position, goes in bit 1, # 1 as a position, goes in bit 2, etc values = _1 << values cols |= values encoded = cols if len(encoded) == 0: return encoded, new_boundaries reduced = np.bitwise_or.reduceat(encoded, change_indices) return reduced, new_boundaries def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]: """Decode an encoded bit array into keys / payloads.""" keys = (encoded & self.key_mask) >> (_64 - self.key_bits) msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits to_concat = [] for bit in range(self.payload_lsb_bits): mask = 1 << bit lsbs = encoded & mask set_lsbs = (lsbs != 0) this_keys = keys[set_lsbs] payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits) doc_with_posn = np.dstack([this_keys, payload])[0] to_concat.append(doc_with_posn) stacked = np.vstack(to_concat) # Sort by doc_id, then posn sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))] keys, idx = np.unique(sorted_payload[:, 0], return_index=True) grouped = np.split(sorted_payload[:, 1], idx[1:]) if get_keys: return list(zip(keys, grouped)) else: return grouped def keys(self, encoded: np.ndarray) -> np.ndarray: """Return keys from encoded.""" return (encoded & self.key_mask) >> (_64 - self.key_bits) def keys_unique(self, encoded: np.ndarray) -> np.ndarray: """Return keys from encoded.""" keys = self.keys(encoded) intersected = sorted_unique(keys) return intersected def payload_msb(self, encoded: np.ndarray) -> np.ndarray: """Return payload MSBs from encoded.""" return (encoded & self.payload_msb_mask) >> self.payload_msb_bits def payload_lsb(self, encoded: np.ndarray) -> np.ndarray: """Return payload LSBs from encoded.""" return encoded & self.payload_lsb_mask def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray, rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]: """Return the MSBs that are common to both lhs and rhs (same keys, same MSBs) Parameters ---------- lhs : np.ndarray of uint64 (encoded) values rhs : np.ndarray of uint64 (encoded) values rshift : int how much to shift rhs by to the right """ rhs_int = rhs assert rshift < 0, "rshift must be negative" rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)] rshft = rshift.view(np.uint64) rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft # assert np.all(np.diff(rhs_shifted) >= 0), "not sorted" _, (lhs_idx, <fim_suffix> >> self.payload_lsb_bits, rhs_shifted, indices=True, 
algorithm=_algorithm) return lhs[lhs_idx], rhs_int[rhs_idx] def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """Return the MSBs that are common to both lhs and rhs (same keys, same MSBs) Parameters ---------- lhs : np.ndarray of uint64 (encoded) values rhs : np.ndarray of uint64 (encoded) values """ # assert np.all(np.diff(rhs_shifted) >= 0), "not sorted" _, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits, rhs >> self.payload_lsb_bits, indices=True, algorithm=_algorithm) return lhs[lhs_idx], rhs[rhs_idx] def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray: """Get list of encoded that have values in keys.""" assert len(keys.shape) == 1 assert len(encoded.shape) == 1 encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits) _, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True, duplicates=snp.KEEP_MAX_N, algorithm=_algorithm) return encoded[idx_enc] def convert_keys(keys) -> np.ndarray: """Convert keys to range or np.ndarray of uint64.""" if isinstance(keys, numbers.Number): return np.asarray([keys], dtype=np.uint64) elif isinstance(keys, list): return np.asarray(keys, dtype=np.uint64) elif isinstance(keys, np.ndarray): return keys.astype(np.uint64) elif isinstance(keys, range) and len(keys) > 0: # UNFORTUNATE COPY return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0] elif isinstance(keys, range): return np.asarray([], dtype=np.uint64) raise ValueError(f"Unknown type for keys: {type(keys)}") <fim_middle>rhs_idx) = snp.intersect(lhs
rhs_idx) = snp.intersect(lhs
STATEMENT
complete_current_header_inner_block_completion
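
The third row completes a tuple unpacking inside RoaringishEncoder.intersect_rshift in utils/roaringish.py. With the target spliced in, the statement and the line after it read (indentation restored by hand):

    _, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
                                          rhs_shifted,
                                          indices=True,
                                          algorithm=_algorithm)
    return lhs[lhs_idx], rhs_int[rhs_idx]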
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array. See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm """ import numpy as np import sortednp as snp import logging import numbers from typing import Optional, Tuple, List, Union logger = logging.getLogger(__name__) # When running in pytest import sys # noqa handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.ERROR) formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.ERROR) DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000) DEFAULT_KEY_BITS = np.uint64(28) DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000) DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18) DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF) DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18) # To not constantly type coerce _64 = np.uint64(64) _2 = np.uint64(2) _1 = np.uint64(1) _0 = np.uint64(0) _neg1 = np.int64(-1) _algorithm = snp.GALLOPING_SEARCH def n_msb_mask(n: np.uint64) -> np.uint64: """Return the n most significant bits of num.""" return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1) def sorted_unique(arr: np.ndarray) -> np.ndarray: return snp.intersect(arr, arr, duplicates=snp.DROP) class RoaringishEncoder: """An encoder for key->integer sets as a numpy array. Each returned array represents a single term, with key as MSBS, ie: | 32 MSBs | 16 LSBs | 16 LSBs | key | bits msbs | payload (different number of MSBs / payload bits can be specified) """ def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS): payload_bits = _64 - key_bits self.payload_msb_bits = payload_bits // _2 self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits) self.key_bits = key_bits assert self.key_bits.dtype == np.uint64 # key bits MSB of 64 bits self.key_mask = n_msb_mask(key_bits) self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}" assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}" self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1) assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}" assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}" if key_bits == DEFAULT_KEY_BITS: assert self.key_mask == DEFAULT_KEY_MASK assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK self.max_payload = np.uint64(2**self.payload_lsb_bits - 1) def validate_payload(self, payload: np.ndarray): """Optional validation of payload.""" if np.any(payload > self.max_payload): raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}") def encode(self, payload: np.ndarray, keys: Optional[np.ndarray] = None, boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: """Pack a sorted array of integers into compact bit numpy array. 
each returned array represents a single term, with key as MSBS, ie: | 32 MSBs | 16 LSBs | 16 LSBs | key | bits msbs| payload for later easy intersection of 32+16 msbs, then checking for adjacent positions If boundaries are provided, then we consider multiple distinct payloads being encoded simultaneously, and we return the boundaries of each """ cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use cols <<= self.payload_msb_bits if keys is not None: cols |= keys.astype(np.uint64) << (_64 - self.key_bits) values = payload % self.payload_lsb_bits # Value to encode change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1 change_indices_one_doc = np.concatenate([[0], change_indices_one_doc]) if boundaries is not None: change_indices = snp.merge(change_indices_one_doc, boundaries, duplicates=snp.DROP) new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1] new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]]) else: change_indices = change_indices_one_doc new_boundaries = None # 0 as a position, goes in bit 1, # 1 as a position, goes in bit 2, etc values = _1 << values cols |= values encoded = cols if len(encoded) == 0: return encoded, new_boundaries reduced = np.bitwise_or.reduceat(encoded, change_indices) return reduced, new_boundaries def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]: """Decode an encoded bit array into keys / payloads.""" keys = (encoded & self.key_mask) >> (_64 - self.key_bits) msbs <fim_suffix> self.payload_msb_mask) >> self.payload_msb_bits to_concat = [] for bit in range(self.payload_lsb_bits): mask = 1 << bit lsbs = encoded & mask set_lsbs = (lsbs != 0) this_keys = keys[set_lsbs] payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits) doc_with_posn = np.dstack([this_keys, payload])[0] to_concat.append(doc_with_posn) stacked = np.vstack(to_concat) # Sort by doc_id, then posn sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))] keys, idx = np.unique(sorted_payload[:, 0], return_index=True) grouped = np.split(sorted_payload[:, 1], idx[1:]) if get_keys: return list(zip(keys, grouped)) else: return grouped def keys(self, encoded: np.ndarray) -> np.ndarray: """Return keys from encoded.""" return (encoded & self.key_mask) >> (_64 - self.key_bits) def keys_unique(self, encoded: np.ndarray) -> np.ndarray: """Return keys from encoded.""" keys = self.keys(encoded) intersected = sorted_unique(keys) return intersected def payload_msb(self, encoded: np.ndarray) -> np.ndarray: """Return payload MSBs from encoded.""" return (encoded & self.payload_msb_mask) >> self.payload_msb_bits def payload_lsb(self, encoded: np.ndarray) -> np.ndarray: """Return payload LSBs from encoded.""" return encoded & self.payload_lsb_mask def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray, rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]: """Return the MSBs that are common to both lhs and rhs (same keys, same MSBs) Parameters ---------- lhs : np.ndarray of uint64 (encoded) values rhs : np.ndarray of uint64 (encoded) values rshift : int how much to shift rhs by to the right """ rhs_int = rhs assert rshift < 0, "rshift must be negative" rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)] rshft = rshift.view(np.uint64) rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft # assert np.all(np.diff(rhs_shifted) >= 0), "not sorted" _, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits, rhs_shifted, 
indices=True, algorithm=_algorithm) return lhs[lhs_idx], rhs_int[rhs_idx] def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """Return the MSBs that are common to both lhs and rhs (same keys, same MSBs) Parameters ---------- lhs : np.ndarray of uint64 (encoded) values rhs : np.ndarray of uint64 (encoded) values """ # assert np.all(np.diff(rhs_shifted) >= 0), "not sorted" _, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits, rhs >> self.payload_lsb_bits, indices=True, algorithm=_algorithm) return lhs[lhs_idx], rhs[rhs_idx] def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray: """Get list of encoded that have values in keys.""" assert len(keys.shape) == 1 assert len(encoded.shape) == 1 encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits) _, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True, duplicates=snp.KEEP_MAX_N, algorithm=_algorithm) return encoded[idx_enc] def convert_keys(keys) -> np.ndarray: """Convert keys to range or np.ndarray of uint64.""" if isinstance(keys, numbers.Number): return np.asarray([keys], dtype=np.uint64) elif isinstance(keys, list): return np.asarray(keys, dtype=np.uint64) elif isinstance(keys, np.ndarray): return keys.astype(np.uint64) elif isinstance(keys, range) and len(keys) > 0: # UNFORTUNATE COPY return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0] elif isinstance(keys, range): return np.asarray([], dtype=np.uint64) raise ValueError(f"Unknown type for keys: {type(keys)}") <fim_middle>= (encoded &
= (encoded &
STATEMENT
complete_current_header_inner_block_completion
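
The fourth row comes from the same file and completes one line of RoaringishEncoder.decode. Spliced in, the line and its neighbor read:

    keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
    msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits  # target "= (encoded &" spliced in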
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping. See this notebook for motivation: https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG """ import numpy as np import sortednp as snp from copy import deepcopy from typing import List, Tuple, Dict, Union, cast, Optional from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique import numbers import logging from collections import defaultdict from searcharray.utils.bitcount import bit_count64 logger = logging.getLogger(__name__) # When running in pytest import sys # noqa handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.ERROR) formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.ERROR) encoder = RoaringishEncoder() # To not constantly type coerce _64 = np.uint64(64) _2 = np.uint64(2) _1 = np.uint64(1) _0 = np.uint64(0) _neg1 = np.int64(-1) MAX_POSN = encoder.max_payload def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs. Returns: -------- count: number of matches per doc rhs_next: the next rhs array to continue matching """ lhs_int, rhs_int = encoder.intersect(lhs, rhs) lhs_doc_ids = encoder.keys(lhs_int) if len(lhs_int) != len(rhs_int): raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.") if len(lhs_int) == 0: return phrase_freqs, rhs_int same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0]) if same_term: # Find adjacent matches rhs_shift = rhs_int << _1 overlap = lhs_int & rhs_shift overlap = encoder.payload_lsb(overlap) adjacents = bit_count64(overlap).astype(np.int64) adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide phrase_freqs[lhs_doc_ids] += adjacents return phrase_freqs, rhs_int overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1) rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask)) phrase_freqs2 = phrase_freqs.copy() matches2 = overlap_bits > 0 if np.any(matches2): transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1 transitions = np.insert(transitions, 0, 0) counted_bits = bit_count64(overlap_bits[matches2]) reduced = np.add.reduceat(counted_bits, transitions) phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced return phrase_freqs2, rhs_next2 def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words. 
Returns: -------- count: number of matches per doc rhs_next: the next rhs array to continue matching """ lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1) lhs_doc_ids = encoder.keys(lhs_int) # lhs lsb set and rhs lsb's most significant bit set upper_bit = _1 << (encoder.payload_lsb_bits - _1) matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0) unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True) phrase_freqs[unique] += counts rhs_next = rhs_int rhs_next[~matches] |= ~encoder.payload_lsb_mask rhs_next[matches] |= (encoder.payload_lsb_mask & _1) return phrase_freqs, rhs_next def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """Count bigram matches between two encoded arrays. Returns: -------- count: number of matches per doc rhs_next: the next rhs array to continue matching """ # Combine lhs and rhs matches from two strategies phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs) phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs) rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj])) # Combine return phrase_freqs, rhs_next def trim_phrase_search(encoded_posns: List[np.ndarray], phrase_freqs: np.ndarray) -> List[np.ndarray]: """Trim long phrases by searching the rarest terms first.""" # Start with rarest term shortest_keys = None shortest_idx = None min_len = 1e100 max_len = 0 for idx, enc_posn in enumerate(encoded_posns): if len(enc_posn) < min_len: shortest_keys = encoder.keys(enc_posn) shortest_idx = idx min_len = len(enc_posn) if len(enc_posn) > max_len: max_len = len(enc_posn) if shortest_keys is None: return encoded_posns for enc_posn_idx in range(len(encoded_posns)): if enc_posn_idx == shortest_idx: continue if len(encoded_posns[enc_posn_idx]) > (10 * min_len): encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx], shortest_keys) return encoded_posns def compute_phrase_freqs(encoded_posns: List[np.ndarray], phrase_freqs: np.ndarray) -> np.ndarray: if len(encoded_posns) < 2: raise ValueError("phrase must have at least two terms") # Trim long phrases by searching the rarest terms first if len(encoded_posns) > 3: encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs) mask = np.ones(len(phrase_freqs), dtype=bool) lhs = encoded_posns[0] for rhs in encoded_posns[1:]: # Only count the count of the last bigram (ignoring the ones where priors did not match) phrase_freqs[mask] = 0 phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs) mask &= (phrase_freqs > 0) phrase_freqs[~mask] = 0 return phrase_freqs class PosnBitArrayFromFlatBuilder: """ Build from sorted array shape num terms x 3. 
0th is term id 1st is doc id 2nd is posn Sorted by term id then posns """ def __init__(self, flat_array: np.ndarray): self.flat_array = flat_array def build(self): """Slice the flat array into a 2d array of doc ids and posns.""" term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1 term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]]) encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64), boundaries=term_boundaries[:-1], payload=self.flat_array[2].view(np.uint64)) term_ids = self.flat_array[0][term_boundaries[:-1]] encoded_term_posns = {} for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])): sliced = encoded[beg_idx:end_idx] encoded_term_posns[term_ids[into_terms]] = sliced return PosnBitArray(encoded_term_posns, self.flat_array[1].max()) class PosnBitArrayBuilder: def __init__(self): self.term_posns = defaultdict(list) self.term_posn_doc_ids = defaultdict(list) self.max_doc_id = 0 def add_posns(self, doc_id: int, term_id: int, posns: List[int]): doc_ids = [doc_id] * len(posns) self.term_posns[term_id].extend(posns) self.term_posn_doc_ids[term_id].extend(doc_ids) def ensure_capacity(self, doc_id): self.max_doc_id = max(self.max_doc_id, doc_id) def build(self, check=False): encoded_term_posns = {} for term_id, posns in self.term_posns.items(): if len(posns) == 0: posns = np.asarray([], dtype=np.uint32).flatten() elif isinstance(posns, list): posns_arr = np.asarray(posns, dtype=np.uint32).flatten() posns = posns_arr doc_ids = self.term_posn_doc_ids[term_id] if isinstance(doc_ids, list): doc_ids = np.asarray(doc_ids, dtype=np.uint32) encoded = encoder.encode(keys=doc_ids, payload=posns) if check: decode_again = encoder.decode(encoded) docs_to_posns = dict(decode_again) doc_ids_again = [] posns_again = [] for doc_id, posns_dec in docs_to_posns.items(): for posn in posns_dec: doc_ids_again.append(doc_id) posns_again.append(posn) assert np.array_equal(doc_ids_again, doc_ids) assert np.array_equal(posns, posns_again) encoded_term_posns[term_id] = encoded return PosnBitArray(encoded_term_posns, self.max_doc_id) class PosnBitArrayAlreadyEncBuilder: def __init__(self): self.encoded_term_posns = {} self.max_doc_id = 0 def add_posns(self, doc_id: int, term_id: int, posns): self.encoded_term_posns[term_id] = posns def ensure_capacity(self, doc_id): self.max_doc_id = max(self.max_doc_id, doc_id) def build(self, check=False): return PosnBitArray(self.encoded_term_posns, self.max_doc_id) def index_range(rng, key): if key is None: return rng if isinstance(rng, np.ndarray): return rng[key] if isinstance(key, slice): return rng[key] elif isinstance(key, numbers.Number): return rng[key] elif isinstance(key, np.ndarray): try: # UNFORTUNATE COPY r_val = np.asarray(list(rng))[key] return r_val except IndexError as e: raise e # Last resort # UNFORTUNATE COPY # Here probably elipses or a tuple of various things return np.asarray(list(rng))[key] class PosnBitArray: def __init__(self, encoded_term_posns, max_doc_id: int): self.encoded_term_posns = encoded_term_posns self.max_doc_id = max_doc_id self.docfreq_cache : Dict[int, np.uint64] = {} self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {} def copy(self): new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id) return new def concat(self, other): """Merge other into self. Assumes other's doc ids are not overlapping with self's doc ids. 
""" # Shared terms shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys())) for term_id in shared_terms: # Append then sort self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]]) self.encoded_term_posns[term_id].sort() only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys())) for term_id in only_other_terms: self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id] self.max_doc_id = max(self.max_doc_id, other.max_doc_id) # Empty caches self.termfreq_cache = {} self.docfreq_cache = {} def slice(self, key): sliced_term_posns = {} doc_ids = convert_keys(key) max_doc_id = np.max(doc_ids) for term_id, posns in self.encoded_term_posns.items(): encoded = self.encoded_term_posns[term_id] assert len(encoded.shape) == 1 sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids) return PosnBitArray(sliced_term_posns, max_doc_id) def __getitem__(self, key): return self.slice(key) def merge(self, other): # Unique terms unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys())) for term_id in unique_terms: if term_id not in other.encoded_term_posns: continue elif term_id not in self.encoded_term_posns: self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id] else: posns_self = self.encoded_term_posns[term_id] posns_other = other.encoded_term_posns[term_id] self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other) self.max_doc_id = self.max_doc_id + other.max_doc_id # Empty caches self.termfreq_cache = {} self.docfreq_cache = {} def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray: term_posns = encoder.slice(self.encoded_term_posns[term_id], keys=np.asarray([doc_id], dtype=np.uint64)) return term_posns def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray, doc_ids: np.ndarray) -> np.ndarray: if len(term_ids) < 2: raise ValueError("Must have at least two terms") if phrase_freqs.shape[0] == self.max_doc_id + 1: enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids] else: enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id], keys=doc_ids.view(np.uint64)) for term_id in term_ids] return compute_phrase_freqs(enc_term_posns, phrase_freqs) def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]: if isinstance(doc_ids, numbers.Number): doc_ids = np.asarray([doc_ids]) try: np_doc_ids = convert_keys(doc_ids) term_posns = encoder.slice(self.encoded_term_posns[term_id], keys=np_doc_ids) except KeyError: r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids] if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number): return [r_val[0]] return r_val decoded = encoder.decode(encoded=term_posns, get_keys=True) if len(decoded) == 0: return <fim_suffix> dtype=np.uint32)] if len(decoded) != len(doc_ids): # Fill non matches decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded) as_dict: Dict[np.uint64, np.ndarray] = dict(decoded) decs = [] for doc_id in doc_ids: if doc_id in as_dict: decs.append(as_dict[doc_id]) else: decs.append(np.array([], dtype=np.uint32)) return decs else: decs = [dec[1] for dec in decoded] return decs def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]: """Count term freqs using unique positions.""" if doc_ids is None: return self._termfreqs_with_cache(term_id) encoded = 
self.encoded_term_posns[term_id] term_posns = encoded term_posns = encoder.slice(encoded, keys=doc_ids.astype(np.uint64)) return self._computed_term_freqs(term_posns) def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]: doc_ids = encoder.keys(term_posns) change_indices = np.nonzero(np.diff(doc_ids))[0] change_indices = np.concatenate((np.asarray([0]), change_indices + 1)) posns = term_posns & encoder.payload_lsb_mask bit_counts = bit_count64(posns) term_freqs = np.add.reduceat(bit_counts, change_indices) return sorted_unique(doc_ids), term_freqs def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]: try: return self.termfreq_cache[term_id] except KeyError: term_posns = self.encoded_term_posns[term_id] doc_ids, term_freqs = self._computed_term_freqs(term_posns) if self._is_cached(term_id): self.termfreq_cache[term_id] = (doc_ids, term_freqs) return doc_ids, term_freqs def _is_cached(self, term_id: int) -> bool: return term_id in self.docfreq_cache def _docfreq_from_cache(self, term_id: int) -> np.uint64: return self.docfreq_cache[term_id] def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64): if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100): self.docfreq_cache[term_id] = docfreq def docfreq(self, term_id: int) -> np.uint64: try: return self.docfreq_cache[term_id] except KeyError: encoded = self.encoded_term_posns[term_id] docfreq = np.uint64(encoder.keys_unique(encoded).size) self._maybe_cache_docfreq(term_id, docfreq) return docfreq def insert(self, key, term_ids_to_posns, is_encoded=False): new_posns = PosnBitArrayBuilder() if is_encoded: new_posns = PosnBitArrayAlreadyEncBuilder() max_doc_id = 0 for doc_id, new_posns_row in enumerate(term_ids_to_posns): for term_id, positions in new_posns_row: new_posns.add_posns(doc_id, term_id, positions) max_doc_id = max(doc_id, max_doc_id) new_posns.max_doc_id = max_doc_id ins_arr = new_posns.build() self.merge(ins_arr) @property def nbytes(self): arr_bytes = 0 for doc_id, posn in self.encoded_term_posns.items(): arr_bytes += posn.nbytes for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items(): arr_bytes += doc_ids.nbytes arr_bytes += term_freqs.nbytes for term_id, docfreq in self.docfreq_cache.items(): arr_bytes += docfreq.nbytes return arr_bytes <fim_middle>[np.array([],
[np.array([],
STATEMENT
complete_current_header_inner_block_completion
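The PosnBitArray code in the row above derives term frequencies by popcounting the 18 position bits of each encoded uint64 and summing at document boundaries (see _computed_term_freqs). Below is a minimal, self-contained sketch of that idea only — not the library's API: the 28-bit key width is taken from the defaults shown in the roaringish row further down, and popcount64 is a stand-in for the library's bit_count64 helper.

import numpy as np
from typing import Tuple

PAYLOAD_LSB_BITS = np.uint64(18)                                   # position-bitset width (default layout)
PAYLOAD_LSB_MASK = (np.uint64(1) << PAYLOAD_LSB_BITS) - np.uint64(1)
KEY_SHIFT = np.uint64(64 - 28)                                     # doc-id key lives in the top 28 bits

def popcount64(arr: np.ndarray) -> np.ndarray:
    # Naive popcount stand-in for the library's bit_count64 helper.
    return np.array([bin(int(x)).count("1") for x in arr], dtype=np.uint64)

def term_freqs(encoded: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    # 'encoded' is assumed sorted by doc id, one or more uint64 words per doc.
    doc_ids = encoded >> KEY_SHIFT                                  # key of each encoded word
    change = np.nonzero(np.diff(doc_ids))[0]
    change = np.concatenate((np.asarray([0]), change + 1))          # start index of each doc's run
    counts = popcount64(encoded & PAYLOAD_LSB_MASK)                  # positions set in each word
    return np.unique(doc_ids), np.add.reduceat(counts, change)      # per-doc term frequencies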
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse_min_should_match(num_clauses: int, spec: str) -> int: """Parse Solr's min should match (ie mm) spec. See this ChatGPT translation of mm code from Solr's Java code for parsing this https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb Parameters ---------- num_clauses : int spec : str Returns ------- int : the number of clauses that must match """ def checked_parse_int(value, error_message): try: return int(value) except ValueError: raise ValueError(error_message) result = num_clauses spec = spec.strip() if '<' in spec: # we have conditional spec(s) space_around_less_than_pattern = re.compile(r'\s*<\s*') spec = space_around_less_than_pattern.sub('<', spec) for s in spec.split(): parts = s.split('<', 1) if len(parts) < 2: raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'") upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.") if num_clauses <= upper_bound: return result else: result = parse_min_should_match(num_clauses, parts[1]) return result # otherwise, simple expression if '%' in spec: # percentage - assume the % was the last char. If not, let int() fail. spec = spec[:-1] percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") calc = (result * percent) * (1 / 100) result = result + int(calc) if calc < 0 else int(calc) else: calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") result = result + calc if calc < 0 else calc return min(num_clauses, max(result, 0)) def parse_field_boosts(field_lists: List[str]) -> dict: """Parse Solr's qf, pf, pf2, pf3 field boosts.""" if not field_lists: return {} out = {} carat_pattern = re.compile(r'\^') for field in field_lists: parts = carat_pattern.split(field) out[parts[0]] = None if len(parts) == 1 else float(parts[1]) return out def get_field(frame, field) -> SearchArray: if field not in frame.columns: raise ValueError(f"Field {field} not in dataframe") if not isinstance(frame[field].array, SearchArray): raise ValueError(f"Field {field} is not a searcharray field") return frame[field].array def parse_query_terms(frame: pd.DataFrame, query: str, query_fields: List[str]): search_terms: Dict[str, List[str]] = {} num_search_terms = 0 term_centric = True for field in query_fields: arr = get_field(frame, field) tokenizer = arr.tokenizer search_terms[field] = [] field_num_search_terms = 0 for posn, term in enumerate(tokenizer(query)): search_terms[field].append(term) field_num_search_terms += 1 if num_search_terms == 0: num_search_terms = field_num_search_terms elif field_num_search_terms != num_search_terms: term_centric = False return num_search_terms, search_terms, term_centric def _edismax_term_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity) -> Tuple[np.ndarray, str]: explain = [] term_scores = [] for term_posn in range(num_search_terms): max_scores = np.zeros(len(frame)) term_explain = [] for field, <fim_suffix> query_fields.items(): term = search_terms[field][term_posn] post_arr = get_field(frame, field) field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost) boost_exp = 
f"{boost}" if boost is not None else "1" term_explain.append(f"{field}:{term}^{boost_exp}") max_scores = np.maximum(max_scores, field_term_score) term_scores.append(max_scores) explain.append("(" + " | ".join(term_explain) + ")") min_should_match = parse_min_should_match(num_search_terms, spec=mm) qf_scores = np.asarray(term_scores) matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match qf_scores = np.sum(term_scores, axis=0) qf_scores[~matches_gt_mm] = 0 return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}" def _edismax_field_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: field_scores = [] explain = [] for field, boost in query_fields.items(): post_arr = get_field(frame, field) term_scores = np.array([post_arr.score(term, similarity=similarity) for term in search_terms[field]]) min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm) exp = " ".join([f"{field}:{term}" for term in search_terms[field]]) boost_exp = f"{boost}" if boost is not None else "1" exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}" exp = "(" + exp + f")^{boost_exp}" matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field])) sum_terms_bm25 = np.sum(term_scores, axis=0) sum_terms_bm25[~matches_gt_mm] = 0 field_scores.append(sum_terms_bm25 * (1 if boost is None else boost)) explain.append(exp) # Take maximum field scores as qf qf_scores = np.asarray(field_scores) qf_scores = np.max(qf_scores, axis=0) return qf_scores, " | ".join(explain) def edismax(frame: pd.DataFrame, q: str, qf: List[str], mm: Optional[str] = None, pf: Optional[List[str]] = None, pf2: Optional[List[str]] = None, pf3: Optional[List[str]] = None, q_op: str = "OR", similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: """Run edismax search over dataframe with searcharray fields. 
Parameters ---------- q : str The query string mm : str The minimum should match spec qf : list The fields to search pf : list The fields to search for phrase matches pf2 : list The fields to search for bigram matches pf3 : list The fields to search for trigram matches q_op : str, optional The default operator, by default "OR" Returns ------- np.ndarray The search results """ def listify(x): return x if isinstance(x, list) else [x] query_fields = parse_field_boosts(listify(qf)) phrase_fields = parse_field_boosts(listify(pf)) if pf else {} if mm is None: mm = "1" if q_op == "AND": mm = "100%" # bigram_fields = parse_field_boosts(pf2) if pf2 else {} # trigram_fields = parse_field_boosts(pf3) if pf3 else {} num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys())) if term_centric: qf_scores, explain = _edismax_term_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) else: qf_scores, explain = _edismax_field_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) phrase_scores = [] for field, boost in phrase_fields.items(): arr = get_field(frame, field) terms = search_terms[field] field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost) boost_exp = f"{boost}" if boost is not None else "1" explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}" phrase_scores.append(field_phrase_score) if len(phrase_scores) > 0: phrase_scores = np.sum(phrase_scores, axis=0) # Add where term_scores > 0 term_match_idx = np.where(qf_scores)[0] qf_scores[term_match_idx] += phrase_scores[term_match_idx] return qf_scores, explain <fim_middle>boost in
boost in
FOR
complete_current_header_inner_block_completion
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse_min_should_match(num_clauses: int, spec: str) -> int: """Parse Solr's min should match (ie mm) spec. See this ChatGPT translation of mm code from Solr's Java code for parsing this https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb Parameters ---------- num_clauses : int spec : str Returns ------- int : the number of clauses that must match """ def checked_parse_int(value, error_message): try: return int(value) except ValueError: raise ValueError(error_message) result = num_clauses spec = spec.strip() if '<' in spec: # we have conditional spec(s) space_around_less_than_pattern = re.compile(r'\s*<\s*') spec = space_around_less_than_pattern.sub('<', spec) for s in spec.split(): parts = s.split('<', 1) if len(parts) < 2: raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'") upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.") if num_clauses <= upper_bound: return result else: result = parse_min_should_match(num_clauses, parts[1]) return result # otherwise, simple expression if '%' in spec: # percentage - assume the % was the last char. If not, let int() fail. spec = spec[:-1] percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") calc = (result * percent) * (1 / 100) result = result + int(calc) if calc < 0 else int(calc) else: calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") result = result + calc if calc < 0 else calc return min(num_clauses, max(result, 0)) def parse_field_boosts(field_lists: List[str]) -> dict: """Parse Solr's qf, pf, pf2, pf3 field boosts.""" if not field_lists: return {} out = {} carat_pattern = re.compile(r'\^') for field in field_lists: parts = carat_pattern.split(field) out[parts[0]] = None if len(parts) == 1 else float(parts[1]) return out def get_field(frame, field) -> SearchArray: if field not in frame.columns: raise ValueError(f"Field {field} not in dataframe") if not isinstance(frame[field].array, SearchArray): raise ValueError(f"Field {field} is not a searcharray field") return frame[field].array def parse_query_terms(frame: pd.DataFrame, query: str, query_fields: List[str]): search_terms: Dict[str, List[str]] = {} num_search_terms = 0 term_centric = True for <fim_suffix> in query_fields: arr = get_field(frame, field) tokenizer = arr.tokenizer search_terms[field] = [] field_num_search_terms = 0 for posn, term in enumerate(tokenizer(query)): search_terms[field].append(term) field_num_search_terms += 1 if num_search_terms == 0: num_search_terms = field_num_search_terms elif field_num_search_terms != num_search_terms: term_centric = False return num_search_terms, search_terms, term_centric def _edismax_term_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity) -> Tuple[np.ndarray, str]: explain = [] term_scores = [] for term_posn in range(num_search_terms): max_scores = np.zeros(len(frame)) term_explain = [] for field, boost in query_fields.items(): term = search_terms[field][term_posn] post_arr = get_field(frame, field) field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost) boost_exp 
= f"{boost}" if boost is not None else "1" term_explain.append(f"{field}:{term}^{boost_exp}") max_scores = np.maximum(max_scores, field_term_score) term_scores.append(max_scores) explain.append("(" + " | ".join(term_explain) + ")") min_should_match = parse_min_should_match(num_search_terms, spec=mm) qf_scores = np.asarray(term_scores) matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match qf_scores = np.sum(term_scores, axis=0) qf_scores[~matches_gt_mm] = 0 return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}" def _edismax_field_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: field_scores = [] explain = [] for field, boost in query_fields.items(): post_arr = get_field(frame, field) term_scores = np.array([post_arr.score(term, similarity=similarity) for term in search_terms[field]]) min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm) exp = " ".join([f"{field}:{term}" for term in search_terms[field]]) boost_exp = f"{boost}" if boost is not None else "1" exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}" exp = "(" + exp + f")^{boost_exp}" matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field])) sum_terms_bm25 = np.sum(term_scores, axis=0) sum_terms_bm25[~matches_gt_mm] = 0 field_scores.append(sum_terms_bm25 * (1 if boost is None else boost)) explain.append(exp) # Take maximum field scores as qf qf_scores = np.asarray(field_scores) qf_scores = np.max(qf_scores, axis=0) return qf_scores, " | ".join(explain) def edismax(frame: pd.DataFrame, q: str, qf: List[str], mm: Optional[str] = None, pf: Optional[List[str]] = None, pf2: Optional[List[str]] = None, pf3: Optional[List[str]] = None, q_op: str = "OR", similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: """Run edismax search over dataframe with searcharray fields. 
Parameters ---------- q : str The query string mm : str The minimum should match spec qf : list The fields to search pf : list The fields to search for phrase matches pf2 : list The fields to search for bigram matches pf3 : list The fields to search for trigram matches q_op : str, optional The default operator, by default "OR" Returns ------- np.ndarray The search results """ def listify(x): return x if isinstance(x, list) else [x] query_fields = parse_field_boosts(listify(qf)) phrase_fields = parse_field_boosts(listify(pf)) if pf else {} if mm is None: mm = "1" if q_op == "AND": mm = "100%" # bigram_fields = parse_field_boosts(pf2) if pf2 else {} # trigram_fields = parse_field_boosts(pf3) if pf3 else {} num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys())) if term_centric: qf_scores, explain = _edismax_term_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) else: qf_scores, explain = _edismax_field_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) phrase_scores = [] for field, boost in phrase_fields.items(): arr = get_field(frame, field) terms = search_terms[field] field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost) boost_exp = f"{boost}" if boost is not None else "1" explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}" phrase_scores.append(field_phrase_score) if len(phrase_scores) > 0: phrase_scores = np.sum(phrase_scores, axis=0) # Add where term_scores > 0 term_match_idx = np.where(qf_scores)[0] qf_scores[term_match_idx] += phrase_scores[term_match_idx] return qf_scores, explain <fim_middle>field
field
FOR
complete_current_header_inner_block_completion
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse_min_should_match(num_clauses: int, spec: str) -> int: """Parse Solr's min should match (ie mm) spec. See this ChatGPT translation of mm code from Solr's Java code for parsing this https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb Parameters ---------- num_clauses : int spec : str Returns ------- int : the number of clauses that must match """ def checked_parse_int(value, error_message): try: return int(value) except ValueError: raise ValueError(error_message) result = num_clauses spec = spec.strip() if '<' in spec: # we have conditional spec(s) space_around_less_than_pattern = re.compile(r'\s*<\s*') spec = space_around_less_than_pattern.sub('<', spec) for s in spec.split(): parts = s.split('<', 1) if len(parts) < 2: raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'") upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.") if num_clauses <= upper_bound: return result else: result = parse_min_should_match(num_clauses, parts[1]) return result # otherwise, simple expression if '%' in spec: # percentage - assume the % was the last char. If not, let int() fail. spec = spec[:-1] percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") calc = (result * percent) * (1 / 100) result = result + int(calc) if calc < 0 else int(calc) else: calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") result = result + calc if calc < 0 else calc return min(num_clauses, max(result, 0)) def parse_field_boosts(field_lists: List[str]) -> dict: """Parse Solr's qf, pf, pf2, pf3 field boosts.""" if not field_lists: return {} out = {} carat_pattern = re.compile(r'\^') for field in field_lists: parts = carat_pattern.split(field) out[parts[0]] = None if len(parts) == 1 else float(parts[1]) return out def get_field(frame, field) -> SearchArray: if field not in frame.columns: raise ValueError(f"Field {field} not in dataframe") if not isinstance(frame[field].array, SearchArray): raise ValueError(f"Field {field} is not a searcharray field") return frame[field].array def parse_query_terms(frame: pd.DataFrame, query: str, query_fields: List[str]): search_terms: Dict[str, List[str]] = {} num_search_terms = 0 term_centric = True for field in query_fields: arr = get_field(frame, field) tokenizer = arr.tokenizer search_terms[field] = [] field_num_search_terms = 0 for posn, term in enumerate(tokenizer(query)): search_terms[field].append(term) field_num_search_terms += 1 if num_search_terms == 0: num_search_terms = field_num_search_terms elif field_num_search_terms != num_search_terms: term_centric = False return num_search_terms, search_terms, term_centric def _edismax_term_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity) -> Tuple[np.ndarray, str]: explain = [] term_scores = [] for term_posn in range(num_search_terms): max_scores = np.zeros(len(frame)) term_explain = [] for field, boost in query_fields.items(): term = search_terms[field][term_posn] post_arr = get_field(frame, field) field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost) boost_exp = 
f"{boost}" if boost is not None else "1" term_explain.append(f"{field}:{term}^{boost_exp}") max_scores = np.maximum(max_scores, field_term_score) term_scores.append(max_scores) explain.append("(" + " | ".join(term_explain) + ")") min_should_match = parse_min_should_match(num_search_terms, spec=mm) qf_scores = np.asarray(term_scores) matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match qf_scores = np.sum(term_scores, axis=0) qf_scores[~matches_gt_mm] = 0 return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}" def _edismax_field_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: field_scores = [] explain = [] for field, boost in query_fields.items(): post_arr = get_field(frame, field) term_scores = np.array([post_arr.score(term, similarity=similarity) for term in search_terms[field]]) min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm) exp = " ".join([f"{field}:{term}" for term in search_terms[field]]) boost_exp = f"{boost}" if boost is not None else "1" exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}" exp = "(" + exp + f")^{boost_exp}" matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field])) sum_terms_bm25 = np.sum(term_scores, axis=0) sum_terms_bm25[~matches_gt_mm] = 0 field_scores.append(sum_terms_bm25 * (1 if boost is None else boost)) explain.append(exp) # Take maximum field scores as qf qf_scores = np.asarray(field_scores) qf_scores = np.max(qf_scores, axis=0) return qf_scores, " | ".join(explain) def edismax(frame: pd.DataFrame, q: str, qf: List[str], mm: Optional[str] = None, pf: Optional[List[str]] = None, pf2: Optional[List[str]] = None, pf3: Optional[List[str]] = None, q_op: str = "OR", similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: """Run edismax search over dataframe with searcharray fields. 
Parameters ---------- q : str The query string mm : str The minimum should match spec qf : list The fields to search pf : list The fields to search for phrase matches pf2 : list The fields to search for bigram matches pf3 : list The fields to search for trigram matches q_op : str, optional The default operator, by default "OR" Returns ------- np.ndarray The search results """ def listify(x): return x if isinstance(x, list) else [x] query_fields = parse_field_boosts(listify(qf)) phrase_fields = parse_field_boosts(listify(pf)) if pf else {} if mm is None: mm = "1" if q_op == "AND": mm = "100%" # bigram_fields = parse_field_boosts(pf2) if pf2 else {} # trigram_fields = parse_field_boosts(pf3) if pf3 else {} num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys())) if term_centric: qf_scores, explain = _edismax_term_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) else: qf_scores, explain = _edismax_field_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) phrase_scores = [] for field, boost <fim_suffix> phrase_fields.items(): arr = get_field(frame, field) terms = search_terms[field] field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost) boost_exp = f"{boost}" if boost is not None else "1" explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}" phrase_scores.append(field_phrase_score) if len(phrase_scores) > 0: phrase_scores = np.sum(phrase_scores, axis=0) # Add where term_scores > 0 term_match_idx = np.where(qf_scores)[0] qf_scores[term_match_idx] += phrase_scores[term_match_idx] return qf_scores, explain <fim_middle>in
in
FOR
complete_current_header_inner_block_completion
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse_min_should_match(num_clauses: int, spec: str) -> int: """Parse Solr's min should match (ie mm) spec. See this ChatGPT translation of mm code from Solr's Java code for parsing this https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb Parameters ---------- num_clauses : int spec : str Returns ------- int : the number of clauses that must match """ def checked_parse_int(value, error_message): try: return int(value) except ValueError: raise ValueError(error_message) result = num_clauses spec = spec.strip() if '<' in spec: # we have conditional spec(s) space_around_less_than_pattern = re.compile(r'\s*<\s*') spec = space_around_less_than_pattern.sub('<', spec) for s in spec.split(): parts = s.split('<', 1) if len(parts) < 2: raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'") upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.") if num_clauses <= upper_bound: return result else: result = parse_min_should_match(num_clauses, parts[1]) return result # otherwise, simple expression if '%' in spec: # percentage - assume the % was the last char. If not, let int() fail. spec = spec[:-1] percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") calc = (result * percent) * (1 / 100) result = result + int(calc) if calc < 0 else int(calc) else: calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") result = result + calc if calc < 0 else calc return min(num_clauses, max(result, 0)) def parse_field_boosts(field_lists: List[str]) -> dict: """Parse Solr's qf, pf, pf2, pf3 field boosts.""" if not field_lists: return {} out = {} carat_pattern = re.compile(r'\^') for field in field_lists: parts = carat_pattern.split(field) out[parts[0]] = None if len(parts) == 1 else float(parts[1]) return out def get_field(frame, field) -> SearchArray: if field not in frame.columns: raise ValueError(f"Field {field} not in dataframe") if not isinstance(frame[field].array, SearchArray): raise ValueError(f"Field {field} is not a searcharray field") return frame[field].array def parse_query_terms(frame: pd.DataFrame, query: str, query_fields: List[str]): search_terms: Dict[str, List[str]] = {} num_search_terms = 0 term_centric = True for field in query_fields: arr = get_field(frame, field) tokenizer = arr.tokenizer search_terms[field] = [] field_num_search_terms = 0 for posn, term <fim_suffix> enumerate(tokenizer(query)): search_terms[field].append(term) field_num_search_terms += 1 if num_search_terms == 0: num_search_terms = field_num_search_terms elif field_num_search_terms != num_search_terms: term_centric = False return num_search_terms, search_terms, term_centric def _edismax_term_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity) -> Tuple[np.ndarray, str]: explain = [] term_scores = [] for term_posn in range(num_search_terms): max_scores = np.zeros(len(frame)) term_explain = [] for field, boost in query_fields.items(): term = search_terms[field][term_posn] post_arr = get_field(frame, field) field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost) 
boost_exp = f"{boost}" if boost is not None else "1" term_explain.append(f"{field}:{term}^{boost_exp}") max_scores = np.maximum(max_scores, field_term_score) term_scores.append(max_scores) explain.append("(" + " | ".join(term_explain) + ")") min_should_match = parse_min_should_match(num_search_terms, spec=mm) qf_scores = np.asarray(term_scores) matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match qf_scores = np.sum(term_scores, axis=0) qf_scores[~matches_gt_mm] = 0 return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}" def _edismax_field_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: field_scores = [] explain = [] for field, boost in query_fields.items(): post_arr = get_field(frame, field) term_scores = np.array([post_arr.score(term, similarity=similarity) for term in search_terms[field]]) min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm) exp = " ".join([f"{field}:{term}" for term in search_terms[field]]) boost_exp = f"{boost}" if boost is not None else "1" exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}" exp = "(" + exp + f")^{boost_exp}" matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field])) sum_terms_bm25 = np.sum(term_scores, axis=0) sum_terms_bm25[~matches_gt_mm] = 0 field_scores.append(sum_terms_bm25 * (1 if boost is None else boost)) explain.append(exp) # Take maximum field scores as qf qf_scores = np.asarray(field_scores) qf_scores = np.max(qf_scores, axis=0) return qf_scores, " | ".join(explain) def edismax(frame: pd.DataFrame, q: str, qf: List[str], mm: Optional[str] = None, pf: Optional[List[str]] = None, pf2: Optional[List[str]] = None, pf3: Optional[List[str]] = None, q_op: str = "OR", similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: """Run edismax search over dataframe with searcharray fields. 
Parameters ---------- q : str The query string mm : str The minimum should match spec qf : list The fields to search pf : list The fields to search for phrase matches pf2 : list The fields to search for bigram matches pf3 : list The fields to search for trigram matches q_op : str, optional The default operator, by default "OR" Returns ------- np.ndarray The search results """ def listify(x): return x if isinstance(x, list) else [x] query_fields = parse_field_boosts(listify(qf)) phrase_fields = parse_field_boosts(listify(pf)) if pf else {} if mm is None: mm = "1" if q_op == "AND": mm = "100%" # bigram_fields = parse_field_boosts(pf2) if pf2 else {} # trigram_fields = parse_field_boosts(pf3) if pf3 else {} num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys())) if term_centric: qf_scores, explain = _edismax_term_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) else: qf_scores, explain = _edismax_field_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) phrase_scores = [] for field, boost in phrase_fields.items(): arr = get_field(frame, field) terms = search_terms[field] field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost) boost_exp = f"{boost}" if boost is not None else "1" explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}" phrase_scores.append(field_phrase_score) if len(phrase_scores) > 0: phrase_scores = np.sum(phrase_scores, axis=0) # Add where term_scores > 0 term_match_idx = np.where(qf_scores)[0] qf_scores[term_match_idx] += phrase_scores[term_match_idx] return qf_scores, explain <fim_middle>in
in
FOR
complete_current_header_inner_block_completion
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse_min_should_match(num_clauses: int, spec: str) -> int: """Parse Solr's min should match (ie mm) spec. See this ChatGPT translation of mm code from Solr's Java code for parsing this https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb Parameters ---------- num_clauses : int spec : str Returns ------- int : the number of clauses that must match """ def checked_parse_int(value, error_message): try: return int(value) except ValueError: raise ValueError(error_message) result = num_clauses spec = spec.strip() if '<' in spec: # we have conditional spec(s) space_around_less_than_pattern = re.compile(r'\s*<\s*') spec = space_around_less_than_pattern.sub('<', spec) for <fim_suffix> spec.split(): parts = s.split('<', 1) if len(parts) < 2: raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'") upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.") if num_clauses <= upper_bound: return result else: result = parse_min_should_match(num_clauses, parts[1]) return result # otherwise, simple expression if '%' in spec: # percentage - assume the % was the last char. If not, let int() fail. spec = spec[:-1] percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") calc = (result * percent) * (1 / 100) result = result + int(calc) if calc < 0 else int(calc) else: calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") result = result + calc if calc < 0 else calc return min(num_clauses, max(result, 0)) def parse_field_boosts(field_lists: List[str]) -> dict: """Parse Solr's qf, pf, pf2, pf3 field boosts.""" if not field_lists: return {} out = {} carat_pattern = re.compile(r'\^') for field in field_lists: parts = carat_pattern.split(field) out[parts[0]] = None if len(parts) == 1 else float(parts[1]) return out def get_field(frame, field) -> SearchArray: if field not in frame.columns: raise ValueError(f"Field {field} not in dataframe") if not isinstance(frame[field].array, SearchArray): raise ValueError(f"Field {field} is not a searcharray field") return frame[field].array def parse_query_terms(frame: pd.DataFrame, query: str, query_fields: List[str]): search_terms: Dict[str, List[str]] = {} num_search_terms = 0 term_centric = True for field in query_fields: arr = get_field(frame, field) tokenizer = arr.tokenizer search_terms[field] = [] field_num_search_terms = 0 for posn, term in enumerate(tokenizer(query)): search_terms[field].append(term) field_num_search_terms += 1 if num_search_terms == 0: num_search_terms = field_num_search_terms elif field_num_search_terms != num_search_terms: term_centric = False return num_search_terms, search_terms, term_centric def _edismax_term_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity) -> Tuple[np.ndarray, str]: explain = [] term_scores = [] for term_posn in range(num_search_terms): max_scores = np.zeros(len(frame)) term_explain = [] for field, boost in query_fields.items(): term = search_terms[field][term_posn] post_arr = get_field(frame, field) field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost) 
boost_exp = f"{boost}" if boost is not None else "1" term_explain.append(f"{field}:{term}^{boost_exp}") max_scores = np.maximum(max_scores, field_term_score) term_scores.append(max_scores) explain.append("(" + " | ".join(term_explain) + ")") min_should_match = parse_min_should_match(num_search_terms, spec=mm) qf_scores = np.asarray(term_scores) matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match qf_scores = np.sum(term_scores, axis=0) qf_scores[~matches_gt_mm] = 0 return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}" def _edismax_field_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: field_scores = [] explain = [] for field, boost in query_fields.items(): post_arr = get_field(frame, field) term_scores = np.array([post_arr.score(term, similarity=similarity) for term in search_terms[field]]) min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm) exp = " ".join([f"{field}:{term}" for term in search_terms[field]]) boost_exp = f"{boost}" if boost is not None else "1" exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}" exp = "(" + exp + f")^{boost_exp}" matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field])) sum_terms_bm25 = np.sum(term_scores, axis=0) sum_terms_bm25[~matches_gt_mm] = 0 field_scores.append(sum_terms_bm25 * (1 if boost is None else boost)) explain.append(exp) # Take maximum field scores as qf qf_scores = np.asarray(field_scores) qf_scores = np.max(qf_scores, axis=0) return qf_scores, " | ".join(explain) def edismax(frame: pd.DataFrame, q: str, qf: List[str], mm: Optional[str] = None, pf: Optional[List[str]] = None, pf2: Optional[List[str]] = None, pf3: Optional[List[str]] = None, q_op: str = "OR", similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: """Run edismax search over dataframe with searcharray fields. 
Parameters ---------- q : str The query string mm : str The minimum should match spec qf : list The fields to search pf : list The fields to search for phrase matches pf2 : list The fields to search for bigram matches pf3 : list The fields to search for trigram matches q_op : str, optional The default operator, by default "OR" Returns ------- np.ndarray The search results """ def listify(x): return x if isinstance(x, list) else [x] query_fields = parse_field_boosts(listify(qf)) phrase_fields = parse_field_boosts(listify(pf)) if pf else {} if mm is None: mm = "1" if q_op == "AND": mm = "100%" # bigram_fields = parse_field_boosts(pf2) if pf2 else {} # trigram_fields = parse_field_boosts(pf3) if pf3 else {} num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys())) if term_centric: qf_scores, explain = _edismax_term_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) else: qf_scores, explain = _edismax_field_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) phrase_scores = [] for field, boost in phrase_fields.items(): arr = get_field(frame, field) terms = search_terms[field] field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost) boost_exp = f"{boost}" if boost is not None else "1" explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}" phrase_scores.append(field_phrase_score) if len(phrase_scores) > 0: phrase_scores = np.sum(phrase_scores, axis=0) # Add where term_scores > 0 term_match_idx = np.where(qf_scores)[0] qf_scores[term_match_idx] += phrase_scores[term_match_idx] return qf_scores, explain <fim_middle>s in
s in
FOR
complete_current_header_inner_block_completion
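The parse_min_should_match function repeated in these solr.py rows mirrors Solr's mm spec parsing. A few hedged usage examples, assuming the searcharray.solr module path implied by the row's filename:

from searcharray.solr import parse_min_should_match

# Conditional spec: up to 2 clauses require all of them, otherwise 75% of them.
assert parse_min_should_match(2, "2<75%") == 2   # at or below the bound -> all clauses
assert parse_min_should_match(4, "2<75%") == 3   # above the bound -> int(4 * 0.75)
# Plain integer specs: negative values subtract from the clause count.
assert parse_min_should_match(3, "-1") == 2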
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse_min_should_match(num_clauses: int, spec: str) -> int: """Parse Solr's min should match (ie mm) spec. See this ChatGPT translation of mm code from Solr's Java code for parsing this https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb Parameters ---------- num_clauses : int spec : str Returns ------- int : the number of clauses that must match """ def checked_parse_int(value, error_message): try: return int(value) except ValueError: raise ValueError(error_message) result = num_clauses spec = spec.strip() if '<' in spec: # we have conditional spec(s) space_around_less_than_pattern = re.compile(r'\s*<\s*') spec = space_around_less_than_pattern.sub('<', spec) for s in spec.split(): parts = s.split('<', 1) if len(parts) < 2: raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'") upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.") if num_clauses <= upper_bound: return result else: result = parse_min_should_match(num_clauses, parts[1]) return result # otherwise, simple expression if '%' in spec: # percentage - assume the % was the last char. If not, let int() fail. spec = spec[:-1] percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") calc = (result * percent) * (1 / 100) result = result + int(calc) if calc < 0 else int(calc) else: calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") result = result + calc if calc < 0 else calc return min(num_clauses, max(result, 0)) def parse_field_boosts(field_lists: List[str]) -> dict: """Parse Solr's qf, pf, pf2, pf3 field boosts.""" if not field_lists: return {} out = {} carat_pattern = re.compile(r'\^') for field in field_lists: parts = carat_pattern.split(field) out[parts[0]] = None if len(parts) == 1 else float(parts[1]) return out def get_field(frame, field) -> SearchArray: if field not in frame.columns: raise ValueError(f"Field {field} not in dataframe") if not isinstance(frame[field].array, SearchArray): raise ValueError(f"Field {field} is not a searcharray field") return frame[field].array def parse_query_terms(frame: pd.DataFrame, query: str, query_fields: List[str]): search_terms: Dict[str, List[str]] = {} num_search_terms = 0 term_centric = True for field in query_fields: arr = get_field(frame, field) tokenizer = arr.tokenizer search_terms[field] = [] field_num_search_terms = 0 for posn, term in enumerate(tokenizer(query)): search_terms[field].append(term) field_num_search_terms += 1 if num_search_terms == 0: num_search_terms = field_num_search_terms elif field_num_search_terms != num_search_terms: term_centric = False return num_search_terms, search_terms, term_centric def _edismax_term_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity) -> Tuple[np.ndarray, str]: explain = [] term_scores = [] for term_posn <fim_suffix> range(num_search_terms): max_scores = np.zeros(len(frame)) term_explain = [] for field, boost in query_fields.items(): term = search_terms[field][term_posn] post_arr = get_field(frame, field) field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost) 
boost_exp = f"{boost}" if boost is not None else "1" term_explain.append(f"{field}:{term}^{boost_exp}") max_scores = np.maximum(max_scores, field_term_score) term_scores.append(max_scores) explain.append("(" + " | ".join(term_explain) + ")") min_should_match = parse_min_should_match(num_search_terms, spec=mm) qf_scores = np.asarray(term_scores) matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match qf_scores = np.sum(term_scores, axis=0) qf_scores[~matches_gt_mm] = 0 return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}" def _edismax_field_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: field_scores = [] explain = [] for field, boost in query_fields.items(): post_arr = get_field(frame, field) term_scores = np.array([post_arr.score(term, similarity=similarity) for term in search_terms[field]]) min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm) exp = " ".join([f"{field}:{term}" for term in search_terms[field]]) boost_exp = f"{boost}" if boost is not None else "1" exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}" exp = "(" + exp + f")^{boost_exp}" matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field])) sum_terms_bm25 = np.sum(term_scores, axis=0) sum_terms_bm25[~matches_gt_mm] = 0 field_scores.append(sum_terms_bm25 * (1 if boost is None else boost)) explain.append(exp) # Take maximum field scores as qf qf_scores = np.asarray(field_scores) qf_scores = np.max(qf_scores, axis=0) return qf_scores, " | ".join(explain) def edismax(frame: pd.DataFrame, q: str, qf: List[str], mm: Optional[str] = None, pf: Optional[List[str]] = None, pf2: Optional[List[str]] = None, pf3: Optional[List[str]] = None, q_op: str = "OR", similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: """Run edismax search over dataframe with searcharray fields. 
Parameters ---------- q : str The query string mm : str The minimum should match spec qf : list The fields to search pf : list The fields to search for phrase matches pf2 : list The fields to search for bigram matches pf3 : list The fields to search for trigram matches q_op : str, optional The default operator, by default "OR" Returns ------- np.ndarray The search results """ def listify(x): return x if isinstance(x, list) else [x] query_fields = parse_field_boosts(listify(qf)) phrase_fields = parse_field_boosts(listify(pf)) if pf else {} if mm is None: mm = "1" if q_op == "AND": mm = "100%" # bigram_fields = parse_field_boosts(pf2) if pf2 else {} # trigram_fields = parse_field_boosts(pf3) if pf3 else {} num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys())) if term_centric: qf_scores, explain = _edismax_term_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) else: qf_scores, explain = _edismax_field_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) phrase_scores = [] for field, boost in phrase_fields.items(): arr = get_field(frame, field) terms = search_terms[field] field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost) boost_exp = f"{boost}" if boost is not None else "1" explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}" phrase_scores.append(field_phrase_score) if len(phrase_scores) > 0: phrase_scores = np.sum(phrase_scores, axis=0) # Add where term_scores > 0 term_match_idx = np.where(qf_scores)[0] qf_scores[term_match_idx] += phrase_scores[term_match_idx] return qf_scores, explain <fim_middle>in
in
FOR
complete_current_header_inner_block_completion
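Putting the solr.py rows together, the edismax helper scores a dataframe of SearchArray columns. The following end-to-end sketch is illustrative only — the frame, column names, and boosts are invented, and it assumes SearchArray.index and edismax behave as shown in the code above.

import pandas as pd
from searcharray.postings import SearchArray
from searcharray.solr import edismax

# Invented toy data; both query fields tokenize "blue shoes" into two terms,
# so the term-centric branch (_edismax_term_centric) is taken.
df = pd.DataFrame({"title": ["blue shoes", "red shoes"],
                   "body": ["comfortable blue running shoes", "red dress shoes"]})
df["title_idx"] = SearchArray.index(df["title"])
df["body_idx"] = SearchArray.index(df["body"])

# qf uses Solr's field^boost syntax; mm="2" requires both query terms to match.
scores, explain = edismax(df, q="blue shoes",
                          qf=["title_idx^2", "body_idx"],
                          pf=["title_idx"], mm="2")
print(scores)   # one BM25-style score per row
print(explain)  # Lucene-like explain string assembled by the helpers above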
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array. See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm """ import numpy as np import sortednp as snp import logging import numbers from typing import Optional, Tuple, List, Union logger = logging.getLogger(__name__) # When running in pytest import sys # noqa handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.ERROR) formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.ERROR) DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000) DEFAULT_KEY_BITS = np.uint64(28) DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000) DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18) DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF) DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18) # To not constantly type coerce _64 = np.uint64(64) _2 = np.uint64(2) _1 = np.uint64(1) _0 = np.uint64(0) _neg1 = np.int64(-1) _algorithm = snp.GALLOPING_SEARCH def n_msb_mask(n: np.uint64) -> np.uint64: """Return the n most significant bits of num.""" return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1) def sorted_unique(arr: np.ndarray) -> np.ndarray: return snp.intersect(arr, arr, duplicates=snp.DROP) class RoaringishEncoder: """An encoder for key->integer sets as a numpy array. Each returned array represents a single term, with key as MSBS, ie: | 32 MSBs | 16 LSBs | 16 LSBs | key | bits msbs | payload (different number of MSBs / payload bits can be specified) """ def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS): payload_bits = _64 - key_bits self.payload_msb_bits = payload_bits // _2 self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits) self.key_bits = key_bits assert self.key_bits.dtype == np.uint64 # key bits MSB of 64 bits self.key_mask = n_msb_mask(key_bits) self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}" assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}" self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1) assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}" assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}" if key_bits == DEFAULT_KEY_BITS: assert self.key_mask == DEFAULT_KEY_MASK assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK self.max_payload = np.uint64(2**self.payload_lsb_bits - 1) def validate_payload(self, payload: np.ndarray): """Optional validation of payload.""" if np.any(payload > self.max_payload): raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}") def encode(self, payload: np.ndarray, keys: Optional[np.ndarray] = None, boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: """Pack a sorted array of integers into compact bit numpy array. 
each returned array represents a single term, with key as MSBS, ie: | 32 MSBs | 16 LSBs | 16 LSBs | key | bits msbs| payload for later easy intersection of 32+16 msbs, then checking for adjacent positions If boundaries are provided, then we consider multiple distinct payloads being encoded simultaneously, and we return the boundaries of each """ cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use cols <<= self.payload_msb_bits if keys is not None: cols |= keys.astype(np.uint64) << (_64 - self.key_bits) values = payload % self.payload_lsb_bits # Value to encode change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1 change_indices_one_doc = np.concatenate([[0], change_indices_one_doc]) if boundaries is not None: change_indices = snp.merge(change_indices_one_doc, boundaries, duplicates=snp.DROP) new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1] new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]]) else: change_indices = change_indices_one_doc new_boundaries = None # 0 as a position, goes in bit 1, # 1 as a position, goes in bit 2, etc values = _1 << values cols |= values encoded = cols if len(encoded) == 0: return encoded, new_boundaries reduced = np.bitwise_or.reduceat(encoded, change_indices) return reduced, new_boundaries def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]: """Decode an encoded bit array into keys / payloads.""" keys = (encoded & self.key_mask) >> (_64 - self.key_bits) msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits to_concat = [] for <fim_suffix> range(self.payload_lsb_bits): mask = 1 << bit lsbs = encoded & mask set_lsbs = (lsbs != 0) this_keys = keys[set_lsbs] payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits) doc_with_posn = np.dstack([this_keys, payload])[0] to_concat.append(doc_with_posn) stacked = np.vstack(to_concat) # Sort by doc_id, then posn sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))] keys, idx = np.unique(sorted_payload[:, 0], return_index=True) grouped = np.split(sorted_payload[:, 1], idx[1:]) if get_keys: return list(zip(keys, grouped)) else: return grouped def keys(self, encoded: np.ndarray) -> np.ndarray: """Return keys from encoded.""" return (encoded & self.key_mask) >> (_64 - self.key_bits) def keys_unique(self, encoded: np.ndarray) -> np.ndarray: """Return keys from encoded.""" keys = self.keys(encoded) intersected = sorted_unique(keys) return intersected def payload_msb(self, encoded: np.ndarray) -> np.ndarray: """Return payload MSBs from encoded.""" return (encoded & self.payload_msb_mask) >> self.payload_msb_bits def payload_lsb(self, encoded: np.ndarray) -> np.ndarray: """Return payload LSBs from encoded.""" return encoded & self.payload_lsb_mask def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray, rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]: """Return the MSBs that are common to both lhs and rhs (same keys, same MSBs) Parameters ---------- lhs : np.ndarray of uint64 (encoded) values rhs : np.ndarray of uint64 (encoded) values rshift : int how much to shift rhs by to the right """ rhs_int = rhs assert rshift < 0, "rshift must be negative" rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)] rshft = rshift.view(np.uint64) rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft # assert np.all(np.diff(rhs_shifted) >= 0), "not sorted" _, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits, rhs_shifted, 
indices=True, algorithm=_algorithm) return lhs[lhs_idx], rhs_int[rhs_idx] def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """Return the MSBs that are common to both lhs and rhs (same keys, same MSBs) Parameters ---------- lhs : np.ndarray of uint64 (encoded) values rhs : np.ndarray of uint64 (encoded) values """ # assert np.all(np.diff(rhs_shifted) >= 0), "not sorted" _, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits, rhs >> self.payload_lsb_bits, indices=True, algorithm=_algorithm) return lhs[lhs_idx], rhs[rhs_idx] def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray: """Get list of encoded that have values in keys.""" assert len(keys.shape) == 1 assert len(encoded.shape) == 1 encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits) _, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True, duplicates=snp.KEEP_MAX_N, algorithm=_algorithm) return encoded[idx_enc] def convert_keys(keys) -> np.ndarray: """Convert keys to range or np.ndarray of uint64.""" if isinstance(keys, numbers.Number): return np.asarray([keys], dtype=np.uint64) elif isinstance(keys, list): return np.asarray(keys, dtype=np.uint64) elif isinstance(keys, np.ndarray): return keys.astype(np.uint64) elif isinstance(keys, range) and len(keys) > 0: # UNFORTUNATE COPY return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0] elif isinstance(keys, range): return np.asarray([], dtype=np.uint64) raise ValueError(f"Unknown type for keys: {type(keys)}") <fim_middle>bit in
bit in
FOR
complete_current_header_inner_block_completion
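The RoaringishEncoder docstring in the row above describes packing a document key into the top 28 bits of a uint64, an 18-bit "which block of 18 positions" header, and an 18-bit bitset of positions in the LSBs. The sketch below is a pure-Python restatement of that packing for a single document, not the library's vectorized encode; the function name is made up for illustration.

from typing import Dict, List

KEY_BITS, MSB_BITS, LSB_BITS = 28, 18, 18   # default widths from the row above

def encode_one_doc(doc_id: int, positions: List[int]) -> List[int]:
    words: Dict[int, int] = {}
    for posn in positions:
        # Header: doc key in the top 28 bits, block index (posn // 18) in the next 18.
        header = (doc_id << (MSB_BITS + LSB_BITS)) | ((posn // LSB_BITS) << LSB_BITS)
        # Each position within a block sets one bit of the 18-bit LSB bitset.
        words[header] = words.get(header, 0) | (1 << (posn % LSB_BITS))
    return [header | bits for header, bits in sorted(words.items())]

# Positions 0..17 share one word; position 18 spills into a second word.
assert len(encode_one_doc(5, [0, 3, 17, 18])) == 2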
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take import json from collections import Counter import warnings import logging from typing import List, Union, Optional, Iterable import numpy as np from searcharray.phrase.scan_merge import scan_merge_ins from searcharray.phrase.posn_diffs import compute_phrase_freqs from searcharray.phrase.middle_out import PosnBitArray from searcharray.similarity import Similarity, default_bm25 from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list from searcharray.term_dict import TermMissingError logger = logging.getLogger(__name__) # When running in pytest import sys # noqa handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.ERROR) formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.ERROR) class Terms: """An indexed search doc - a single bag of tokenized words and positions.""" def __init__(self, postings, doc_len: int = 0, posns: Optional[dict] = None, encoded=False): self.postings = postings self.posns = None self.encoded = encoded self.doc_len = doc_len self.posns = posns def _validate_posns(self): # (For testing/assertions) - Confirm every term in positions also in postings if self.posns is None: return for term in self.posns: if term not in self.postings: raise ValueError(f"Term {term} in positions but not in postings. ") def termfreq(self, token): return self.postings[token] def terms(self): return self.postings.items() def positions(self, term=None): if self.posns is None: return {} if term is None: posns = self.posns.items() else: posns = self.posns[term] return posns def raw_positions(self, term_dict, term=None): if self.posns is None: return {} if term is None: posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()] else: posns = [(term_dict.get_term_id(term), self.posns[term])] return posns def tf_to_dense(self, term_dict): """Convert to a dense vector of term frequencies.""" dense = np.zeros(len(term_dict)) for term, freq in self.terms(): dense[term_dict.get_term_id(term)] = freq return dense def __len__(self): return len(self.postings) def __repr__(self): posting_keys = set(self.postings.keys()) rval = f"Terms({posting_keys})" return rval def __str__(self): return repr(self) def __eq__(self, other): # Flip to the other implementation if we're comparing to a SearchArray # to get a boolean array back if isinstance(other, SearchArray): return other == self same_postings = isinstance(other, Terms) and self.postings == other.postings if same_postings and self.doc_len == other.doc_len: return True def __lt__(self, other): # return isinstance(other, Terms) and hash(self) < hash(other) keys_both = set(self.postings.keys()).union(set(other.postings.keys())) # Sort lexically keys_both = sorted(keys_both) # Iterate as if these are two vectors of the same large dimensional vector sparse for key in keys_both: lhs_val = 0 rhs_val = 0 try: lhs_val = self.postings[key] except KeyError: pass try: rhs_val = other.postings[key] except KeyError: pass if lhs_val < rhs_val: return True elif lhs_val > rhs_val: return False else: continue return False def __le__(self, other): return self < other or self == 
other def __gt__(self, other): return not (self < other) and self != other def __hash__(self): return hash(json.dumps(self.postings, sort_keys=True)) class TermsDtype(ExtensionDtype): """Pandas dtype for terms.""" name = 'tokenized_text' type = Terms kind = 'O' @classmethod def construct_from_string(cls, string): if not isinstance(string, str): raise TypeError( "'construct_from_string' expects a string, got {}".format(type(string)) ) elif string == cls.name: return cls() else: raise TypeError( "Cannot construct a '{}' from '{}'".format(cls.__name__, string) ) @classmethod def construct_array_type(cls): return SearchArray def __repr__(self): return 'TermsDtype()' @property def na_value(self): return Terms({}) def valid_value(self, value): return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms) register_extension_dtype(TermsDtype) def ws_tokenizer(string): if pd.isna(string): return [] if not isinstance(string, str): raise ValueError("Expected a string") return string.split() def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray): tfs = {} labeled_posns = {} for term_idx in row.cols: term = term_dict.get_term(term_idx) tfs[term] = 1 enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id) labeled_posns[term] = enc_term_posns result = Terms(tfs, posns=labeled_posns, doc_len=doc_len, encoded=True) return result class SearchArray(ExtensionArray): """An array of tokenized text (Termss).""" dtype = TermsDtype() def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True): # Check dtype, raise TypeError if not is_list_like(postings): raise TypeError("Expected list-like object, got {}".format(type(postings))) self.avoid_copies = avoid_copies self.tokenizer = tokenizer self.term_mat, self.posns, \ self.term_dict, self.avg_doc_length, \ self.doc_lens = build_index_from_terms_list(postings, Terms) @classmethod def index(cls, array: Iterable, tokenizer=ws_tokenizer, truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray': """Index an array of strings using tokenizer.""" if not is_list_like(array): raise TypeError("Expected list-like object, got {}".format(type(array))) term_mat, posns, term_dict, avg_doc_length, doc_lens =\ build_index_from_tokenizer(array, tokenizer, batch_size=batch_size, truncate=truncate) postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies) postings.term_mat = term_mat postings.posns = posns postings.term_dict = term_dict postings.avg_doc_length = avg_doc_length postings.doc_lens = doc_lens return postings @classmethod def _from_sequence(cls, scalars, dtype=None, copy=False): """Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into).""" if dtype is not None: if not isinstance(dtype, TermsDtype): return scalars if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype(): return cls(scalars) # String types elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US': return cls(scalars) # Other objects elif isinstance(scalars, np.ndarray) and scalars.dtype != object: return scalars return cls(scalars) def memory_usage(self, deep=False): """Return memory usage of this array in bytes.""" return self.nbytes @property def nbytes(self): return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes def __getitem__(self, key): key = pd.api.indexers.check_array_indexer(self, key) # Want to take rows of term freqs if isinstance(key, numbers.Integral): try: rows = self.term_mat[key] doc_len = self.doc_lens[key] doc_id = 
key if doc_id < 0: doc_id += len(self) return _row_to_postings_row(doc_id, rows[0], doc_len, self.term_dict, self.posns) except IndexError: raise IndexError("index out of bounds") else: # Construct a sliced view of this array sliced_tfs = self.term_mat.slice(key) sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns arr = SearchArray([], tokenizer=self.tokenizer) arr.term_mat = sliced_tfs arr.doc_lens = self.doc_lens[key] arr.posns = sliced_posns arr.term_dict = self.term_dict arr.avg_doc_length = self.avg_doc_length return arr def __setitem__(self, key, value): """Set an item in the array.""" key = pd.api.indexers.check_array_indexer(self, key) if isinstance(value, pd.Series): value = value.values if isinstance(value, pd.DataFrame): value = value.values.flatten() if isinstance(value, SearchArray): value = value.to_numpy() if isinstance(value, list): value = np.asarray(value, dtype=object) if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value): raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}") # Cant set a single value to an array if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray): raise ValueError("Cannot set a single value to an array") try: is_encoded = False posns = None term_mat = np.asarray([]) doc_lens = np.asarray([]) if isinstance(value, float): term_mat = np.asarray([value]) doc_lens = np.asarray([0]) elif isinstance(value, Terms): term_mat = np.asarray([value.tf_to_dense(self.term_dict)]) doc_lens = np.asarray([value.doc_len]) is_encoded = value.encoded posns = [value.raw_positions(self.term_dict)] elif isinstance(value, np.ndarray): term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value]) doc_lens = np.asarray([x.doc_len for x in value]) is_encoded = value[0].encoded if len(value) > 0 else False posns = [x.raw_positions(self.term_dict) for x in value] np.nan_to_num(term_mat, copy=False, nan=0) self.term_mat[key] = term_mat self.doc_lens[key] = doc_lens if posns is not None: self.posns.insert(key, posns, is_encoded) # Assume we have a positions for each term, doc pair. We can just update it. # Otherwise we would have added new terms except TermMissingError: self._add_new_terms(key, value) def _add_new_terms(self, key, value): msg = """Adding new terms! This might not be good if you tokenized this new text with a different tokenizer. Also. 
This is slow.""" warnings.warn(msg) scan_value = value if isinstance(value, Terms): scan_value = np.asarray([value]) for row in scan_value: for term in row.terms(): self.term_dict.add_term(term[0]) self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict))) # Ensure posns_lookup has at least max self.posns self[key] = value def value_counts( self, dropna: bool = True, ): if dropna: counts = Counter(self[:]) counts.pop(Terms({}), None) else: counts = Counter(self[:]) return pd.Series(counts) def __len__(self): len_rval = len(self.term_mat.rows) return len_rval def __ne__(self, other): if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index): return NotImplemented return ~(self == other) def __eq__(self, other): """Return a boolean numpy array indicating elementwise equality.""" # When other is a dataframe or series, not implemented if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index): return NotImplemented # When other is an ExtensionArray if isinstance(other, SearchArray): if len(self) != len(other): return False elif len(other) == 0: return np.array([], dtype=bool) else: # Compatible term dicts, and same term freqs # (not looking at positions, maybe we should?) if self.term_dict.compatible(other.term_dict): return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens) else: return np.zeros(len(self), dtype=bool) # return np.array(self[:]) == np.array(other[:]) # When other is a scalar value elif isinstance(other, Terms): other = SearchArray([other], tokenizer=self.tokenizer) warnings.warn("Comparing a scalar value to a SearchArray. This is slow.") return np.array(self[:]) == np.array(other[:]) # When other is a sequence but not an ExtensionArray # its an array of dicts elif is_list_like(other): if len(self) != len(other): return False elif len(other) == 0: return np.array([], dtype=bool) # We actually don't know how it was tokenized other = SearchArray(other, tokenizer=self.tokenizer) return np.array(self[:]) == np.array(other[:]) # Return False where 'other' is neither the same length nor a scalar else: return np.full(len(self), False) def isna(self): # Every row with all 0s empties = self.doc_lens == 0 return empties def take(self, indices, allow_fill=False, fill_value=None): # Want to take rows of term freqs row_indices = np.arange(len(self.term_mat.rows)) # Take within the row indices themselves result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1) if allow_fill and -1 in result_indices: if fill_value is None or pd.isna(fill_value): fill_value = Terms({}, encoded=True) to_fill_mask = result_indices == -1 # This is slow as it rebuilds all the term dictionaries # on the subsequent assignment lines # However, this case tends to be the exception for # most dataframe operations taken = SearchArray([fill_value] * len(result_indices)) taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy() return taken else: taken = self[result_indices].copy() return taken def copy(self): postings_arr = SearchArray([], tokenizer=self.tokenizer) postings_arr.doc_lens = self.doc_lens.copy() postings_arr.term_mat = self.term_mat.copy() postings_arr.posns = self.posns postings_arr.term_dict = self.term_dict postings_arr.avg_doc_length = self.avg_doc_length if not self.avoid_copies: postings_arr.posns = self.posns.copy() postings_arr.term_dict = self.term_dict.copy() return postings_arr @classmethod def _concat_same_type(cls, to_concat): concatenated_data = 
np.concatenate([ea[:] for ea in to_concat]) return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer) @classmethod def _from_factorized(cls, values, original): return cls(values) def _values_for_factorize(self): """Return an array and missing value suitable for factorization (ie grouping).""" arr = np.asarray(self[:], dtype=object) return arr, Terms({}) def _check_token_arg(self, token): if isinstance(token, str): return token elif isinstance(token, list) and len(token) == 1: return token[0] elif isinstance(token, list): return token else: raise TypeError("Expected a string or list of strings for phrases") # *********************************************************** # Search functionality # *********************************************************** def termfreqs(self, token: Union[List[str], str]) -> np.ndarray: token = self._check_token_arg(token) if isinstance(token, list): return self.phrase_freq(token) try: term_id = self.term_dict.get_term_id(token) matches = np.zeros(len(self), dtype=int) slice_of_rows = None if self.term_mat.subset: slice_of_rows = self.term_mat.rows doc_ids, termfreqs = self.posns.termfreqs(term_id, doc_ids=slice_of_rows) mask = np.isin(self.term_mat.rows, doc_ids) matches[mask] = termfreqs return matches else: doc_ids, termfreqs = self.posns.termfreqs(term_id, doc_ids=slice_of_rows) matches[doc_ids] = termfreqs return matches except TermMissingError: return np.zeros(len(self), dtype=int) def docfreq(self, token: str) -> int: if not isinstance(token, str): raise TypeError("Expected a string") # Count number of rows where the term appears try: return self.posns.docfreq(self.term_dict.get_term_id(token)) except TermMissingError: return 0 def doclengths(self) -> np.ndarray: return self.doc_lens def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray: """Return a boolean numpy array indicating which elements contain the given term.""" token = self._check_token_arg(token) if isinstance(token, list): term_freq = self.phrase_freq(token) else: term_freq = self.termfreqs(token) return term_freq > 0 def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray: """Score each doc using a similarity function. Parameters ---------- token : str or list of str of what to search (already tokenized) similarity : How to score the documents. Default is BM25. 
""" # Get term freqs per token token = self._check_token_arg(token) # For expensive toknes, we compute doc freq first, so we # cache them in the DF cache, to let TF cache know it should be cached tokens_l = [token] if isinstance(token, str) else token all_dfs = np.asarray([self.docfreq(token) for token in tokens_l]) tfs = self.termfreqs(token) token = self._check_token_arg(token) doc_lens = self.doclengths() scores = similarity(term_freqs=tfs, doc_freqs=all_dfs, doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length, num_docs=len(self)) return scores def positions(self, token: str, key=None) -> List[np.ndarray]: """Return a list of lists of positions of the given term.""" term_id = self.term_dict.get_term_id(token) key = self.term_mat.rows[key] if key is not None else self.term_mat.rows posns = self.posns.positions(term_id, doc_ids=key) return posns def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray: """Return a mask on the postings array indicating which elements contain all terms.""" masks = [self.match(term) for term in tokens] mask = np.ones(len(self), dtype=bool) for curr_mask <fim_suffix> masks: mask = mask & curr_mask return mask def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray: """Return a mask on the postings array indicating which elements contain all terms.""" masks = [self.match(term) for term in tokens] mask = np.sum(masks, axis=0) >= min_should_match return mask def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray: if slop == 1 and len(tokens) == len(set(tokens)): phrase_freqs = np.zeros(len(self)) try: doc_ids = self.term_mat.rows term_ids = [self.term_dict.get_term_id(token) for token in tokens] return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids, phrase_freqs=phrase_freqs) except TermMissingError: return phrase_freqs else: return self.phrase_freq_every_diff(tokens, slop=slop) def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray: if mask is None: mask = self.and_query(tokens) if np.sum(mask) == 0: return mask # Gather positions posns = [self.positions(token, mask) for token in tokens] phrase_freqs = np.zeros(len(self)) phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop) return phrase_freqs def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray: phrase_freqs = -np.ones(len(self)) mask = self.and_query(tokens) phrase_freqs[~mask] = 0 if np.sum(mask) == 0: return phrase_freqs term_posns = [self.positions(term, mask) for term in tokens] for width in [10, 20, 30, 40]: phrase_freqs[mask] = compute_phrase_freqs(term_posns, phrase_freqs[mask], slop=slop, width=width) remaining_mask = phrase_freqs == -1 if np.any(remaining_mask): remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop) phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask] return phrase_freqs <fim_middle>in
in
FOR
complete_current_header_inner_block_completion
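The row above masks a span inside searcharray's SearchArray pandas extension array. A rough usage sketch of the public methods visible in that listing (index, termfreqs, match, score, and phrase queries passed as a token list), assuming the package is importable as `searcharray`; printed values are illustrative:

```python
import pandas as pd
from searcharray.postings import SearchArray

# Index a few whitespace-tokenized documents into a pandas column.
df = pd.DataFrame({"title": ["red shoes", "blue shoes", "red hat"]})
df["title_idx"] = SearchArray.index(df["title"])

# Per-document term frequencies and a boolean match mask.
print(df["title_idx"].array.termfreqs("red"))   # e.g. array([1, 0, 1])
print(df["title_idx"].array.match("shoes"))     # e.g. array([ True,  True, False])

# BM25-style scores; a list of tokens is scored as a phrase.
print(df["title_idx"].array.score("red"))
print(df["title_idx"].array.score(["red", "shoes"]))
```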
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping. See this notebook for motivation: https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG """ import numpy as np import sortednp as snp from copy import deepcopy from typing import List, Tuple, Dict, Union, cast, Optional from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique import numbers import logging from collections import defaultdict from searcharray.utils.bitcount import bit_count64 logger = logging.getLogger(__name__) # When running in pytest import sys # noqa handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.ERROR) formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.ERROR) encoder = RoaringishEncoder() # To not constantly type coerce _64 = np.uint64(64) _2 = np.uint64(2) _1 = np.uint64(1) _0 = np.uint64(0) _neg1 = np.int64(-1) MAX_POSN = encoder.max_payload def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs. Returns: -------- count: number of matches per doc rhs_next: the next rhs array to continue matching """ lhs_int, rhs_int = encoder.intersect(lhs, rhs) lhs_doc_ids = encoder.keys(lhs_int) if len(lhs_int) != len(rhs_int): raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.") if len(lhs_int) == 0: return phrase_freqs, rhs_int same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0]) if same_term: # Find adjacent matches rhs_shift = rhs_int << _1 overlap = lhs_int & rhs_shift overlap = encoder.payload_lsb(overlap) adjacents = bit_count64(overlap).astype(np.int64) adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide phrase_freqs[lhs_doc_ids] += adjacents return phrase_freqs, rhs_int overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1) rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask)) phrase_freqs2 = phrase_freqs.copy() matches2 = overlap_bits > 0 if np.any(matches2): transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1 transitions = np.insert(transitions, 0, 0) counted_bits = bit_count64(overlap_bits[matches2]) reduced = np.add.reduceat(counted_bits, transitions) phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced return phrase_freqs2, rhs_next2 def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words. 
Returns: -------- count: number of matches per doc rhs_next: the next rhs array to continue matching """ lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1) lhs_doc_ids = encoder.keys(lhs_int) # lhs lsb set and rhs lsb's most significant bit set upper_bit = _1 << (encoder.payload_lsb_bits - _1) matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0) unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True) phrase_freqs[unique] += counts rhs_next = rhs_int rhs_next[~matches] |= ~encoder.payload_lsb_mask rhs_next[matches] |= (encoder.payload_lsb_mask & _1) return phrase_freqs, rhs_next def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """Count bigram matches between two encoded arrays. Returns: -------- count: number of matches per doc rhs_next: the next rhs array to continue matching """ # Combine lhs and rhs matches from two strategies phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs) phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs) rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj])) # Combine return phrase_freqs, rhs_next def trim_phrase_search(encoded_posns: List[np.ndarray], phrase_freqs: np.ndarray) -> List[np.ndarray]: """Trim long phrases by searching the rarest terms first.""" # Start with rarest term shortest_keys = None shortest_idx = None min_len = 1e100 max_len = 0 for idx, enc_posn in enumerate(encoded_posns): if len(enc_posn) < min_len: shortest_keys = encoder.keys(enc_posn) shortest_idx = idx min_len = len(enc_posn) if len(enc_posn) > max_len: max_len = len(enc_posn) if shortest_keys is None: return encoded_posns for enc_posn_idx in range(len(encoded_posns)): if enc_posn_idx == shortest_idx: continue if len(encoded_posns[enc_posn_idx]) > (10 * min_len): encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx], shortest_keys) return encoded_posns def compute_phrase_freqs(encoded_posns: List[np.ndarray], phrase_freqs: np.ndarray) -> np.ndarray: if len(encoded_posns) < 2: raise ValueError("phrase must have at least two terms") # Trim long phrases by searching the rarest terms first if len(encoded_posns) > 3: encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs) mask = np.ones(len(phrase_freqs), dtype=bool) lhs = encoded_posns[0] for rhs <fim_suffix> encoded_posns[1:]: # Only count the count of the last bigram (ignoring the ones where priors did not match) phrase_freqs[mask] = 0 phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs) mask &= (phrase_freqs > 0) phrase_freqs[~mask] = 0 return phrase_freqs class PosnBitArrayFromFlatBuilder: """ Build from sorted array shape num terms x 3. 
0th is term id 1st is doc id 2nd is posn Sorted by term id then posns """ def __init__(self, flat_array: np.ndarray): self.flat_array = flat_array def build(self): """Slice the flat array into a 2d array of doc ids and posns.""" term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1 term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]]) encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64), boundaries=term_boundaries[:-1], payload=self.flat_array[2].view(np.uint64)) term_ids = self.flat_array[0][term_boundaries[:-1]] encoded_term_posns = {} for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])): sliced = encoded[beg_idx:end_idx] encoded_term_posns[term_ids[into_terms]] = sliced return PosnBitArray(encoded_term_posns, self.flat_array[1].max()) class PosnBitArrayBuilder: def __init__(self): self.term_posns = defaultdict(list) self.term_posn_doc_ids = defaultdict(list) self.max_doc_id = 0 def add_posns(self, doc_id: int, term_id: int, posns: List[int]): doc_ids = [doc_id] * len(posns) self.term_posns[term_id].extend(posns) self.term_posn_doc_ids[term_id].extend(doc_ids) def ensure_capacity(self, doc_id): self.max_doc_id = max(self.max_doc_id, doc_id) def build(self, check=False): encoded_term_posns = {} for term_id, posns in self.term_posns.items(): if len(posns) == 0: posns = np.asarray([], dtype=np.uint32).flatten() elif isinstance(posns, list): posns_arr = np.asarray(posns, dtype=np.uint32).flatten() posns = posns_arr doc_ids = self.term_posn_doc_ids[term_id] if isinstance(doc_ids, list): doc_ids = np.asarray(doc_ids, dtype=np.uint32) encoded = encoder.encode(keys=doc_ids, payload=posns) if check: decode_again = encoder.decode(encoded) docs_to_posns = dict(decode_again) doc_ids_again = [] posns_again = [] for doc_id, posns_dec in docs_to_posns.items(): for posn in posns_dec: doc_ids_again.append(doc_id) posns_again.append(posn) assert np.array_equal(doc_ids_again, doc_ids) assert np.array_equal(posns, posns_again) encoded_term_posns[term_id] = encoded return PosnBitArray(encoded_term_posns, self.max_doc_id) class PosnBitArrayAlreadyEncBuilder: def __init__(self): self.encoded_term_posns = {} self.max_doc_id = 0 def add_posns(self, doc_id: int, term_id: int, posns): self.encoded_term_posns[term_id] = posns def ensure_capacity(self, doc_id): self.max_doc_id = max(self.max_doc_id, doc_id) def build(self, check=False): return PosnBitArray(self.encoded_term_posns, self.max_doc_id) def index_range(rng, key): if key is None: return rng if isinstance(rng, np.ndarray): return rng[key] if isinstance(key, slice): return rng[key] elif isinstance(key, numbers.Number): return rng[key] elif isinstance(key, np.ndarray): try: # UNFORTUNATE COPY r_val = np.asarray(list(rng))[key] return r_val except IndexError as e: raise e # Last resort # UNFORTUNATE COPY # Here probably elipses or a tuple of various things return np.asarray(list(rng))[key] class PosnBitArray: def __init__(self, encoded_term_posns, max_doc_id: int): self.encoded_term_posns = encoded_term_posns self.max_doc_id = max_doc_id self.docfreq_cache : Dict[int, np.uint64] = {} self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {} def copy(self): new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id) return new def concat(self, other): """Merge other into self. Assumes other's doc ids are not overlapping with self's doc ids. 
""" # Shared terms shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys())) for term_id in shared_terms: # Append then sort self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]]) self.encoded_term_posns[term_id].sort() only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys())) for term_id in only_other_terms: self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id] self.max_doc_id = max(self.max_doc_id, other.max_doc_id) # Empty caches self.termfreq_cache = {} self.docfreq_cache = {} def slice(self, key): sliced_term_posns = {} doc_ids = convert_keys(key) max_doc_id = np.max(doc_ids) for term_id, posns in self.encoded_term_posns.items(): encoded = self.encoded_term_posns[term_id] assert len(encoded.shape) == 1 sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids) return PosnBitArray(sliced_term_posns, max_doc_id) def __getitem__(self, key): return self.slice(key) def merge(self, other): # Unique terms unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys())) for term_id in unique_terms: if term_id not in other.encoded_term_posns: continue elif term_id not in self.encoded_term_posns: self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id] else: posns_self = self.encoded_term_posns[term_id] posns_other = other.encoded_term_posns[term_id] self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other) self.max_doc_id = self.max_doc_id + other.max_doc_id # Empty caches self.termfreq_cache = {} self.docfreq_cache = {} def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray: term_posns = encoder.slice(self.encoded_term_posns[term_id], keys=np.asarray([doc_id], dtype=np.uint64)) return term_posns def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray, doc_ids: np.ndarray) -> np.ndarray: if len(term_ids) < 2: raise ValueError("Must have at least two terms") if phrase_freqs.shape[0] == self.max_doc_id + 1: enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids] else: enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id], keys=doc_ids.view(np.uint64)) for term_id in term_ids] return compute_phrase_freqs(enc_term_posns, phrase_freqs) def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]: if isinstance(doc_ids, numbers.Number): doc_ids = np.asarray([doc_ids]) try: np_doc_ids = convert_keys(doc_ids) term_posns = encoder.slice(self.encoded_term_posns[term_id], keys=np_doc_ids) except KeyError: r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids] if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number): return [r_val[0]] return r_val decoded = encoder.decode(encoded=term_posns, get_keys=True) if len(decoded) == 0: return [np.array([], dtype=np.uint32)] if len(decoded) != len(doc_ids): # Fill non matches decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded) as_dict: Dict[np.uint64, np.ndarray] = dict(decoded) decs = [] for doc_id in doc_ids: if doc_id in as_dict: decs.append(as_dict[doc_id]) else: decs.append(np.array([], dtype=np.uint32)) return decs else: decs = [dec[1] for dec in decoded] return decs def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]: """Count term freqs using unique positions.""" if doc_ids is None: return self._termfreqs_with_cache(term_id) encoded = 
self.encoded_term_posns[term_id] term_posns = encoded term_posns = encoder.slice(encoded, keys=doc_ids.astype(np.uint64)) return self._computed_term_freqs(term_posns) def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]: doc_ids = encoder.keys(term_posns) change_indices = np.nonzero(np.diff(doc_ids))[0] change_indices = np.concatenate((np.asarray([0]), change_indices + 1)) posns = term_posns & encoder.payload_lsb_mask bit_counts = bit_count64(posns) term_freqs = np.add.reduceat(bit_counts, change_indices) return sorted_unique(doc_ids), term_freqs def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]: try: return self.termfreq_cache[term_id] except KeyError: term_posns = self.encoded_term_posns[term_id] doc_ids, term_freqs = self._computed_term_freqs(term_posns) if self._is_cached(term_id): self.termfreq_cache[term_id] = (doc_ids, term_freqs) return doc_ids, term_freqs def _is_cached(self, term_id: int) -> bool: return term_id in self.docfreq_cache def _docfreq_from_cache(self, term_id: int) -> np.uint64: return self.docfreq_cache[term_id] def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64): if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100): self.docfreq_cache[term_id] = docfreq def docfreq(self, term_id: int) -> np.uint64: try: return self.docfreq_cache[term_id] except KeyError: encoded = self.encoded_term_posns[term_id] docfreq = np.uint64(encoder.keys_unique(encoded).size) self._maybe_cache_docfreq(term_id, docfreq) return docfreq def insert(self, key, term_ids_to_posns, is_encoded=False): new_posns = PosnBitArrayBuilder() if is_encoded: new_posns = PosnBitArrayAlreadyEncBuilder() max_doc_id = 0 for doc_id, new_posns_row in enumerate(term_ids_to_posns): for term_id, positions in new_posns_row: new_posns.add_posns(doc_id, term_id, positions) max_doc_id = max(doc_id, max_doc_id) new_posns.max_doc_id = max_doc_id ins_arr = new_posns.build() self.merge(ins_arr) @property def nbytes(self): arr_bytes = 0 for doc_id, posn in self.encoded_term_posns.items(): arr_bytes += posn.nbytes for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items(): arr_bytes += doc_ids.nbytes arr_bytes += term_freqs.nbytes for term_id, docfreq in self.docfreq_cache.items(): arr_bytes += docfreq.nbytes return arr_bytes <fim_middle>in
in
FOR
complete_current_header_inner_block_completion
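The docstrings in the row above describe counting phrase bigrams by intersecting bit-encoded positions and shifting by one bit. A toy illustration of that shift-and-AND idea on plain Python integers, deliberately ignoring the library's Roaringish key/payload layout and doc-id handling:

```python
def toy_bigram_count(lhs_posns, rhs_posns):
    """Count positions where an rhs token immediately follows an lhs token.

    Positions are set bits in a plain integer bitmask here; the real encoder
    packs doc ids and position blocks into 64-bit words instead.
    """
    lhs_bits = 0
    rhs_bits = 0
    for p in lhs_posns:
        lhs_bits |= 1 << p
    for p in rhs_posns:
        rhs_bits |= 1 << p
    # A bigram exists wherever lhs has bit p set and rhs has bit p+1 set,
    # i.e. where (lhs << 1) overlaps rhs -- the same trick inner_bigram_freqs
    # applies within a 64-bit payload word.
    overlap = (lhs_bits << 1) & rhs_bits
    return bin(overlap).count("1")


# "red shoes" occurring twice: "red" at positions 0 and 5, "shoes" at 1 and 6.
print(toy_bigram_count([0, 5], [1, 6]))  # 2
```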
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse_min_should_match(num_clauses: int, spec: str) -> int: """Parse Solr's min should match (ie mm) spec. See this ChatGPT translation of mm code from Solr's Java code for parsing this https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb Parameters ---------- num_clauses : int spec : str Returns ------- int : the number of clauses that must match """ def checked_parse_int(value, error_message): try: return int(value) except ValueError: raise ValueError(error_message) result = num_clauses spec = spec.strip() if '<' in spec: # we have conditional spec(s) space_around_less_than_pattern = re.compile(r'\s*<\s*') spec = space_around_less_than_pattern.sub('<', spec) for s in spec.split(): parts = s.split('<', 1) if len(parts) < 2: raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'") upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.") if num_clauses <= upper_bound: return result else: result = parse_min_should_match(num_clauses, parts[1]) return result # otherwise, simple expression if '%' in spec: # percentage - assume the % was the last char. If not, let int() fail. spec = spec[:-1] percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") calc = (result * percent) * (1 / 100) result = result + int(calc) if calc < 0 else int(calc) else: calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") result = result + calc if calc < 0 else calc return min(num_clauses, max(result, 0)) def parse_field_boosts(field_lists: List[str]) -> dict: """Parse Solr's qf, pf, pf2, pf3 field boosts.""" if not field_lists: return {} out = {} carat_pattern = re.compile(r'\^') for field <fim_suffix> field_lists: parts = carat_pattern.split(field) out[parts[0]] = None if len(parts) == 1 else float(parts[1]) return out def get_field(frame, field) -> SearchArray: if field not in frame.columns: raise ValueError(f"Field {field} not in dataframe") if not isinstance(frame[field].array, SearchArray): raise ValueError(f"Field {field} is not a searcharray field") return frame[field].array def parse_query_terms(frame: pd.DataFrame, query: str, query_fields: List[str]): search_terms: Dict[str, List[str]] = {} num_search_terms = 0 term_centric = True for field in query_fields: arr = get_field(frame, field) tokenizer = arr.tokenizer search_terms[field] = [] field_num_search_terms = 0 for posn, term in enumerate(tokenizer(query)): search_terms[field].append(term) field_num_search_terms += 1 if num_search_terms == 0: num_search_terms = field_num_search_terms elif field_num_search_terms != num_search_terms: term_centric = False return num_search_terms, search_terms, term_centric def _edismax_term_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity) -> Tuple[np.ndarray, str]: explain = [] term_scores = [] for term_posn in range(num_search_terms): max_scores = np.zeros(len(frame)) term_explain = [] for field, boost in query_fields.items(): term = search_terms[field][term_posn] post_arr = get_field(frame, field) field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost) 
boost_exp = f"{boost}" if boost is not None else "1" term_explain.append(f"{field}:{term}^{boost_exp}") max_scores = np.maximum(max_scores, field_term_score) term_scores.append(max_scores) explain.append("(" + " | ".join(term_explain) + ")") min_should_match = parse_min_should_match(num_search_terms, spec=mm) qf_scores = np.asarray(term_scores) matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match qf_scores = np.sum(term_scores, axis=0) qf_scores[~matches_gt_mm] = 0 return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}" def _edismax_field_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: field_scores = [] explain = [] for field, boost in query_fields.items(): post_arr = get_field(frame, field) term_scores = np.array([post_arr.score(term, similarity=similarity) for term in search_terms[field]]) min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm) exp = " ".join([f"{field}:{term}" for term in search_terms[field]]) boost_exp = f"{boost}" if boost is not None else "1" exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}" exp = "(" + exp + f")^{boost_exp}" matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field])) sum_terms_bm25 = np.sum(term_scores, axis=0) sum_terms_bm25[~matches_gt_mm] = 0 field_scores.append(sum_terms_bm25 * (1 if boost is None else boost)) explain.append(exp) # Take maximum field scores as qf qf_scores = np.asarray(field_scores) qf_scores = np.max(qf_scores, axis=0) return qf_scores, " | ".join(explain) def edismax(frame: pd.DataFrame, q: str, qf: List[str], mm: Optional[str] = None, pf: Optional[List[str]] = None, pf2: Optional[List[str]] = None, pf3: Optional[List[str]] = None, q_op: str = "OR", similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: """Run edismax search over dataframe with searcharray fields. 
Parameters ---------- q : str The query string mm : str The minimum should match spec qf : list The fields to search pf : list The fields to search for phrase matches pf2 : list The fields to search for bigram matches pf3 : list The fields to search for trigram matches q_op : str, optional The default operator, by default "OR" Returns ------- np.ndarray The search results """ def listify(x): return x if isinstance(x, list) else [x] query_fields = parse_field_boosts(listify(qf)) phrase_fields = parse_field_boosts(listify(pf)) if pf else {} if mm is None: mm = "1" if q_op == "AND": mm = "100%" # bigram_fields = parse_field_boosts(pf2) if pf2 else {} # trigram_fields = parse_field_boosts(pf3) if pf3 else {} num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys())) if term_centric: qf_scores, explain = _edismax_term_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) else: qf_scores, explain = _edismax_field_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) phrase_scores = [] for field, boost in phrase_fields.items(): arr = get_field(frame, field) terms = search_terms[field] field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost) boost_exp = f"{boost}" if boost is not None else "1" explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}" phrase_scores.append(field_phrase_score) if len(phrase_scores) > 0: phrase_scores = np.sum(phrase_scores, axis=0) # Add where term_scores > 0 term_match_idx = np.where(qf_scores)[0] qf_scores[term_match_idx] += phrase_scores[term_match_idx] return qf_scores, explain <fim_middle>in
in
FOR
complete_current_header_inner_block_completion
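As a quick illustration of the Solr-style `mm` and field-boost parsing shown in the row above; the expected values follow from the listed code, but treat them as a sketch rather than a reference:

```python
from searcharray.solr import parse_min_should_match, parse_field_boosts

# Plain integer and percentage specs.
print(parse_min_should_match(4, "2"))     # 2  -> at least two clauses must match
print(parse_min_should_match(4, "75%"))   # 3  -> 75% of four clauses
print(parse_min_should_match(4, "-25%"))  # 3  -> all but 25% of the clauses

# Conditional spec: with <= 2 clauses require them all, otherwise require 2.
print(parse_min_should_match(2, "2<2"))   # 2
print(parse_min_should_match(5, "2<2"))   # 2

# Carat-separated field boosts; a missing boost is parsed as None.
print(parse_field_boosts(["title^2", "body"]))  # {'title': 2.0, 'body': None}
```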
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse_min_should_match(num_clauses: int, spec: str) -> int: """Parse Solr's min should match (ie mm) spec. See this ChatGPT translation of mm code from Solr's Java code for parsing this https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb Parameters ---------- num_clauses : int spec : str Returns ------- int : the number of clauses that must match """ def <fim_suffix> error_message): try: return int(value) except ValueError: raise ValueError(error_message) result = num_clauses spec = spec.strip() if '<' in spec: # we have conditional spec(s) space_around_less_than_pattern = re.compile(r'\s*<\s*') spec = space_around_less_than_pattern.sub('<', spec) for s in spec.split(): parts = s.split('<', 1) if len(parts) < 2: raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'") upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.") if num_clauses <= upper_bound: return result else: result = parse_min_should_match(num_clauses, parts[1]) return result # otherwise, simple expression if '%' in spec: # percentage - assume the % was the last char. If not, let int() fail. spec = spec[:-1] percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") calc = (result * percent) * (1 / 100) result = result + int(calc) if calc < 0 else int(calc) else: calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") result = result + calc if calc < 0 else calc return min(num_clauses, max(result, 0)) def parse_field_boosts(field_lists: List[str]) -> dict: """Parse Solr's qf, pf, pf2, pf3 field boosts.""" if not field_lists: return {} out = {} carat_pattern = re.compile(r'\^') for field in field_lists: parts = carat_pattern.split(field) out[parts[0]] = None if len(parts) == 1 else float(parts[1]) return out def get_field(frame, field) -> SearchArray: if field not in frame.columns: raise ValueError(f"Field {field} not in dataframe") if not isinstance(frame[field].array, SearchArray): raise ValueError(f"Field {field} is not a searcharray field") return frame[field].array def parse_query_terms(frame: pd.DataFrame, query: str, query_fields: List[str]): search_terms: Dict[str, List[str]] = {} num_search_terms = 0 term_centric = True for field in query_fields: arr = get_field(frame, field) tokenizer = arr.tokenizer search_terms[field] = [] field_num_search_terms = 0 for posn, term in enumerate(tokenizer(query)): search_terms[field].append(term) field_num_search_terms += 1 if num_search_terms == 0: num_search_terms = field_num_search_terms elif field_num_search_terms != num_search_terms: term_centric = False return num_search_terms, search_terms, term_centric def _edismax_term_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity) -> Tuple[np.ndarray, str]: explain = [] term_scores = [] for term_posn in range(num_search_terms): max_scores = np.zeros(len(frame)) term_explain = [] for field, boost in query_fields.items(): term = search_terms[field][term_posn] post_arr = get_field(frame, field) field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost) boost_exp = f"{boost}" if 
boost is not None else "1" term_explain.append(f"{field}:{term}^{boost_exp}") max_scores = np.maximum(max_scores, field_term_score) term_scores.append(max_scores) explain.append("(" + " | ".join(term_explain) + ")") min_should_match = parse_min_should_match(num_search_terms, spec=mm) qf_scores = np.asarray(term_scores) matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match qf_scores = np.sum(term_scores, axis=0) qf_scores[~matches_gt_mm] = 0 return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}" def _edismax_field_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: field_scores = [] explain = [] for field, boost in query_fields.items(): post_arr = get_field(frame, field) term_scores = np.array([post_arr.score(term, similarity=similarity) for term in search_terms[field]]) min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm) exp = " ".join([f"{field}:{term}" for term in search_terms[field]]) boost_exp = f"{boost}" if boost is not None else "1" exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}" exp = "(" + exp + f")^{boost_exp}" matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field])) sum_terms_bm25 = np.sum(term_scores, axis=0) sum_terms_bm25[~matches_gt_mm] = 0 field_scores.append(sum_terms_bm25 * (1 if boost is None else boost)) explain.append(exp) # Take maximum field scores as qf qf_scores = np.asarray(field_scores) qf_scores = np.max(qf_scores, axis=0) return qf_scores, " | ".join(explain) def edismax(frame: pd.DataFrame, q: str, qf: List[str], mm: Optional[str] = None, pf: Optional[List[str]] = None, pf2: Optional[List[str]] = None, pf3: Optional[List[str]] = None, q_op: str = "OR", similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: """Run edismax search over dataframe with searcharray fields. 
Parameters ---------- q : str The query string mm : str The minimum should match spec qf : list The fields to search pf : list The fields to search for phrase matches pf2 : list The fields to search for bigram matches pf3 : list The fields to search for trigram matches q_op : str, optional The default operator, by default "OR" Returns ------- np.ndarray The search results """ def listify(x): return x if isinstance(x, list) else [x] query_fields = parse_field_boosts(listify(qf)) phrase_fields = parse_field_boosts(listify(pf)) if pf else {} if mm is None: mm = "1" if q_op == "AND": mm = "100%" # bigram_fields = parse_field_boosts(pf2) if pf2 else {} # trigram_fields = parse_field_boosts(pf3) if pf3 else {} num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys())) if term_centric: qf_scores, explain = _edismax_term_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) else: qf_scores, explain = _edismax_field_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) phrase_scores = [] for field, boost in phrase_fields.items(): arr = get_field(frame, field) terms = search_terms[field] field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost) boost_exp = f"{boost}" if boost is not None else "1" explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}" phrase_scores.append(field_phrase_score) if len(phrase_scores) > 0: phrase_scores = np.sum(phrase_scores, axis=0) # Add where term_scores > 0 term_match_idx = np.where(qf_scores)[0] qf_scores[term_match_idx] += phrase_scores[term_match_idx] return qf_scores, explain <fim_middle>checked_parse_int(value,
checked_parse_int(value,
METHOD
complete_current_header_inner_block_completion
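The same solr.py listing appears above with a different masked span. For context, a minimal sketch of driving its top-level `edismax` entry point over a dataframe whose fields were indexed with SearchArray; the column names and query are made up for illustration:

```python
import pandas as pd
from searcharray.postings import SearchArray
from searcharray.solr import edismax

df = pd.DataFrame({"title": ["red shoes", "blue shoes"],
                   "body": ["comfortable red shoes", "blue suede shoes"]})
df["title"] = SearchArray.index(df["title"])
df["body"] = SearchArray.index(df["body"])

# qf takes Solr-style boosted field names; the function returns per-row
# scores plus a human-readable explain string.
scores, explain = edismax(df, q="red shoes", qf=["title^2", "body"],
                          pf=["title"], mm="100%")
print(scores)
print(explain)
```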
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate <fim_suffix> types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>base
base
BLOCK_COMMENT
complete_current_header_inner_block_completion
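The row above quotes the validator's dict-handling path in instantiate, where a dataclass target is rebuilt from a filtered dictionary. A minimal, standalone sketch of that pattern follows, assuming a throwaway Point dataclass and a from_dict helper that are illustrative names only; the quoted file routes each value through its recursive instantiate call, whereas this sketch coerces directly with the hinted type.

import dataclasses
from dataclasses import dataclass
from typing import get_type_hints

@dataclass
class Point:
    x: int
    y: int

def from_dict(cls, data: dict):
    # Keep only the keys that match declared fields, then coerce each value
    # with its hinted type before calling the generated __init__.
    field_names = {f.name for f in dataclasses.fields(cls)}
    hints = get_type_hints(cls)
    filtered = {key: hints[key](val) for key, val in data.items() if key in field_names}
    return cls(**filtered)

print(from_dict(Point, {"x": "1", "y": 2, "z": "ignored"}))  # Point(x=1, y=2)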
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if <fim_suffix> tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>self._is_subclass_of_generic(target_type,
self._is_subclass_of_generic(target_type,
IF
complete_current_header_inner_block_completion
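The IF completion in the row above fills in the check that decides whether the instantiated tuple should be re-wrapped in the caller's type. Below is a small, self-contained sketch of that subclass-of-generic test; the function name and the Pair class are assumptions for illustration, and the quoted file inspects __origin__ directly where this sketch uses typing.get_origin.

from typing import Tuple, Type, get_origin

def is_subclass_of_generic(cls: Type, generic: type) -> bool:
    # Real subclasses pass the plain issubclass test (excluding the generic itself);
    # parameterized aliases such as Tuple[int, str] raise TypeError, so fall back
    # to comparing their runtime origin.
    try:
        return issubclass(cls, generic) and cls is not generic
    except TypeError:
        return get_origin(cls) is generic

class Pair(tuple):
    pass

print(is_subclass_of_generic(Pair, tuple))             # True
print(is_subclass_of_generic(Tuple[int, str], tuple))  # True
print(is_subclass_of_generic(list, tuple))             # False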
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if <fim_suffix> '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>hasattr(target_type,
hasattr(target_type,
IF
complete_current_header_inner_block_completion
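The completion targeted by the row above guards the __orig_bases__ lookup in _find_generic_base_and_args. The short sketch below shows why that attribute is checked before walking the MRO: parameterized bases keep their type arguments there, while the MRO only records bare classes. IntList is a throwaway example class, not part of the quoted module.

from typing import List, get_args

class IntList(List[int]):
    pass

# Entries in the MRO are bare classes, so no type arguments survive there...
print(any(get_args(base) for base in IntList.__mro__))        # False
# ...but __orig_bases__ still records List[int], so get_args recovers (int,).
print([get_args(base) for base in IntList.__orig_bases__])    # [(int,)]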
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if <fim_suffix> tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>not isinstance(value,
not isinstance(value,
IF
complete_current_header_inner_block_completion
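The last complete row targets the "not isinstance(value, tuple)" guard in check_type's tuple branch. A minimal standalone version of that branch is sketched below; it mirrors the quoted code's simplification of treating a single subscripted argument as the type of every element, and check_homogeneous_tuple is an illustrative name rather than the validator's API.

from typing import Any, Tuple, get_args, get_origin

def check_homogeneous_tuple(value: Any, type_definition: Any) -> bool:
    # Reject anything that is not a tuple, then validate every element against
    # the single subscripted argument (Any when the tuple is unparameterized).
    if get_origin(type_definition) is not tuple:
        return False
    if not isinstance(value, tuple):
        return False
    args = get_args(type_definition)
    item_type = args[0] if args else Any
    return all(item_type is Any or isinstance(v, item_type) for v in value)

print(check_homogeneous_tuple((1, 2, 3), Tuple[int]))  # True
print(check_homogeneous_tuple([1, 2, 3], Tuple[int]))  # False: not a tuple
print(check_homogeneous_tuple((1, "a"), Tuple[int]))   # False: mixed element types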
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if <fim_suffix> list) and not 
self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>self._is_subclass_of_generic(target_type,
self._is_subclass_of_generic(target_type,
IF
complete_current_header_inner_block_completion
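The row above targets the list-handling branch of Validator.instantiate, where the completed header (if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):) decides whether to rebuild the caller's list subclass or fall back to a plain list. A minimal usage sketch of the check_type/instantiate pair defined in that row, assuming the module shown there is importable as tanuki.validator with its pydantic dependency installed:

from typing import Dict, List
from tanuki.validator import Validator  # assumed import path for the module shown in the row

validator = Validator()

# check_type validates deserialized JSON against a type hint
assert validator.check_type([1, 2, 3], List[int])
assert not validator.check_type([1, "two"], List[int])

# instantiate rebuilds JSON-compatible data as the requested container,
# coercing element types along the way; List[int] is a subscripted generic,
# so the completed header falls through to returning a plain list
assert validator.instantiate([1, 2, 3], List[int]) == [1, 2, 3]
assert validator.instantiate({"a": "1"}, Dict[str, int]) == {"a": 1}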
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>import json from abc import abstractmethod from typing import Dict, Any, Literal from tanuki.bloom_filter import BloomFilter from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \ POSITIVE_FILE_EXTENSION, NEGATIVE_FILE_EXTENSION, PATCH_FILE_EXTENSION from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence from tanuki.trackers.dataset_worker import DatasetWorker from tanuki.models.function_config import FunctionConfig # PATCH_FILE_EXTENSION_TYPE = Literal[".patches"] # ALIGN_FILE_EXTENSION_TYPE = Literal[".alignments"] # POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".positive_embedding"] # NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".negative_embedding"] # # PATCH_FILE_EXTENSION: PATCH_FILE_EXTENSION_TYPE = ".patches" # ALIGN_FILE_EXTENSION: ALIGN_FILE_EXTENSION_TYPE = ".alignments" # POSITIVE_EMBEDDING_FILE_EXTENSION: POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_positives" # NEGATIVE_EMBEDDING_FILE_EXTENSION: NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_negatives" # # EXPECTED_ITEMS = 10000 # FALSE_POSITIVE_RATE = 0.01 # LIB_NAME = "tanuki" # ENVVAR = "TANUKI_LOG_DIR" class ABCBufferedLogger(DatasetWorker): def __init__(self, name, level=15): self.buffers = {} self.mapped_files = {} self.miss_count = 0 self.hit_count = 0 self.flush_limit = {} self.buffer_rolling_size = {} self.write_count = 0 self.write_limit = 1000 # Save the Bloom filter every 1000 writes super().__init__(name, level) self.bloom_filter = self.create_bloom_filter() self.load_bloom_filter() self.default_function_config = FunctionConfig() @abstractmethod def get_bloom_filter_persistence(self) -> IBloomFilterPersistence: """ Get an instance of the bloom filter persistence provider. This exposes some persistent file storage, that must support reading and writing raw byte streams. :return: """ pass @abstractmethod def load_existing_datasets(self) -> Dict[str, Dict[str, Any]]: """ Get the lengths of all datasets backing the registered functions, including aligns. :return: """ pass @abstractmethod def ensure_persistence_location_exists(self): """ Ensure that the place we will be writing to actually exists. If not, create it. """ pass @abstractmethod def get_patch_location_for_function(self, func_hash, extension="") -> str: """ Get the address of the function patch file. :param func_hash: The representation of the function :param extension: Whether this is a patch or an alignment :return: """ pass @abstractmethod def write(self, path, data, mode="a") -> None: pass @abstractmethod def read(self, path) -> str: pass @abstractmethod def get_hash_from_path(self, path) -> str: pass @abstractmethod def does_object_exist(self, path) -> bool: pass def create_bloom_filter(self): bloom_filter_persistence = self.get_bloom_filter_persistence() bloom_filter = BloomFilter( bloom_filter_persistence, expected_number_of_elements=EXPECTED_ITEMS, false_positive_probability=FALSE_POSITIVE_RATE) return bloom_filter def load_bloom_filter(self): try: self.bloom_filter.load() except FileNotFoundError: self.debug("No Bloom filter found. 
Creating a new one.") def write_symbolic_align_call(self, func_hash, example) -> bool: log_file_path = self.get_patch_location_for_function(func_hash, extension=ALIGN_FILE_EXTENSION) try: # Now, write to the file dumpable_object = str(example.__dict__) self.write(log_file_path, dumpable_object + "\n", mode="a") return True except Exception as e: return False def write_embeddable_align_call(self, func_hash, example, positive=True) -> bool: if positive: log_file_path = self.get_patch_location_for_function(func_hash, extension=POSITIVE_FILE_EXTENSION) else: log_file_path = self.get_patch_location_for_function(func_hash, extension=NEGATIVE_FILE_EXTENSION) try: # Now, write to the file dumpable_object = str(example.__dict__) self.write(log_file_path, dumpable_object + "\n", mode="a") return True except Exception as e: return False def log_embeddable_align(self, func_hash, example, positive=True, **kws): """ Log a contrastive function invocation Args: func_hash: A string representation of the function signature and input parameters example: The example object positive: Whether the example is positive or negative **kws: """ successfully_saved, new_datapoint = False, False try: self.ensure_persistence_location_exists() except Exception as e: return successfully_saved, new_datapoint # prepend the function hash to the example bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n' # Check Bloom Filter if self.bloom_filter.lookup(bloom_filter_representation): return successfully_saved, new_datapoint new_datapoint = True # add to bloom filter self.bloom_filter.add(bloom_filter_representation) self.save_bloom_filter() successfully_saved = self.write_embeddable_align_call(func_hash, example, positive) return successfully_saved, new_datapoint def log_symbolic_align(self, func_hash, *args, **kws): """ Log an align function invocation to the file system :param func_hash: A string representation of the function signature and input parameters :param args: Example objects :param kws: :return: """ successfully_saved, new_datapoint = False, False try: self.ensure_persistence_location_exists() except Exception as e: return successfully_saved, new_datapoint example = args[0] # prepend the function hash to the example bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n' # Check Bloom Filter if self.bloom_filter.lookup(bloom_filter_representation): return successfully_saved, new_datapoint new_datapoint = True # add to bloom filter self.bloom_filter.add(bloom_filter_representation) self.save_bloom_filter() successfully_saved = self.write_symbolic_align_call(func_hash, example) return successfully_saved, new_datapoint def log_symbolic_patch(self, func_hash, example): """ Log a patched function invocation to the file system :param func_hash: A string representation of the function signature and input parameters :param example: :return: """ if not isinstance(func_hash, str): func_hash = str(func_hash) example_data = str(example.__dict__).encode('utf-8') + b'\n' bloom_filter_representation = func_hash + '_' + example_data.decode('utf-8') # Check Bloom Filter if self.bloom_filter.lookup(bloom_filter_representation): self.hit_count += 1 return {} self.miss_count += 1 # Add to Bloom Filter self.bloom_filter.add(bloom_filter_representation) try: self.ensure_persistence_location_exists() except Exception as e: return {} log_file_path = self.get_patch_location_for_function(func_hash, extension=PATCH_FILE_EXTENSION) if log_file_path <fim_suffix> self.buffers: 
self.buffers[log_file_path] = bytearray() if log_file_path not in self.flush_limit: self.flush_limit[log_file_path] = 1 self.buffers[log_file_path].extend(example_data) self.write_count += 1 if log_file_path not in self.buffer_rolling_size: self.buffer_rolling_size[log_file_path] = 1 else: self.buffer_rolling_size[log_file_path] += 1 if self.write_count >= self.write_limit: written_datapoints = self.flush() self.save_bloom_filter() self.write_count = 0 # Reset counter return written_datapoints if len(self.buffers[log_file_path]) >= min(self.flush_limit[log_file_path], 4096): # Flush after reaching 4KB written_datapoints = {} try: self.write(log_file_path, self.buffers[log_file_path], mode="a+b") # update buffers written_datapoints[func_hash] = self.buffer_rolling_size[log_file_path] self.buffers[log_file_path].clear() self.buffer_rolling_size[log_file_path] = 0 self.flush_limit[log_file_path] = 2 * self.flush_limit[log_file_path] self.save_bloom_filter() except Exception as e: pass return written_datapoints return {} def save_bloom_filter(self): try: self.bloom_filter.save() except Exception as e: self.warning("Could not save Bloom filter: {}".format(e)) def flush(self): # get log directory written_datapoints = {} for log_file_path, buffer in self.buffers.items(): if len(buffer) > 0: try: self.write(log_file_path, buffer, mode="a+b") written_datapoints[self.get_hash_from_path(log_file_path)] = self.buffer_rolling_size[log_file_path] self.buffer_rolling_size[log_file_path] = 0 buffer.clear() except Exception as e: pass return written_datapoints def load_function_config(self, func_hash): """ Get the config file for the function. Uses the message and log directory Config file has to be in .json """ default = False try: # try to get the config from the disk. If inaccessible, create a new default one self.ensure_persistence_location_exists() log_file_path = self.get_patch_location_for_function(func_hash) config_path = f"{log_file_path}.json" if not self.does_object_exist(config_path): function_config = self.default_function_config default = True func_config_dict = function_config.to_dict() # remove teacher_models from the config func_config_dict.pop("teacher_models") self.write_json(config_path, func_config_dict) else: function_config = FunctionConfig().load_from_dict(self.read_json(config_path)) except Exception as e: function_config = self.default_function_config default = True return function_config, default def update_function_config(self, func_hash, config_to_be_saved): """ Save the config file """ log_file_path = self.get_patch_location_for_function(func_hash) config_path = f"{log_file_path}.json" try: func_config_dict = config_to_be_saved.to_dict() # remove teacher_models from the config func_config_dict.pop("teacher_models") self.write_json(config_path, func_config_dict) except Exception as e: pass def write_json(self, path, data): self.write(path, json.dumps(data)) def read_json(self, path): return json.loads(self.read(path)) <fim_middle>not in
not in
IF
complete_current_header_inner_block_completion
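The completion in this row (the "not in" membership test inside log_symbolic_patch) guards lazy creation of a per-file byte buffer. Below is a standalone sketch of that buffering pattern, written from scratch rather than taken from tanuki; the BufferedWriter name and the fixed 4 KB threshold are illustrative only.

from typing import Dict

class BufferedWriter:
    def __init__(self, flush_threshold: int = 4096):
        self.buffers: Dict[str, bytearray] = {}
        self.flush_threshold = flush_threshold

    def log(self, path: str, record: bytes) -> None:
        if path not in self.buffers:           # create the buffer lazily, as in the row above
            self.buffers[path] = bytearray()
        self.buffers[path].extend(record)
        if len(self.buffers[path]) >= self.flush_threshold:
            self.flush(path)

    def flush(self, path: str) -> None:
        with open(path, "ab") as f:            # append the buffered bytes, then reset the buffer
            f.write(self.buffers[path])
        self.buffers[path].clear()

The logger in the row additionally deduplicates datapoints through a Bloom filter and doubles the per-file flush limit after each flush; the sketch leaves both out for brevity.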
<filename>tanuki_py/src/tanuki/language_models/language_model_manager.py<fim_prefix>import json from typing import Any, Dict from tanuki.function_modeler import FunctionModeler from tanuki.language_models.llm_api_abc import LLM_API from tanuki.models.function_description import FunctionDescription from tanuki.models.function_example import FunctionExample from tanuki.models.language_model_output import LanguageModelOutput from tanuki.utils import approximate_token_count from tanuki.validator import Validator from tanuki.models.api_manager import APIManager from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig import logging class LanguageModelManager(object): """ The LanguageModelManager is responsible for managing the language models and their outputs operationally, this includes: - Generating outputs from the language models - Repairing outputs from the language models - Saving outputs from the language models - Finetuning the language models from the saved outputs """ def __init__(self, function_modeler: FunctionModeler, api_provider: APIManager, generation_token_limit=512,) -> None: self.api_provider = api_provider self.function_modeler = function_modeler self.default_generation_length = generation_token_limit self.initialized_functions = {} self.token_counts = {} def __call__(self, args, function_description: FunctionDescription, kwargs, validator: Validator, generation_parameters: dict) -> Any: # add the generation length if not there if "max_new_tokens" not in generation_parameters: generation_parameters["max_new_tokens"] = self.default_generation_length output = self.generate(args, kwargs, function_description, generation_parameters) # start parsing the object, very hacky way for the time being choice_parsed = self._parse_choice(output) valid = validator.check_type(choice_parsed, function_description.output_type_hint) if not valid: choice, choice_parsed, successful_repair = self.repair_output(args, kwargs, function_description, output.generated_response, validator, generation_parameters) if not successful_repair: raise TypeError( f"Output type was not valid. 
Expected an object of type {function_description.output_type_hint}, got '{output.generated_response}'") output.generated_response = choice output.distilled_model = False datapoint = FunctionExample(args, kwargs, output.generated_response) if output.suitable_for_finetuning and not output.distilled_model: self.function_modeler.postprocess_symbolic_datapoint(function_description.__hash__(), function_description, datapoint, repaired=not valid) instantiated = validator.instantiate(choice_parsed, function_description.output_type_hint) return instantiated def _parse_choice(self, output): try: # json load choice_parsed = json.loads(output.generated_response) except: # if it fails, it's not a json object, try eval try: choice_parsed = eval(output.generated_response) except: choice_parsed = output.generated_response return choice_parsed def generate(self, args, kwargs, function_description, llm_parameters={}): """ The main generation function, given the args, kwargs, function description and model type, generate a response and check if the datapoint can be saved to the finetune dataset """ func_hash = function_description.__hash__() prompt, model, save_to_finetune, is_distilled_model = self.get_generation_case(args, kwargs, function_description, llm_parameters, func_hash) # loggings current_function_setup = self.initialized_functions.get(func_hash, None) # getting the current function setup - model and align statements if current_function_setup: generator_model = current_function_setup["model"] if is_distilled_model: logging.info(f"Generating function outputs for {function_description.name} with a finetuned model: {model.model_name}.") self.initialized_functions[func_hash]["model"] = model.model_name elif generator_model == "": logging.info(f"Found {len(current_function_setup['examples'])} align statements for {function_description.name}. 
Generating function outputs with {model.model_name}.") self.initialized_functions[func_hash]["model"] = model.model_name elif generator_model != model.model_name: logging.info(f"Switching output generation from {generator_model} to {model.model_name} for function {function_description.name}.") self.initialized_functions[func_hash]["model"] = model.model_name choice = self._synthesise_answer(prompt, model, llm_parameters) output = LanguageModelOutput(choice, save_to_finetune, is_distilled_model) return output def _synthesise_answer(self, prompt, model, llm_parameters): """ Synthesise an answer given the prompt, model, model_type and llm_parameters Args: prompt (str): The prompt to send to the model model (BaseModelConfig): The model to use for generation llm_parameters (dict): The parameters to use for generation return: choice (str): The generated response """ system_message = model.system_message return self.api_provider[model.provider].generate(model, system_message, prompt, **llm_parameters) def get_generation_case(self, args, kwargs, function_description, llm_parameters, func_hash): """ Get the generation case with the correct prompt and model First get the current model, then if distilled model, do zero-shot prompt and return False as suitable_for_finetune If not distilled model, check if suitable for finetuning, create the prompt and return the correct model given the token count """ f = str(function_description.__dict__.__repr__()) distilled_model, teacher_models = self.function_modeler.get_models(function_description) is_distilled_model = distilled_model.model_name != "" suitable_for_distillation, input_prompt_token_count = self.suitable_for_finetuning_token_check(args, kwargs, f, distilled_model) if <fim_suffix> not in self.initialized_functions: # initialise the initialized_functions dict self.initialized_functions[func_hash] = {"model": "", "examples": []} # no examples needed, using a finetuned model. Dont save to finetune dataset if is_distilled_model and suitable_for_distillation: prompt = self.construct_prompt(f, args, kwargs, [], distilled_model) return prompt, distilled_model, suitable_for_distillation, True else: aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=16) examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in aligns] # update the examples in the initialized_functions dict self.initialized_functions[func_hash]["examples"] = examples examples_token_count = sum([approximate_token_count(example) for example in examples]) generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length) model = self.choose_model_from_tokens(teacher_models, examples_token_count + input_prompt_token_count + generation_tokens, len(examples)) if model: examples_with_parsing_tokens = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput:{model.parsing_helper_tokens['start_token']}{align['output']}{model.parsing_helper_tokens['end_token']}" for align in aligns] prompt = self.construct_prompt(f, args, kwargs, examples_with_parsing_tokens, model) return prompt, model, suitable_for_distillation, False else: raise ValueError( "The input content and align statements combined are too long, please shorten it. 
The maximum currently allowed token limit is 32000") def suitable_for_finetuning_token_check(self, args, kwargs, f, distilled_model: BaseModelConfig): """ Check if the inputs are suitable for finetuning, i.e are below the finetuning token count """ # check if finetunable finetuning_prompt = f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:" input_prompt_token_count = approximate_token_count(finetuning_prompt) if distilled_model.system_message_token_count < 0: distilled_model.system_message_token_count = approximate_token_count(distilled_model.system_message) if distilled_model.instruction_token_count < 0: distilled_model.instruction_token_count = approximate_token_count(distilled_model.instructions) suitable_for_finetune = input_prompt_token_count + distilled_model.instruction_token_count + distilled_model.system_message_token_count < distilled_model.context_length return suitable_for_finetune, input_prompt_token_count def construct_prompt(self, f, args, kwargs, examples, model): """ Construct a prompt given the model, function description, args, kwargs and examples Args: model (BaseModelConfig): The model to use for generation f (str): The function description args (tuple): The args of the function kwargs (tuple): The kwargs of the function examples (list): The examples of the function Returns: content (str): The prompt to send to the model """ if examples: final_examples = "\n".join( [f"{align}" for align in examples]) example_input = f"Examples:{final_examples}\n" else: example_input = "" instruction_prompt = model.instructions content = f"{instruction_prompt}\nFunction: {f}\n{example_input}---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:" return content def repair_generate(self, args, kwargs, f, failed_outputs_list, aligns, models, llm_parameters): """ Repair the output given the input, function description, failed outputs list, examples and models """ # get the token counts examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in aligns] examples_token_count = sum([approximate_token_count(example) for example in examples]) failed_examples_token_count = sum([approximate_token_count(failed_output[0]) + approximate_token_count(failed_output[1]) for failed_output in failed_outputs_list]) input_prompt_token_count = approximate_token_count(f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:") generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length) model = self.choose_model_from_tokens(models, examples_token_count+input_prompt_token_count+generation_tokens+failed_examples_token_count, len(examples)) if model: prompt = self.generate_repair_prompt(args, kwargs, f, failed_outputs_list, examples, model) logging.info(f"Previous output failed type validation, attempting to repair with {model.model_name}") choice = self._synthesise_answer(prompt, model, llm_parameters) return choice else: return None def generate_repair_prompt(self, args, kwargs, f, failed_outputs_list, examples, model): """ Generate a repair prompt given the args, kwargs, function description, failed outputs list and examples """ if examples: final_examples = "\n".join( [f"{model.parsing_helper_tokens['start_token']}{align}{model.parsing_helper_tokens['end_token']}" for align in examples]) successful_examples = f"Examples:{final_examples}\n" else: successful_examples = "" failed_examples = "" for failed_output in failed_outputs_list: failed_examples += f"Output: {failed_output[0]}\nError: 
{failed_output[1]}\n\n" end_token_addition = "" if model.parsing_helper_tokens["end_token"]: end_token_addition = f"Make sure to add the {model.parsing_helper_tokens['end_token']} token at the end of the output." prompt = f"{model.repair_instruction}{end_token_addition}\nFUNCTION DESCRIPTION: {f}\n{successful_examples}---{model.parsing_helper_tokens['start_token']}Inputs:\nArgs: {args}\nKwargs: {kwargs}\nFAILED EXAMPLES: {failed_examples}Correct output:" return prompt def choose_model_from_tokens(self, models, input_token_count, nr_of_examples=0): """ Choose a model from the models given the token count and number of examples Args: models (list): The models to choose from input_token_count (int): The token count of the input nr_of_examples (int): The number of examples Returns: model (BaseModelConfig): The chosen model """ for model in models: # check if input token count is less than the context length # If the model config has custom messages, then use those, otherwise use the default ones if model.system_message_token_count < 0: model.system_message_token_count = approximate_token_count(model.system_message) if model.instruction_token_count < 0: model.instruction_token_count = approximate_token_count(model.instructions) if model.parsing_helper_tokens["start_token"]: input_token_count += 2*nr_of_examples if model.parsing_helper_tokens["end_token"]: input_token_count += 2*nr_of_examples total_token_count = input_token_count + model.instruction_token_count + model.system_message_token_count if total_token_count < model.context_length: return model return None def repair_output(self, args: tuple, kwargs: dict, function_description: FunctionDescription, choice, validator: Validator, generation_parameters: dict) -> tuple: """ Repair an output, that failed type validation by generating a new output using the teacher model and the error Args: args (tuple): The args of the function kwargs (dict): The kwargs of the function function_description (FunctionDescription): The function description choice: The output that failed type validation, type is arbitrary validator (Validator): The validator object Returns: choice (str): The choice that was generated by the language model choice_parsed: The parsed choice, type is arbitrary valid (bool): Whether the output was correctly repaired was valid """ # get the teacher models teacher_models = self.function_modeler.get_models(function_description)[1] valid = False retry_index = 5 f = str(function_description.__dict__.__repr__() + "\n") error = f"Output type was not valid. 
Expected an valid object of type {function_description.output_type_hint}, got '{choice}'" # instantiate the failed outputs list failed_outputs_list = [(choice, error)] while retry_index > 0 and not valid: # get the alignments aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=5) # Generate the reparied LLM output choice = self.repair_generate(args, kwargs, f, failed_outputs_list, aligns, teacher_models, generation_parameters) if not choice: # if no choice then the input was too long for the model # no specific error but the retry index goes down retry_index -= 1 continue # start parsing the object try: # json load choice_parsed = json.loads(choice) except: # if it fails, it's not a json object, try eval try: choice_parsed = eval(choice) except: choice_parsed = choice valid = validator.check_type(choice_parsed, function_description.output_type_hint) if not valid: # if it's not valid, add it to the failed outputs list error = f"Output type was not valid. Expected an object of type {function_description.output_type_hint}, got '{choice}'" failed_outputs_list.append((choice, error)) retry_index -= 1 if valid: logging.info(f"Successfully repaired output.") return choice, choice_parsed, valid <fim_middle>func_hash
func_hash
IF
complete_current_header_inner_block_completion
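The target here, func_hash, completes the membership test that lazily registers per-function state in LanguageModelManager.get_generation_case. A minimal sketch of that pattern follows, reusing the {"model": "", "examples": []} entry shape shown in the row; the ensure_tracked helper and the model name are illustrative.

initialized_functions: dict = {}

def ensure_tracked(func_hash: str) -> dict:
    # the first call for a given hash creates the default entry, mirroring
    # "if func_hash not in self.initialized_functions:" in the row above
    if func_hash not in initialized_functions:
        initialized_functions[func_hash] = {"model": "", "examples": []}
    return initialized_functions[func_hash]

state = ensure_tracked("abc123")
state["model"] = "gpt-4"   # recorded once a generation succeeds with this model
assert initialized_functions["abc123"]["model"] == "gpt-4"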
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if <fim_suffix> Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>self.is_base_type(target_type) or target_type is
self.is_base_type(target_type) or target_type is
IF
complete_current_header_inner_block_completion
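This row's completion restores the guard that routes built-ins and Any through direct construction in Validator.instantiate. A short sketch of what that branch does at runtime, again assuming the module is importable as tanuki.validator:

from typing import Any
from tanuki.validator import Validator  # assumed import path, as above

validator = Validator()

assert validator.instantiate("3", int) == 3              # plain int() conversion
assert validator.instantiate("3.5", int) == 3            # falls back to int(float(...)) per the row
assert validator.instantiate({"k": 1}, Any) == {"k": 1}  # Any returns the data unchanged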
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if <fim_suffix> is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, 
list) and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>origin
origin
IF
complete_current_header_inner_block_completion
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin <fim_suffix> list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or 
not self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>==
==
IF
complete_current_header_inner_block_completion
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if <fim_suffix> == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>origin
origin
IF
complete_current_header_inner_block_completion
<filename>tanuki_py/src/tanuki/persistence/filter/filesystem_bloom.py<fim_prefix>import os from bitarray._bitarray import bitarray from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence class BloomFilterFileSystemDriver(IBloomFilterPersistence): """ This is a Filesystem implementation of a Bloom Filter persistence layer. """ def __init__(self, log_directory: str): self.log_directory = log_directory def save(self, bit_array: bitarray) -> None: """ Write a bloom filter array of bits to the local filesystem. :param bloom_filter: A bloom filter which tracks unique function invocations """ bloom_filter_path = os.path.join(self.log_directory, 'bloom_filter_state.bin') # Append 0 bits to make the length a multiple of 8 while len(bit_array) % 8 != 0: bit_array.append(0) with open(bloom_filter_path, 'wb') as f: f.write(bit_array.tobytes()) def load(self) -> bitarray: """ Load a bloom filter from the local filesystem. :return: A bloom filter object containing the state of unique function invocations """ bloom_filter_path = <fim_suffix> 'bloom_filter_state.bin') with open(bloom_filter_path, 'rb') as f: bit_array = bitarray() bit_array.frombytes(f.read()) while len(bit_array) % 8 != 0: bit_array.append(0) return bit_array<fim_middle>os.path.join(self.log_directory,
os.path.join(self.log_directory,
STATEMENT
complete_current_header_inner_block_completion
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin <fim_suffix> get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>=
=
STATEMENT
complete_current_header_inner_block_completion
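The row above masks the assignment in `origin = get_origin(target_type)` inside `_find_generic_base_and_args`. For reference, here is a small self-contained sketch (not part of the dataset; the `IntList` class is made up for illustration) of the `typing` introspection that statement relies on: parameterized annotations expose their origin and arguments directly, while concrete subclasses only reveal them through `__orig_bases__` or the MRO, which is exactly why the method falls back to walking those.

```python
from typing import Dict, List, get_args, get_origin

# Parameterized annotations answer directly.
print(get_origin(List[int]), get_args(List[int]))            # list (int,)
print(get_origin(Dict[str, int]), get_args(Dict[str, int]))  # dict (str, int)


class IntList(List[int]):
    """A concrete class whose type arguments live on its generic base."""


# A real class gives nothing at the top level ...
print(get_origin(IntList), get_args(IntList))                # None ()
# ... so the arguments have to be recovered from __orig_bases__ / the MRO.
print(get_args(IntList.__orig_bases__[0]))                   # (int,)
```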
<filename>tanuki_py/src/tanuki/function_modeler.py<fim_prefix>import ast import datetime import io import json from typing import List, Tuple, Dict, Union import logging from tanuki.constants import EXAMPLE_ELEMENT_LIMIT, PATCHES, SYMBOLIC_ALIGNMENTS, POSITIVE_EMBEDDABLE_ALIGNMENTS, \ NEGATIVE_EMBEDDABLE_ALIGNMENTS, OPENAI_PROVIDER from tanuki.models.function_type import FunctionType from tanuki.language_models.llm_configs import DEFAULT_TEACHER_MODELS, DEFAULT_EMBEDDING_MODELS, DEFAULT_STUDENT_MODELS from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig from tanuki.language_models.llm_finetune_api_abc import LLM_Finetune_API from tanuki.models.finetune_job import FinetuneJob from tanuki.models.function_description import FunctionDescription from tanuki.models.function_example import FunctionExample from tanuki.trackers.dataset_worker import DatasetWorker from tanuki.utils import approximate_token_count, prepare_object_for_saving, encode_int, decode_int import copy from tanuki.models.function_config import FunctionConfig from tanuki.models.api_manager import APIManager class FunctionModeler(object): """ This class manages the registered function models and their datasets comprised of symbolic and embeddable alignments, and symbolic and embeddable patches """ def __init__(self, data_worker: DatasetWorker, api_provider: APIManager, environment_id=0, ) -> None: self.function_configs <fim_suffix> {} self.data_worker = data_worker self.distillation_token_limit = 3000 # the token limit for finetuning self.symbolic_align_buffer = {} self.embeddable_align_buffer = {} self._get_datasets() self.environment_id = environment_id self.check_finetune_blacklist = [] self.execute_finetune_blacklist = [] self.store_data_blacklist = [] self.api_provider = api_provider self.teacher_models_override = {} self.student_model_override = {} self.startup_logging_checker = {} def _get_dataset_info(self, dataset_type, func_hash, type="length"): """ Get the dataset size for a function hash """ return self.data_worker.load_dataset(dataset_type, func_hash, return_type=type) def _configure_function_models(self, teacher_models: List[Union[str, BaseModelConfig]], student_model: str, func_hash: str, task_type: str): """ Configure the function models """ if teacher_models: self._configure_teacher_models(teacher_models, func_hash, task_type) if student_model: self._configure_student_model(student_model, func_hash, task_type) if teacher_models and not student_model: for model_config in self.teacher_models_override[func_hash]: # ban all non-openai models from finetuning if teacher is not openai and student is not specified because it doesnt make sense if model_config.provider != OPENAI_PROVIDER and func_hash not in self.check_finetune_blacklist: self.check_finetune_blacklist.append(func_hash) if model_config.provider != OPENAI_PROVIDER and func_hash not in self.execute_finetune_blacklist: self.execute_finetune_blacklist.append(func_hash) def _configure_teacher_models(self, teacher_models: List[Union[str, BaseModelConfig]], func_hash: str, task_type: str): """ Add custom teacher models to the function config First this is added to the teacher_models_override dict, which is used to override the teacher models Args: teacher_models: A list of teacher models to use for the function hash func_hash: The function hash to add the teacher models to """ if func_hash not in self.teacher_models_override: self.teacher_models_override[func_hash] = [] if task_type == FunctionType.EMBEDDABLE: preconfigured_models = 
DEFAULT_EMBEDDING_MODELS elif task_type == FunctionType.SYMBOLIC: preconfigured_models = DEFAULT_TEACHER_MODELS for model in teacher_models: if isinstance(model, str): if model not in preconfigured_models: raise Exception(f"Teacher model {model} not supported by default. Please include it in the list in extended config format") model_config = preconfigured_models[model] elif isinstance(model, BaseModelConfig): model_config = model self.teacher_models_override[func_hash].append(model_config) def _configure_student_model(self, student_model: str, func_hash: str, task_type: str): """ Add custom student models to the function config First this is added to the teacher_models_override dict, which is used to override the teacher models Args: teacher_models: A list of teacher models to use for the function hash func_hash: The function hash to add the teacher models to """ if task_type == FunctionType.EMBEDDABLE: logging.info("Embeddable function type does not support student models") preconfigured_models = DEFAULT_STUDENT_MODELS if student_model not in preconfigured_models: raise Exception(f"Student model {student_model} is currently not supported.") model_config = preconfigured_models[student_model] self.student_model_override[func_hash] = model_config def _get_datasets(self): """ Get the existing datasets from the data worker """ self.dataset_sizes = self.data_worker.load_existing_datasets() def save_embeddable_align_statements(self, function_hash: str, args, kwargs, positive_pairs: List[Tuple[List, Dict]], negative_pairs: List[Tuple[List, Dict]]): """ Save the contrastive align statements for the embeddable function. Do not save if the function hash is in the store data blacklist Args: function_hash: A unique hash for the function args: The arguments of the function kwargs: The keyword arguments of the function positive_pairs: A list of the other function invocations that are should have equivalent embeddings negative_pairs: A list of the other function invocations that are should have different embeddings """ # prepare args and kwargs for saving copy_args = copy.deepcopy(args) copy_kwargs = copy.deepcopy(kwargs) parsed_args = prepare_object_for_saving(copy_args) parsed_kwargs = prepare_object_for_saving(copy_kwargs) # prepare positive pairs for saving parsed_positive_pairs = [] for pair in positive_pairs: copy_pair = copy.deepcopy(pair) parsed_pair = prepare_object_for_saving(copy_pair) parsed_positive_pairs.append(parsed_pair) # prepare negative pairs for saving parsed_negative_pairs = [] for pair in negative_pairs: copy_pair = copy.deepcopy(pair) parsed_pair = prepare_object_for_saving(copy_pair) parsed_negative_pairs.append(parsed_pair) # save the contrastive pairs for pair in parsed_positive_pairs: self._save_contrastive_alignment_pair(function_hash, parsed_args, parsed_kwargs, pair, positive=True) for pair in parsed_negative_pairs: self._save_contrastive_alignment_pair(function_hash, parsed_args, parsed_kwargs, pair, positive=False) def _save_contrastive_alignment_pair(self, function_hash: str, args, kwargs, pair, positive=True): """ Save a contrastive pair """ example = FunctionExample(args, kwargs, pair) if function_hash not in self.store_data_blacklist: successfully_saved, new_datapoint = self.data_worker.log_embeddable_align(function_hash, example, positive) else: successfully_saved = False new_datapoint = True if successfully_saved: if positive: if function_hash in self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS]: self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 
1 else: self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1 if not positive: if function_hash in self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS]: self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1 else: self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1 if new_datapoint: # update align buffer if function_hash not in self.embeddable_align_buffer: self.embeddable_align_buffer[function_hash] = bytearray() self.embeddable_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n') def save_symbolic_align_statements(self, function_hash, args, kwargs, output): """ Save the align statements and add to the align buffer Do not save if the function hash is in the store data blacklist Then just add the datapoints to the align buffer """ # prepare output for saving and later parsing # make a deepcopy of the output to avoid changing the original object copy_output = copy.deepcopy(output) parsed_output = prepare_object_for_saving(copy_output) # prepare args and kwargs for saving copy_args = copy.deepcopy(args) copy_kwargs = copy.deepcopy(kwargs) parsed_args = prepare_object_for_saving(copy_args) parsed_kwargs = prepare_object_for_saving(copy_kwargs) example = FunctionExample(parsed_args, parsed_kwargs, parsed_output) if function_hash not in self.store_data_blacklist: successfully_saved, new_datapoint = self.data_worker.log_symbolic_align(function_hash, example) else: successfully_saved = False new_datapoint = True if successfully_saved: if function_hash in self.dataset_sizes[SYMBOLIC_ALIGNMENTS]: self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] += 1 else: self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = 1 if new_datapoint: # update align buffer if function_hash not in self.symbolic_align_buffer: self.symbolic_align_buffer[function_hash] = bytearray() self.symbolic_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n') def save_symbolic_datapoint(self, func_hash, example): """ Save datapoint to the training data """ written_datapoints = self.data_worker.log_symbolic_patch(func_hash, example) for func_hash, datapoints in written_datapoints.items(): if func_hash in self.dataset_sizes[PATCHES]: # if the dataset size is -1, it means we havent read in the dataset size yet if self.dataset_sizes[PATCHES][func_hash] == -1: self.dataset_sizes[PATCHES][func_hash] = self._get_dataset_info(PATCHES, func_hash, type="length") else: self.dataset_sizes[PATCHES][func_hash] += datapoints else: self.dataset_sizes[PATCHES][func_hash] = datapoints return len(written_datapoints) > 0 def get_symbolic_alignments(self, func_hash, max=20): """ Get all symbolic aligns for a function hash """ if func_hash not in self.symbolic_align_buffer: return [] buffer = self.symbolic_align_buffer[func_hash] return self._get_examples_from_alignment_buffer(buffer, max) def get_embeddable_alignments(self, func_hash, max=20): """ Get all embeddable aligns for a function hash """ if func_hash not in self.embeddable_align_buffer: return [] buffer = self.embeddable_align_buffer[func_hash] return self._get_examples_from_alignment_buffer(buffer, max) def _get_examples_from_alignment_buffer(self, buffer, max=20): """ Get examples from a buffer """ split_buffer = bytes(buffer).split(b"\n") # byte array of stringed python dicts into dict objects example_set = set() for example in split_buffer: if example == b"": continue example_set.add(example) # easy and straightforward way to get nr of words (not perfect but doesnt need to be) 
# Can do the proper way of tokenizing later, it might be slower and we dont need 100% accuracy example_element_limit = EXAMPLE_ELEMENT_LIMIT examples = [] for example_bytes in split_buffer: if example_bytes in example_set: nr_of_elements = approximate_token_count(example_bytes) example_element_limit -= nr_of_elements if example_element_limit < 0: break example = example_bytes.decode('utf-8') # json load the example try: example = json.loads(example) except: example = ast.literal_eval(example) examples.append(example) example_set.remove(example_bytes) return list(examples)[:max] def load_symbolic_align_statements(self, function_hash): """ Load all align statements First check the data storage blacklist, if the func hash is in the blacklist, then set the dataset size to 0 and the align buffer to empty bytearray """ if function_hash in self.store_data_blacklist: self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = 0 self.symbolic_align_buffer[function_hash] = bytearray() elif function_hash not in self.symbolic_align_buffer: dataset_size, align_dataset = self._get_dataset_info(SYMBOLIC_ALIGNMENTS, function_hash, type="both") if align_dataset: self.symbolic_align_buffer[function_hash] = bytearray(align_dataset) self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = dataset_size def postprocess_symbolic_datapoint(self, func_hash, function_description, example, repaired=True): """ Postprocess the datapoint First check if the datapoint should be added to the training data Add the datapoint if it should be added Then check if the function should be finetuned and execute finetuning if it should """ try: if func_hash not in self.store_data_blacklist: added = self.save_symbolic_datapoint(func_hash, example) if added: self._update_datapoint_config(repaired, func_hash) except Exception as e: print(e) print("Could not add datapoint to training data") if func_hash not in self.execute_finetune_blacklist: self.check_for_finetuning(function_description, func_hash) def load_function_config(self, func_hash, function_description): """ Load the config file for a function hash """ config, default = self.data_worker.load_function_config(func_hash) if func_hash in self.student_model_override and config.distilled_model.model_name == "": config.distilled_model = self.student_model_override[func_hash] if default and func_hash not in self.check_finetune_blacklist: finetuned, finetune_config = self._check_for_finetunes(function_description, config.distilled_model) if finetuned: config = finetune_config # update teachers if not default if func_hash in self.teacher_models_override: config.teacher_models = self.teacher_models_override[func_hash] self.function_configs[func_hash] = config return config def _check_for_finetunes(self, function_description: FunctionDescription, model_config : BaseModelConfig) -> Tuple[bool, Dict]: # hash the function_hash into 16 characters (to embed it into the name of OpenAI finetunes, for later retrieval) logging.info(f"Checking for finetunes for {function_description.name} using {model_config.provider}") finetune_hash = function_description.__hash__(purpose="finetune") + encode_int(self.environment_id) # List 10 fine-tuning jobs finetunes: List[FinetuneJob] = self.api_provider[model_config.provider].list_finetuned(model_config, limit=1000) # Check if the function_hash is in the fine-tuning jobs # the finetunes are in chronological order starting from newest # So this gets the latest finetune for finetune in finetunes: # check if the finetune hash is in the fine-tuned model name if 
finetune.status == "succeeded" and finetune_hash in finetune.fine_tuned_model.model_name: try: config = self._construct_config_from_finetune(finetune_hash, finetune) # save the config self.data_worker.update_function_config(function_description.__hash__(), config) logging.info(f"Found finetuned model for {function_description.name} [{config.distilled_model.model_name}]") return True, config except: logging.info(f"Found finetuned model for {function_description.name} [{finetune.fine_tuned_model.model_name}] but could not load it") return False, {} logging.info(f"No finetuned model found for {function_description.name}") return False, {} def _construct_config_from_finetune(self, finetune_hash: str, finetune: FinetuneJob): """ Construct a valid function config from a finetune job Args: finetune_hash: The hash of the function finetune: The finetune job Returns: config: The function config """ model = finetune.fine_tuned_model # get the ending location of finetune hash in the model name finetune_hash_end = model.model_name.find(finetune_hash) + len(finetune_hash) # get the next character after the finetune hash next_char = model.model_name[finetune_hash_end] # get the number of training runs nr_of_training_runs = decode_int(next_char) + 1 nr_of_training_points = (2 ** (nr_of_training_runs - 1)) * 200 config = { "distilled_model": model, "current_model_stats": { "trained_on_datapoints": nr_of_training_points, "running_faults": []}, "last_training_run": {"trained_on_datapoints": nr_of_training_points}, "current_training_run": {}, "teacher_models": [], # default teacher models, will be overwritten if needed "nr_of_training_runs": nr_of_training_runs} config = FunctionConfig().load_from_dict(config) return config def get_models(self, function_description): """ Return the current model from the config file """ func_hash = function_description.__hash__() if func_hash in self.function_configs: func_config = self.function_configs[func_hash] else: func_config = self.load_function_config(func_hash, function_description) return func_config.distilled_model, func_config.teacher_models def _update_datapoint_config(self, repaired, func_hash): """ Update the config to reflect the new datapoint in the training data First adds 1 to the current datapoints Then updates running faults depending if priority is True or not and takes last 100 Then checks the revert condition, i.e if last 10 datapoints are 50% faulty Finally updates the config file Args: priority (bool): whether the datapoint was fixed by the teacher model/should be added to the training data """ try: if repaired: self.function_configs[func_hash].current_model_stats["running_faults"].append(1) else: self.function_configs[func_hash].current_model_stats["running_faults"].append(0) # take the last 100 datapoints self.function_configs[func_hash].current_model_stats["running_faults"] = \ self.function_configs[func_hash].current_model_stats["running_faults"][-100:] # check if the last 10 datapoints are 50% faulty, this is the switch condition if sum(self.function_configs[func_hash].current_model_stats["running_faults"][-10:]) / 10 > 0.5: self.function_configs[func_hash].distilled_model.model_name = "" self.function_configs[func_hash].current_model_stats["trained_on_datapoints"] = 0 self.function_configs[func_hash].current_model_stats["running_faults"] = [] self._update_config_file(func_hash) except Exception as e: print(e) print("Could not update config file") pass def _update_config_file(self, func_hash): self.data_worker.update_function_config(func_hash, 
self.function_configs[func_hash]) def check_for_finetuning(self, function_description, func_hash): """ Check for finetuning status If already finetuning, check for finetuning status If not finetuning, check for finetuning condition and execute finetuning if condition is met """ try: # check if already finetuning if "job_id" in self.function_configs[func_hash].current_training_run: # check for job status self._check_finetuning_status(func_hash, function_description) else: # check for finetuning condition if self._check_finetuning_condition(func_hash, function_description): self._execute_finetuning(function_description, func_hash) except Exception as e: print(e) print("Error checking for finetuning") def _check_finetuning_condition(self, func_hash, function_description): """ Check if the finetuning condition is met Currently finetuning condition is dependent on the number of symbolic datapoints since last finetuning """ if func_hash not in self.function_configs: return False training_threshold = (2 ** self.function_configs[func_hash].nr_of_training_runs) * 200 align_dataset_size = self.dataset_sizes[SYMBOLIC_ALIGNMENTS][func_hash] if func_hash in self.dataset_sizes[ SYMBOLIC_ALIGNMENTS] else 0 patch_dataset_size = self.dataset_sizes[PATCHES][func_hash] if func_hash in self.dataset_sizes[PATCHES] else 0 if patch_dataset_size == -1: # if havent read in the patch dataset size, read it in patch_dataset_size = self._get_dataset_info(PATCHES, func_hash, type="length") self.dataset_sizes[PATCHES][func_hash] = patch_dataset_size if func_hash not in self.startup_logging_checker: logging.info(f"Function {function_description.name} [{align_dataset_size} aligns | {patch_dataset_size} runs] will be finetuned from"\ f" {self.function_configs[func_hash].teacher_models[0].model_name} using {self.function_configs[func_hash].distilled_model.provider} in "\ f"{training_threshold-(patch_dataset_size + align_dataset_size)} runs") self.startup_logging_checker[func_hash] = True return (patch_dataset_size + align_dataset_size) > training_threshold def _execute_finetuning(self, function_description, func_hash): """ Execute the finetuning First create the OpenAI compatible dataset with jsonL file and upload it Then submit the OpenAI finetuning job Finally update the config file to reflect the new finetuning job as current """ # get function description function_string = str(function_description.__dict__.__repr__() + "\n") # get the align dataset align_dataset = self._get_dataset_info(SYMBOLIC_ALIGNMENTS, func_hash, type="dataset") if not align_dataset: align_dataset = "" else: align_dataset = align_dataset.decode('utf-8') # get the patch dataset patch_dataset = self._get_dataset_info(PATCHES, func_hash, type="dataset") if not patch_dataset: patch_dataset = "" else: patch_dataset = patch_dataset.decode('utf-8') if align_dataset == "" and patch_dataset == "": return dataset = align_dataset + patch_dataset dataset.replace("\\n", "[SEP_TOKEN]") dataset = dataset.split("\n") dataset = [x.replace("[SEP_TOKEN]", "\\n") for x in dataset if x != ""] # read in the dataset file dataset = [ast.literal_eval(x) for x in dataset] # # create the openai dataset instruction = "You are given below a function description and input data. The function description of what the function must carry out can be found in the Function section, with input and output type hints. The input data can be found in Input section. 
Using the function description, apply the function to the Input and return a valid output type, that is acceptable by the output_class_definition and output_class_hint. Return None if you can't apply the function to the input or if the output is optional and the correct output is None.\nINCREDIBLY IMPORTANT: Only output a JSON-compatible string in the correct response format." finetuning_dataset = [{"messages": [ { "role": "system", "content": f"You are a skillful and accurate language model, who applies a described function on input data. Make sure the function is applied accurately and correctly and the outputs follow the output type hints and are valid outputs given the output types." }, {"role": "user", "content": f"{instruction}\nFunction: {function_string}---\nInputs:\nArgs: {x['args']}\nKwargs: {x['kwargs']}\nOutput:"}, {"role": "assistant", "content": str(x['output']) if x['output'] is not None else "None"}]} for x in dataset] # Create an in-memory text stream temp_file = io.BytesIO() # Write data to the stream for idx, item in enumerate(finetuning_dataset): temp_file.write(json.dumps(item).encode('utf-8')) if idx != len(finetuning_dataset) - 1: temp_file.write("\n".encode('utf-8')) # Reset the stream position to the beginning temp_file.seek(0) # create the finetune hash finetune_hash = function_description.__hash__(purpose="finetune") nr_of_training_runs = self.function_configs[func_hash].nr_of_training_runs finetune_hash += encode_int(self.environment_id) finetune_hash += encode_int(nr_of_training_runs) # here can be sure that datasets were read in as that is checked in the finetune_check align_dataset_size = self.dataset_sizes[SYMBOLIC_ALIGNMENTS][func_hash] if func_hash in self.dataset_sizes[ SYMBOLIC_ALIGNMENTS] else 0 patch_dataset_size = self.dataset_sizes[PATCHES][func_hash] if func_hash in self.dataset_sizes[PATCHES] else 0 total_dataset_size = align_dataset_size + patch_dataset_size # Use the stream as a file try: finetune_provider = self.function_configs[func_hash].distilled_model.provider logging.info(f"Starting finetuning for {function_description.name} using {finetune_provider} for {self.function_configs[func_hash].distilled_model.base_model_for_sft}") finetuning_response: FinetuneJob = self.api_provider[finetune_provider].finetune(file=temp_file, suffix=finetune_hash, model_config = self.function_configs[func_hash].distilled_model,) except Exception as e: logging.info(f"Could not start finetuning for {function_description.name} using {finetune_provider}. 
Error: {e}") return self.function_configs[func_hash].current_training_run = {"job_id": finetuning_response.id, "trained_on_datapoints": total_dataset_size, "last_checked": datetime.datetime.now().strftime( "%Y-%m-%d %H:%M:%S")} # update the config json file try: self._update_config_file(func_hash) except Exception as e: print(e) print("Could not update config file to register a finetuning run") def _check_finetuning_status(self, func_hash, function_description): """ Check the status of the current finetuning job If the job is finished, update the config file to reflect the new model """ job_id = self.function_configs[func_hash].current_training_run["job_id"] last_checked = self.function_configs[func_hash].current_training_run["last_checked"] # check if last checked was more than 30 mins ago if (datetime.datetime.now() - datetime.datetime.strptime(last_checked, "%Y-%m-%d %H:%M:%S")).total_seconds() > 1800: finetune_provider = self.function_configs[func_hash].distilled_model.provider response = self.api_provider[finetune_provider].get_finetuned(job_id, model_config = self.function_configs[func_hash].distilled_model) self.function_configs[func_hash].current_training_run["last_checked"] = datetime.datetime.now().strftime( "%Y-%m-%d %H:%M:%S") if response.status == "succeeded" or response.status == "failed": self._update_finetune_config(response, func_hash, function_description) else: self._update_config_file(func_hash) def _update_finetune_config(self, response: FinetuneJob, func_hash, function_description): """ Update the config file to reflect the new model and switch the current model to the finetuned model """ self.function_configs[func_hash].update_with_finetuned_response(response) logging.info(f"Finetuning for {function_description.name} using {self.function_configs[func_hash].distilled_model.provider} finished with status: {response.status}."\ f" The id of the finetuned model is {response.fine_tuned_model.model_name}") try: self._update_config_file(func_hash) except Exception as e: logging.info(f"Could not update the function configuration file with the finetuned model for {function_description.name}. Error: {e}") pass <fim_middle>=
=
STATEMENT
complete_current_header_inner_block_completion
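The FunctionModeler row above initializes `self.function_configs = {}` (the masked `=`) and, further down, gates finetuning on a datapoint threshold that doubles with every completed training run. The following is a simplified paraphrase of that trigger, written as a standalone function for illustration only; it is not the library's API and the function name is invented.

```python
def needs_finetune(align_points: int, patch_points: int, nr_of_training_runs: int) -> bool:
    """Return True once accumulated align + patch datapoints exceed the doubling threshold."""
    training_threshold = (2 ** nr_of_training_runs) * 200
    return (align_points + patch_points) > training_threshold


# After one prior run the threshold is 400, so 150 aligns plus 260 patched runs qualify.
print(needs_finetune(align_points=150, patch_points=260, nr_of_training_runs=1))  # True
```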
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>import json from abc import abstractmethod from typing import Dict, Any, Literal from tanuki.bloom_filter import BloomFilter from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \ POSITIVE_FILE_EXTENSION, NEGATIVE_FILE_EXTENSION, PATCH_FILE_EXTENSION from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence from tanuki.trackers.dataset_worker import DatasetWorker from tanuki.models.function_config import FunctionConfig # PATCH_FILE_EXTENSION_TYPE = Literal[".patches"] # ALIGN_FILE_EXTENSION_TYPE = Literal[".alignments"] # POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".positive_embedding"] # NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".negative_embedding"] # # PATCH_FILE_EXTENSION: PATCH_FILE_EXTENSION_TYPE = ".patches" # ALIGN_FILE_EXTENSION: ALIGN_FILE_EXTENSION_TYPE = ".alignments" # POSITIVE_EMBEDDING_FILE_EXTENSION: POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_positives" # NEGATIVE_EMBEDDING_FILE_EXTENSION: NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_negatives" # # EXPECTED_ITEMS = 10000 # FALSE_POSITIVE_RATE = 0.01 # LIB_NAME = "tanuki" # ENVVAR = "TANUKI_LOG_DIR" class ABCBufferedLogger(DatasetWorker): def __init__(self, name, level=15): self.buffers = {} self.mapped_files = {} self.miss_count = 0 self.hit_count = 0 self.flush_limit = {} self.buffer_rolling_size = {} self.write_count = 0 self.write_limit = 1000 # Save the Bloom filter every 1000 writes super().__init__(name, level) self.bloom_filter = self.create_bloom_filter() self.load_bloom_filter() self.default_function_config = FunctionConfig() @abstractmethod def get_bloom_filter_persistence(self) -> IBloomFilterPersistence: """ Get an instance of the bloom filter persistence provider. This exposes some persistent file storage, that must support reading and writing raw byte streams. :return: """ pass @abstractmethod def load_existing_datasets(self) -> Dict[str, Dict[str, Any]]: """ Get the lengths of all datasets backing the registered functions, including aligns. :return: """ pass @abstractmethod def ensure_persistence_location_exists(self): """ Ensure that the place we will be writing to actually exists. If not, create it. """ pass @abstractmethod def get_patch_location_for_function(self, func_hash, extension="") -> str: """ Get the address of the function patch file. :param func_hash: The representation of the function :param extension: Whether this is a patch or an alignment :return: """ pass @abstractmethod def write(self, path, data, mode="a") -> None: pass @abstractmethod def read(self, path) -> str: pass @abstractmethod def get_hash_from_path(self, path) -> str: pass @abstractmethod def does_object_exist(self, path) -> bool: pass def create_bloom_filter(self): bloom_filter_persistence = self.get_bloom_filter_persistence() bloom_filter = BloomFilter( bloom_filter_persistence, expected_number_of_elements=EXPECTED_ITEMS, false_positive_probability=FALSE_POSITIVE_RATE) return bloom_filter def load_bloom_filter(self): try: self.bloom_filter.load() except FileNotFoundError: self.debug("No Bloom filter found. 
Creating a new one.") def write_symbolic_align_call(self, func_hash, example) -> bool: log_file_path = self.get_patch_location_for_function(func_hash, extension=ALIGN_FILE_EXTENSION) try: # Now, write to the file dumpable_object = str(example.__dict__) self.write(log_file_path, dumpable_object + "\n", mode="a") return True except Exception as e: return False def write_embeddable_align_call(self, func_hash, example, positive=True) -> bool: if positive: log_file_path = self.get_patch_location_for_function(func_hash, extension=POSITIVE_FILE_EXTENSION) else: log_file_path = self.get_patch_location_for_function(func_hash, extension=NEGATIVE_FILE_EXTENSION) try: # Now, write to the file dumpable_object = str(example.__dict__) self.write(log_file_path, dumpable_object + "\n", mode="a") return True except Exception as e: return False def log_embeddable_align(self, func_hash, example, positive=True, **kws): """ Log a contrastive function invocation Args: func_hash: A string representation of the function signature and input parameters example: The example object positive: Whether the example is positive or negative **kws: """ successfully_saved, new_datapoint = False, False try: self.ensure_persistence_location_exists() except Exception as e: return successfully_saved, new_datapoint # prepend the function hash to the example bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n' # Check Bloom Filter if self.bloom_filter.lookup(bloom_filter_representation): return successfully_saved, new_datapoint new_datapoint = True # add to bloom filter self.bloom_filter.add(bloom_filter_representation) self.save_bloom_filter() successfully_saved = self.write_embeddable_align_call(func_hash, example, positive) return successfully_saved, new_datapoint def log_symbolic_align(self, func_hash, *args, **kws): """ Log an align function invocation to the file system :param func_hash: A string representation of the function signature and input parameters :param args: Example objects :param kws: :return: """ successfully_saved, new_datapoint = False, False try: self.ensure_persistence_location_exists() except Exception as e: return successfully_saved, new_datapoint example = args[0] # prepend the function hash to the example bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n' # Check Bloom Filter if self.bloom_filter.lookup(bloom_filter_representation): return successfully_saved, new_datapoint new_datapoint = True # add to bloom filter self.bloom_filter.add(bloom_filter_representation) self.save_bloom_filter() successfully_saved = self.write_symbolic_align_call(func_hash, example) return successfully_saved, new_datapoint def log_symbolic_patch(self, func_hash, example): """ Log a patched function invocation to the file system :param func_hash: A string representation of the function signature and input parameters :param example: :return: """ if not isinstance(func_hash, str): func_hash = str(func_hash) example_data = str(example.__dict__).encode('utf-8') + b'\n' bloom_filter_representation = func_hash + '_' + example_data.decode('utf-8') # Check Bloom Filter if self.bloom_filter.lookup(bloom_filter_representation): self.hit_count += 1 return {} self.miss_count += 1 # Add to Bloom Filter self.bloom_filter.add(bloom_filter_representation) try: self.ensure_persistence_location_exists() except Exception as e: return {} log_file_path = self.get_patch_location_for_function(func_hash, extension=PATCH_FILE_EXTENSION) if log_file_path not in self.buffers: self.buffers[log_file_path] = 
bytearray() if log_file_path not in self.flush_limit: self.flush_limit[log_file_path] = 1 self.buffers[log_file_path].extend(example_data) self.write_count += 1 if log_file_path not in self.buffer_rolling_size: self.buffer_rolling_size[log_file_path] = 1 else: self.buffer_rolling_size[log_file_path] += 1 if self.write_count >= self.write_limit: written_datapoints = self.flush() self.save_bloom_filter() self.write_count = 0 # Reset counter return written_datapoints if len(self.buffers[log_file_path]) >= min(self.flush_limit[log_file_path], 4096): # Flush after reaching 4KB written_datapoints = {} try: self.write(log_file_path, self.buffers[log_file_path], mode="a+b") # update buffers written_datapoints[func_hash] = self.buffer_rolling_size[log_file_path] self.buffers[log_file_path].clear() self.buffer_rolling_size[log_file_path] = 0 self.flush_limit[log_file_path] = 2 * self.flush_limit[log_file_path] self.save_bloom_filter() except Exception as e: pass return written_datapoints return {} def save_bloom_filter(self): try: self.bloom_filter.save() except Exception as e: self.warning("Could not save Bloom filter: {}".format(e)) def flush(self): # get log directory written_datapoints = {} for log_file_path, buffer in self.buffers.items(): if len(buffer) > 0: try: self.write(log_file_path, buffer, mode="a+b") written_datapoints[self.get_hash_from_path(log_file_path)] = self.buffer_rolling_size[log_file_path] self.buffer_rolling_size[log_file_path] = 0 buffer.clear() except Exception as e: pass return written_datapoints def load_function_config(self, func_hash): """ Get the config file for the function. Uses the message and log directory Config file has to be in .json """ default = False try: # try to get the config from the disk. If inaccessible, create a new default one self.ensure_persistence_location_exists() log_file_path = self.get_patch_location_for_function(func_hash) config_path <fim_suffix> f"{log_file_path}.json" if not self.does_object_exist(config_path): function_config = self.default_function_config default = True func_config_dict = function_config.to_dict() # remove teacher_models from the config func_config_dict.pop("teacher_models") self.write_json(config_path, func_config_dict) else: function_config = FunctionConfig().load_from_dict(self.read_json(config_path)) except Exception as e: function_config = self.default_function_config default = True return function_config, default def update_function_config(self, func_hash, config_to_be_saved): """ Save the config file """ log_file_path = self.get_patch_location_for_function(func_hash) config_path = f"{log_file_path}.json" try: func_config_dict = config_to_be_saved.to_dict() # remove teacher_models from the config func_config_dict.pop("teacher_models") self.write_json(config_path, func_config_dict) except Exception as e: pass def write_json(self, path, data): self.write(path, json.dumps(data)) def read_json(self, path): return json.loads(self.read(path)) <fim_middle>=
=
STATEMENT
complete_current_header_inner_block_completion
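The buffered-logger row above (masked `=` in `config_path = f"{log_file_path}.json"`) pairs a Bloom filter for deduplication with per-path byte buffers that are flushed in batches. A rough sketch of that pattern follows; a plain `set` stands in for the probabilistic Bloom filter and the flush returns the payload instead of writing to disk, so treat it as an outline of the idea rather than the logger's actual behaviour.

```python
from typing import Dict, Optional


class TinyBufferedLogger:
    """Deduplicate records, buffer them per path, and flush after a fixed count."""

    def __init__(self, flush_limit: int = 3):
        self.seen = set()                        # stand-in for the Bloom filter
        self.buffers: Dict[str, bytearray] = {}  # per-path write buffers
        self.counts: Dict[str, int] = {}
        self.flush_limit = flush_limit

    def log(self, path: str, record: str) -> Optional[bytes]:
        key = f"{path}_{record}"
        if key in self.seen:                     # duplicate record: skip the write entirely
            return None
        self.seen.add(key)
        self.buffers.setdefault(path, bytearray()).extend(record.encode("utf-8") + b"\n")
        self.counts[path] = self.counts.get(path, 0) + 1
        if self.counts[path] >= self.flush_limit:
            payload = bytes(self.buffers[path])  # emulate flushing the buffer to storage
            self.buffers[path].clear()
            self.counts[path] = 0
            return payload
        return None
```

Batching the flush keeps the duplicate check cheap while amortising the cost of the underlying writes, which is the point of the buffer-plus-filter combination in the row above.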
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type <fim_suffix> args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or 
not self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>=
=
STATEMENT
complete_current_header_inner_block_completion
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = <fim_suffix> get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>get_args(target_type)[0] if
get_args(target_type)[0] if
STATEMENT
complete_current_header_inner_block_completion
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if <fim_suffix> Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>item_types else
item_types else
STATEMENT
complete_current_header_inner_block_completion
<filename>tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py<fim_prefix>import os from enum import Enum from typing import Literal, Union, Optional, Dict from appdirs import user_data_dir from tanuki.constants import * from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence from tanuki.persistence.filter.filesystem_bloom import BloomFilterFileSystemDriver from tanuki.trackers.abc_buffered_logger import ABCBufferedLogger class FilesystemBufferedLogger(ABCBufferedLogger): """ A class that handles the reading and writing of patch invocations and align statements. It includes the logic for a bloom filter, to ensure that we only store unique invocations. """ def __init__(self, name, level=15): self.log_directory = self._get_log_directory() super().__init__(name, level) def get_bloom_filter_persistence(self) -> IBloomFilterPersistence: """ Get an instance of the bloom filter persistence provider. Typically this will be a file system provider. :return: A persistence provider """ return BloomFilterFileSystemDriver(log_directory=self.log_directory) def get_patch_location_for_function(self, func_hash, extension: Union[ ALIGN_FILE_EXTENSION_TYPE, PATCH_FILE_EXTENSION_TYPE] = "") -> str: """ Get the local location of the function patch file. :param func_hash: The representation of the function :param extension: Whether this is a patch or an alignment :return: """ return os.path.join(self.log_directory, func_hash + extension) def ensure_persistence_location_exists(self) -> None: """ Ensure that the location on the filesystem we will be writing to actually exists. If not, create it. """ log_directory = self.log_directory # Create the folder if it doesn't exist if not os.path.exists(log_directory): os.makedirs(log_directory) def does_object_exist(self, path: str) -> bool: """ Check to see if a path exists on the filesystem. :param path: :return: """ return os.path.exists(path) def _get_log_directory(self) -> str: """ Find a location on the filesystem to write our logs to. 
:return: """ filename = "functions" # If explicitly defined env_dir = os.getenv(ENVVAR) if env_dir and os.path.isdir(env_dir): return os.path.join(env_dir, filename) # If installed as a library library_dir = os.path.join(user_data_dir(LIB_NAME), filename) if os.path.isdir(library_dir) or not os.path.exists(library_dir): return library_dir # If installed in a project that contains a git repo - place it in the same folder as <fim_suffix> git repo current_dir = os.getcwd() while current_dir != os.path.root: if ".git" in os.listdir(current_dir): return os.path.join(current_dir, filename) current_dir = os.path.dirname(current_dir) return os.path.join(os.getcwd(), filename) def load_dataset(self, dataset_type, func_hash, return_type="both") -> Optional[int]: """ Get the size of the dataset for a function hash """ log_directory = self._get_log_directory() dataset_type_map = {"alignments": ALIGN_FILE_EXTENSION, "positive": POSITIVE_FILE_EXTENSION, "negative": NEGATIVE_FILE_EXTENSION, "patches": PATCH_FILE_EXTENSION} log_file_path = os.path.join(log_directory, func_hash + dataset_type_map[dataset_type]) if not os.path.exists(log_file_path): if return_type == "both": return 0, None elif return_type == "dataset": return None elif return_type == "length": return 0 try: with open(log_file_path, "rb") as f: dataset = f.read() dataset_string = repr(dataset) dataset_length = dataset_string.count("\\n") - dataset_string.count("\\\\n") if return_type == "both": return dataset_length, dataset elif return_type == "dataset": return dataset elif return_type == "length": return dataset_length except Exception as e: if return_type == "both": return 0, None elif return_type == "dataset": return None elif return_type == "length": return 0 def load_existing_datasets(self) -> Dict[str, Dict[str, str]]: log_directory = self.log_directory dataset_lengths = { SYMBOLIC_ALIGNMENTS: {}, POSITIVE_EMBEDDABLE_ALIGNMENTS: {}, NEGATIVE_EMBEDDABLE_ALIGNMENTS: {}, PATCHES: {}, } try: if not os.path.exists(log_directory): os.makedirs(log_directory) # get all the files in the log directory files = os.listdir(log_directory) # discard all .json files files = [x for x in files if ".json" not in x] except Exception as e: return dataset_lengths for file in files: if ALIGN_FILE_EXTENSION not in file \ and PATCH_FILE_EXTENSION not in file \ and POSITIVE_FILE_EXTENSION not in file \ and NEGATIVE_FILE_EXTENSION not in file: continue elif ALIGN_FILE_EXTENSION in file: dataset_type = SYMBOLIC_ALIGNMENTS elif POSITIVE_FILE_EXTENSION in file: dataset_type = POSITIVE_EMBEDDABLE_ALIGNMENTS elif NEGATIVE_FILE_EXTENSION in file: dataset_type = NEGATIVE_EMBEDDABLE_ALIGNMENTS else: dataset_type = PATCHES func_hash = file.replace(ALIGN_FILE_EXTENSION, "").replace(PATCH_FILE_EXTENSION, "") dataset_lengths[dataset_type][func_hash] = -1 return dataset_lengths def write(self, path: str, data: str, mode: Literal["w", "a", "a+b"] = "w") -> None: """ Write data to a file """ with open(path, mode) as f: f.write(data) def read(self, path: str) -> str: """ Read data from a file """ with open(path, "r") as f: return f.read() def get_hash_from_path(self, path) -> str: """ Given a path with a hash, return only the hash :param path: The path to the file :return: The hash """ return path.replace(PATCH_FILE_EXTENSION, ""). \ replace(self.log_directory, ""). \ lstrip("/"). \ lstrip("\\") <fim_middle>the
the
LINE_COMMENT
complete_current_header_inner_block_completion
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # <fim_suffix> datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>try to instantiate
try to instantiate
LINE_COMMENT
complete_current_header_inner_block_completion
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>import json from abc import abstractmethod from typing import Dict, Any, Literal from tanuki.bloom_filter import BloomFilter from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \ POSITIVE_FILE_EXTENSION, NEGATIVE_FILE_EXTENSION, PATCH_FILE_EXTENSION from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence from tanuki.trackers.dataset_worker import DatasetWorker from tanuki.models.function_config import FunctionConfig # PATCH_FILE_EXTENSION_TYPE = Literal[".patches"] # ALIGN_FILE_EXTENSION_TYPE = Literal[".alignments"] # POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".positive_embedding"] # NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".negative_embedding"] # # PATCH_FILE_EXTENSION: PATCH_FILE_EXTENSION_TYPE = ".patches" # ALIGN_FILE_EXTENSION: ALIGN_FILE_EXTENSION_TYPE = ".alignments" # POSITIVE_EMBEDDING_FILE_EXTENSION: POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_positives" # NEGATIVE_EMBEDDING_FILE_EXTENSION: NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_negatives" # # EXPECTED_ITEMS = 10000 # FALSE_POSITIVE_RATE = 0.01 # LIB_NAME = "tanuki" # ENVVAR = "TANUKI_LOG_DIR" class ABCBufferedLogger(DatasetWorker): def __init__(self, name, level=15): self.buffers = {} self.mapped_files = {} self.miss_count = 0 self.hit_count = 0 self.flush_limit = {} self.buffer_rolling_size = {} self.write_count = 0 self.write_limit = 1000 # Save the Bloom filter every 1000 writes super().__init__(name, level) self.bloom_filter = self.create_bloom_filter() self.load_bloom_filter() self.default_function_config = FunctionConfig() @abstractmethod def get_bloom_filter_persistence(self) -> IBloomFilterPersistence: """ Get an instance of the bloom filter persistence provider. This exposes some persistent file storage, that must support reading and writing raw byte streams. :return: """ pass @abstractmethod def load_existing_datasets(self) -> Dict[str, Dict[str, Any]]: """ Get the lengths of all datasets backing the registered functions, including aligns. :return: """ pass @abstractmethod def ensure_persistence_location_exists(self): """ Ensure that the place we will be writing to actually exists. If not, create it. """ pass @abstractmethod def get_patch_location_for_function(self, func_hash, extension="") -> str: """ Get the address of the function patch file. :param func_hash: The representation of the function :param extension: Whether this is a patch or an alignment :return: """ pass @abstractmethod def write(self, path, data, mode="a") -> None: pass @abstractmethod def read(self, path) -> str: pass @abstractmethod def get_hash_from_path(self, path) -> str: pass @abstractmethod def does_object_exist(self, path) -> bool: pass def create_bloom_filter(self): bloom_filter_persistence = self.get_bloom_filter_persistence() bloom_filter = BloomFilter( bloom_filter_persistence, expected_number_of_elements=EXPECTED_ITEMS, false_positive_probability=FALSE_POSITIVE_RATE) return bloom_filter def load_bloom_filter(self): try: self.bloom_filter.load() except FileNotFoundError: self.debug("No Bloom filter found. 
Creating a new one.") def write_symbolic_align_call(self, func_hash, example) -> bool: log_file_path = self.get_patch_location_for_function(func_hash, extension=ALIGN_FILE_EXTENSION) try: # Now, write to the file dumpable_object = str(example.__dict__) self.write(log_file_path, dumpable_object + "\n", mode="a") return True except Exception as e: return False def write_embeddable_align_call(self, func_hash, example, positive=True) -> bool: if positive: log_file_path = self.get_patch_location_for_function(func_hash, extension=POSITIVE_FILE_EXTENSION) else: log_file_path = self.get_patch_location_for_function(func_hash, extension=NEGATIVE_FILE_EXTENSION) try: # Now, write to the file dumpable_object = str(example.__dict__) self.write(log_file_path, dumpable_object + "\n", mode="a") return True except Exception as e: return False def log_embeddable_align(self, func_hash, example, positive=True, **kws): """ Log a contrastive function invocation Args: func_hash: A string representation of the function signature and input parameters example: The example object positive: Whether the example is positive or negative **kws: """ successfully_saved, new_datapoint = False, False try: self.ensure_persistence_location_exists() except Exception as e: return successfully_saved, new_datapoint # prepend the function hash to the example bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n' # Check Bloom Filter if self.bloom_filter.lookup(bloom_filter_representation): return successfully_saved, new_datapoint new_datapoint = True # add to bloom filter self.bloom_filter.add(bloom_filter_representation) self.save_bloom_filter() successfully_saved = self.write_embeddable_align_call(func_hash, example, positive) return successfully_saved, new_datapoint def log_symbolic_align(self, func_hash, *args, **kws): """ Log an align function invocation to the file system :param func_hash: A string representation of the function signature and input parameters :param args: Example objects :param kws: :return: """ successfully_saved, new_datapoint = False, False try: self.ensure_persistence_location_exists() except Exception as e: return successfully_saved, new_datapoint example = args[0] # prepend the function hash to the example bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n' # Check Bloom Filter if self.bloom_filter.lookup(bloom_filter_representation): return successfully_saved, new_datapoint new_datapoint = True # add to bloom filter self.bloom_filter.add(bloom_filter_representation) self.save_bloom_filter() successfully_saved = self.write_symbolic_align_call(func_hash, example) return successfully_saved, new_datapoint def log_symbolic_patch(self, func_hash, example): """ Log a patched function invocation to the file system :param func_hash: A string representation of the function signature and input parameters :param example: :return: """ if not isinstance(func_hash, str): func_hash = str(func_hash) example_data = str(example.__dict__).encode('utf-8') + b'\n' bloom_filter_representation = func_hash + '_' + example_data.decode('utf-8') # Check Bloom Filter if self.bloom_filter.lookup(bloom_filter_representation): self.hit_count += 1 return {} self.miss_count += 1 # Add to Bloom Filter self.bloom_filter.add(bloom_filter_representation) try: self.ensure_persistence_location_exists() except Exception as e: return {} log_file_path = self.get_patch_location_for_function(func_hash, extension=PATCH_FILE_EXTENSION) if log_file_path not in self.buffers: self.buffers[log_file_path] = 
bytearray() if log_file_path not in self.flush_limit: self.flush_limit[log_file_path] = 1 self.buffers[log_file_path].extend(example_data) self.write_count += 1 if log_file_path not in self.buffer_rolling_size: self.buffer_rolling_size[log_file_path] = 1 else: self.buffer_rolling_size[log_file_path] += 1 if self.write_count >= self.write_limit: written_datapoints = self.flush() self.save_bloom_filter() self.write_count = 0 # Reset counter return written_datapoints if len(self.buffers[log_file_path]) >= min(self.flush_limit[log_file_path], 4096): # Flush after reaching 4KB written_datapoints = {} try: self.write(log_file_path, self.buffers[log_file_path], mode="a+b") # update buffers written_datapoints[func_hash] = self.buffer_rolling_size[log_file_path] self.buffers[log_file_path].clear() self.buffer_rolling_size[log_file_path] = 0 self.flush_limit[log_file_path] = 2 * self.flush_limit[log_file_path] self.save_bloom_filter() except Exception as e: pass return written_datapoints return {} def save_bloom_filter(self): try: self.bloom_filter.save() except Exception as e: self.warning("Could not save Bloom filter: {}".format(e)) def flush(self): # get log directory written_datapoints = {} for log_file_path, buffer in self.buffers.items(): if len(buffer) > 0: try: self.write(log_file_path, buffer, mode="a+b") written_datapoints[self.get_hash_from_path(log_file_path)] = self.buffer_rolling_size[log_file_path] self.buffer_rolling_size[log_file_path] = 0 buffer.clear() except Exception as e: pass return written_datapoints def load_function_config(self, func_hash): """ Get the config file for the function. Uses the message and log directory Config file has to be in .json """ default = False try: # try to get the config from the disk. If inaccessible, create a new default one self.ensure_persistence_location_exists() log_file_path = self.get_patch_location_for_function(func_hash) config_path = f"{log_file_path}.json" if not self.does_object_exist(config_path): function_config = self.default_function_config default = True func_config_dict = function_config.to_dict() # <fim_suffix> the config func_config_dict.pop("teacher_models") self.write_json(config_path, func_config_dict) else: function_config = FunctionConfig().load_from_dict(self.read_json(config_path)) except Exception as e: function_config = self.default_function_config default = True return function_config, default def update_function_config(self, func_hash, config_to_be_saved): """ Save the config file """ log_file_path = self.get_patch_location_for_function(func_hash) config_path = f"{log_file_path}.json" try: func_config_dict = config_to_be_saved.to_dict() # remove teacher_models from the config func_config_dict.pop("teacher_models") self.write_json(config_path, func_config_dict) except Exception as e: pass def write_json(self, path, data): self.write(path, json.dumps(data)) def read_json(self, path): return json.loads(self.read(path)) <fim_middle>remove teacher_models from
remove teacher_models from
LINE_COMMENT
complete_current_header_inner_block_completion
<filename>tanuki_py/src/tanuki/language_models/language_model_manager.py<fim_prefix>import json from typing import Any, Dict from tanuki.function_modeler import FunctionModeler from tanuki.language_models.llm_api_abc import LLM_API from tanuki.models.function_description import FunctionDescription from tanuki.models.function_example import FunctionExample from tanuki.models.language_model_output import LanguageModelOutput from tanuki.utils import approximate_token_count from tanuki.validator import Validator from tanuki.models.api_manager import APIManager from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig import logging class LanguageModelManager(object): """ The LanguageModelManager is responsible for managing the language models and their outputs operationally, this includes: - Generating outputs from the language models - Repairing outputs from the language models - Saving outputs from the language models - Finetuning the language models from the saved outputs """ def __init__(self, function_modeler: FunctionModeler, api_provider: APIManager, generation_token_limit=512,) -> None: self.api_provider = api_provider self.function_modeler = function_modeler self.default_generation_length = generation_token_limit self.initialized_functions = {} self.token_counts = {} def __call__(self, args, function_description: FunctionDescription, kwargs, validator: Validator, generation_parameters: dict) -> Any: # add the generation length if not there if "max_new_tokens" not in generation_parameters: generation_parameters["max_new_tokens"] = self.default_generation_length output = self.generate(args, kwargs, function_description, generation_parameters) # start parsing the object, very hacky way for the time being choice_parsed = self._parse_choice(output) valid = validator.check_type(choice_parsed, function_description.output_type_hint) if not valid: choice, choice_parsed, successful_repair = self.repair_output(args, kwargs, function_description, output.generated_response, validator, generation_parameters) if not successful_repair: raise TypeError( f"Output type was not valid. 
Expected an object of type {function_description.output_type_hint}, got '{output.generated_response}'") output.generated_response = choice output.distilled_model = False datapoint = FunctionExample(args, kwargs, output.generated_response) if output.suitable_for_finetuning and not output.distilled_model: self.function_modeler.postprocess_symbolic_datapoint(function_description.__hash__(), function_description, datapoint, repaired=not valid) instantiated = validator.instantiate(choice_parsed, function_description.output_type_hint) return instantiated def _parse_choice(self, output): try: # json load choice_parsed = json.loads(output.generated_response) except: # if it fails, it's not a json object, try eval try: choice_parsed = eval(output.generated_response) except: choice_parsed = output.generated_response return choice_parsed def generate(self, args, kwargs, function_description, llm_parameters={}): """ The main generation function, given the args, kwargs, function description and model type, generate a response and check if the datapoint can be saved to the finetune dataset """ func_hash = function_description.__hash__() prompt, model, save_to_finetune, is_distilled_model = self.get_generation_case(args, kwargs, function_description, llm_parameters, func_hash) # loggings current_function_setup = self.initialized_functions.get(func_hash, None) # getting the current function setup - model and align statements if current_function_setup: generator_model = current_function_setup["model"] if is_distilled_model: logging.info(f"Generating function outputs for {function_description.name} with a finetuned model: {model.model_name}.") self.initialized_functions[func_hash]["model"] = model.model_name elif generator_model == "": logging.info(f"Found {len(current_function_setup['examples'])} align statements for {function_description.name}. 
Generating function outputs with {model.model_name}.") self.initialized_functions[func_hash]["model"] = model.model_name elif generator_model != model.model_name: logging.info(f"Switching output generation from {generator_model} to {model.model_name} for function {function_description.name}.") self.initialized_functions[func_hash]["model"] = model.model_name choice = self._synthesise_answer(prompt, model, llm_parameters) output = LanguageModelOutput(choice, save_to_finetune, is_distilled_model) return output def _synthesise_answer(self, prompt, model, llm_parameters): """ Synthesise an answer given the prompt, model, model_type and llm_parameters Args: prompt (str): The prompt to send to the model model (BaseModelConfig): The model to use for generation llm_parameters (dict): The parameters to use for generation return: choice (str): The generated response """ system_message = model.system_message return self.api_provider[model.provider].generate(model, system_message, prompt, **llm_parameters) def get_generation_case(self, args, kwargs, function_description, llm_parameters, func_hash): """ Get the generation case with the correct prompt and model First get the current model, then if distilled model, do zero-shot prompt and return False as suitable_for_finetune If not distilled model, check if suitable for finetuning, create the prompt and return the correct model given the token count """ f = str(function_description.__dict__.__repr__()) distilled_model, teacher_models = self.function_modeler.get_models(function_description) is_distilled_model = distilled_model.model_name != "" suitable_for_distillation, input_prompt_token_count = self.suitable_for_finetuning_token_check(args, kwargs, f, distilled_model) if func_hash not in self.initialized_functions: # initialise <fim_suffix> initialized_functions dict self.initialized_functions[func_hash] = {"model": "", "examples": []} # no examples needed, using a finetuned model. Dont save to finetune dataset if is_distilled_model and suitable_for_distillation: prompt = self.construct_prompt(f, args, kwargs, [], distilled_model) return prompt, distilled_model, suitable_for_distillation, True else: aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=16) examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in aligns] # update the examples in the initialized_functions dict self.initialized_functions[func_hash]["examples"] = examples examples_token_count = sum([approximate_token_count(example) for example in examples]) generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length) model = self.choose_model_from_tokens(teacher_models, examples_token_count + input_prompt_token_count + generation_tokens, len(examples)) if model: examples_with_parsing_tokens = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput:{model.parsing_helper_tokens['start_token']}{align['output']}{model.parsing_helper_tokens['end_token']}" for align in aligns] prompt = self.construct_prompt(f, args, kwargs, examples_with_parsing_tokens, model) return prompt, model, suitable_for_distillation, False else: raise ValueError( "The input content and align statements combined are too long, please shorten it. 
The maximum currently allowed token limit is 32000") def suitable_for_finetuning_token_check(self, args, kwargs, f, distilled_model: BaseModelConfig): """ Check if the inputs are suitable for finetuning, i.e are below the finetuning token count """ # check if finetunable finetuning_prompt = f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:" input_prompt_token_count = approximate_token_count(finetuning_prompt) if distilled_model.system_message_token_count < 0: distilled_model.system_message_token_count = approximate_token_count(distilled_model.system_message) if distilled_model.instruction_token_count < 0: distilled_model.instruction_token_count = approximate_token_count(distilled_model.instructions) suitable_for_finetune = input_prompt_token_count + distilled_model.instruction_token_count + distilled_model.system_message_token_count < distilled_model.context_length return suitable_for_finetune, input_prompt_token_count def construct_prompt(self, f, args, kwargs, examples, model): """ Construct a prompt given the model, function description, args, kwargs and examples Args: model (BaseModelConfig): The model to use for generation f (str): The function description args (tuple): The args of the function kwargs (tuple): The kwargs of the function examples (list): The examples of the function Returns: content (str): The prompt to send to the model """ if examples: final_examples = "\n".join( [f"{align}" for align in examples]) example_input = f"Examples:{final_examples}\n" else: example_input = "" instruction_prompt = model.instructions content = f"{instruction_prompt}\nFunction: {f}\n{example_input}---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:" return content def repair_generate(self, args, kwargs, f, failed_outputs_list, aligns, models, llm_parameters): """ Repair the output given the input, function description, failed outputs list, examples and models """ # get the token counts examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in aligns] examples_token_count = sum([approximate_token_count(example) for example in examples]) failed_examples_token_count = sum([approximate_token_count(failed_output[0]) + approximate_token_count(failed_output[1]) for failed_output in failed_outputs_list]) input_prompt_token_count = approximate_token_count(f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:") generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length) model = self.choose_model_from_tokens(models, examples_token_count+input_prompt_token_count+generation_tokens+failed_examples_token_count, len(examples)) if model: prompt = self.generate_repair_prompt(args, kwargs, f, failed_outputs_list, examples, model) logging.info(f"Previous output failed type validation, attempting to repair with {model.model_name}") choice = self._synthesise_answer(prompt, model, llm_parameters) return choice else: return None def generate_repair_prompt(self, args, kwargs, f, failed_outputs_list, examples, model): """ Generate a repair prompt given the args, kwargs, function description, failed outputs list and examples """ if examples: final_examples = "\n".join( [f"{model.parsing_helper_tokens['start_token']}{align}{model.parsing_helper_tokens['end_token']}" for align in examples]) successful_examples = f"Examples:{final_examples}\n" else: successful_examples = "" failed_examples = "" for failed_output in failed_outputs_list: failed_examples += f"Output: {failed_output[0]}\nError: 
{failed_output[1]}\n\n" end_token_addition = "" if model.parsing_helper_tokens["end_token"]: end_token_addition = f"Make sure to add the {model.parsing_helper_tokens['end_token']} token at the end of the output." prompt = f"{model.repair_instruction}{end_token_addition}\nFUNCTION DESCRIPTION: {f}\n{successful_examples}---{model.parsing_helper_tokens['start_token']}Inputs:\nArgs: {args}\nKwargs: {kwargs}\nFAILED EXAMPLES: {failed_examples}Correct output:" return prompt def choose_model_from_tokens(self, models, input_token_count, nr_of_examples=0): """ Choose a model from the models given the token count and number of examples Args: models (list): The models to choose from input_token_count (int): The token count of the input nr_of_examples (int): The number of examples Returns: model (BaseModelConfig): The chosen model """ for model in models: # check if input token count is less than the context length # If the model config has custom messages, then use those, otherwise use the default ones if model.system_message_token_count < 0: model.system_message_token_count = approximate_token_count(model.system_message) if model.instruction_token_count < 0: model.instruction_token_count = approximate_token_count(model.instructions) if model.parsing_helper_tokens["start_token"]: input_token_count += 2*nr_of_examples if model.parsing_helper_tokens["end_token"]: input_token_count += 2*nr_of_examples total_token_count = input_token_count + model.instruction_token_count + model.system_message_token_count if total_token_count < model.context_length: return model return None def repair_output(self, args: tuple, kwargs: dict, function_description: FunctionDescription, choice, validator: Validator, generation_parameters: dict) -> tuple: """ Repair an output, that failed type validation by generating a new output using the teacher model and the error Args: args (tuple): The args of the function kwargs (dict): The kwargs of the function function_description (FunctionDescription): The function description choice: The output that failed type validation, type is arbitrary validator (Validator): The validator object Returns: choice (str): The choice that was generated by the language model choice_parsed: The parsed choice, type is arbitrary valid (bool): Whether the output was correctly repaired was valid """ # get the teacher models teacher_models = self.function_modeler.get_models(function_description)[1] valid = False retry_index = 5 f = str(function_description.__dict__.__repr__() + "\n") error = f"Output type was not valid. 
Expected an valid object of type {function_description.output_type_hint}, got '{choice}'" # instantiate the failed outputs list failed_outputs_list = [(choice, error)] while retry_index > 0 and not valid: # get the alignments aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=5) # Generate the reparied LLM output choice = self.repair_generate(args, kwargs, f, failed_outputs_list, aligns, teacher_models, generation_parameters) if not choice: # if no choice then the input was too long for the model # no specific error but the retry index goes down retry_index -= 1 continue # start parsing the object try: # json load choice_parsed = json.loads(choice) except: # if it fails, it's not a json object, try eval try: choice_parsed = eval(choice) except: choice_parsed = choice valid = validator.check_type(choice_parsed, function_description.output_type_hint) if not valid: # if it's not valid, add it to the failed outputs list error = f"Output type was not valid. Expected an object of type {function_description.output_type_hint}, got '{choice}'" failed_outputs_list.append((choice, error)) retry_index -= 1 if valid: logging.info(f"Successfully repaired output.") return choice, choice_parsed, valid <fim_middle>the
the
LINE_COMMENT
complete_current_header_inner_block_completion
<filename>tanuki_py/src/tanuki/language_models/language_model_manager.py<fim_prefix>import json from typing import Any, Dict from tanuki.function_modeler import FunctionModeler from tanuki.language_models.llm_api_abc import LLM_API from tanuki.models.function_description import FunctionDescription from tanuki.models.function_example import FunctionExample from tanuki.models.language_model_output import LanguageModelOutput from tanuki.utils import approximate_token_count from tanuki.validator import Validator from tanuki.models.api_manager import APIManager from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig import logging class LanguageModelManager(object): """ The LanguageModelManager is responsible for managing the language models and their outputs operationally, this includes: - Generating outputs from the language models - Repairing outputs from the language models - Saving outputs from the language models - Finetuning the language models from the saved outputs """ def __init__(self, function_modeler: FunctionModeler, api_provider: APIManager, generation_token_limit=512,) -> None: self.api_provider = api_provider self.function_modeler = function_modeler self.default_generation_length = generation_token_limit self.initialized_functions = {} self.token_counts = {} def __call__(self, args, function_description: FunctionDescription, kwargs, validator: Validator, generation_parameters: dict) -> Any: # add the generation length if not there if "max_new_tokens" not in generation_parameters: generation_parameters["max_new_tokens"] = self.default_generation_length output = self.generate(args, kwargs, function_description, generation_parameters) # start parsing the object, very hacky way for the time being choice_parsed = self._parse_choice(output) valid = validator.check_type(choice_parsed, function_description.output_type_hint) if not valid: choice, choice_parsed, successful_repair = self.repair_output(args, kwargs, function_description, output.generated_response, validator, generation_parameters) if not successful_repair: raise TypeError( f"Output type was not valid. 
Expected an object of type {function_description.output_type_hint}, got '{output.generated_response}'") output.generated_response = choice output.distilled_model = False datapoint = FunctionExample(args, kwargs, output.generated_response) if output.suitable_for_finetuning and not output.distilled_model: self.function_modeler.postprocess_symbolic_datapoint(function_description.__hash__(), function_description, datapoint, repaired=not valid) instantiated = validator.instantiate(choice_parsed, function_description.output_type_hint) return instantiated def _parse_choice(self, output): try: # json load choice_parsed = json.loads(output.generated_response) except: # if it fails, it's not a json object, try eval try: choice_parsed = eval(output.generated_response) except: choice_parsed = output.generated_response return choice_parsed def generate(self, args, kwargs, function_description, llm_parameters={}): """ The main generation function, given the args, kwargs, function description and model type, generate a response and check if the datapoint can be saved to the finetune dataset """ func_hash = function_description.__hash__() prompt, model, save_to_finetune, is_distilled_model = self.get_generation_case(args, kwargs, function_description, llm_parameters, func_hash) # loggings current_function_setup = self.initialized_functions.get(func_hash, None) # getting the current function setup - model and align statements if current_function_setup: generator_model = current_function_setup["model"] if is_distilled_model: logging.info(f"Generating function outputs for {function_description.name} with a finetuned model: {model.model_name}.") self.initialized_functions[func_hash]["model"] = model.model_name elif generator_model == "": logging.info(f"Found {len(current_function_setup['examples'])} align statements for {function_description.name}. 
Generating function outputs with {model.model_name}.") self.initialized_functions[func_hash]["model"] = model.model_name elif generator_model != model.model_name: logging.info(f"Switching output generation from {generator_model} to {model.model_name} for function {function_description.name}.") self.initialized_functions[func_hash]["model"] = model.model_name choice = self._synthesise_answer(prompt, model, llm_parameters) output = LanguageModelOutput(choice, save_to_finetune, is_distilled_model) return output def _synthesise_answer(self, prompt, model, llm_parameters): """ Synthesise an answer given the prompt, model, model_type and llm_parameters Args: prompt (str): The prompt to send to the model model (BaseModelConfig): The model to use for generation llm_parameters (dict): The parameters to use for generation return: choice (str): The generated response """ system_message = model.system_message return self.api_provider[model.provider].generate(model, system_message, prompt, **llm_parameters) def get_generation_case(self, args, kwargs, function_description, llm_parameters, func_hash): """ Get the generation case with the correct prompt and model First get the current model, then if distilled model, do zero-shot prompt and return False as suitable_for_finetune If not distilled model, check if suitable for finetuning, create the prompt and return the correct model given the token count """ f = str(function_description.__dict__.__repr__()) distilled_model, teacher_models = self.function_modeler.get_models(function_description) is_distilled_model = distilled_model.model_name != "" suitable_for_distillation, input_prompt_token_count = self.suitable_for_finetuning_token_check(args, kwargs, f, distilled_model) if func_hash not in self.initialized_functions: # initialise the initialized_functions dict self.initialized_functions[func_hash] = {"model": "", "examples": []} # no examples needed, using a finetuned model. Dont save to finetune dataset if is_distilled_model and suitable_for_distillation: prompt = self.construct_prompt(f, args, kwargs, [], distilled_model) return prompt, distilled_model, suitable_for_distillation, True else: aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=16) examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in aligns] # update the <fim_suffix> the initialized_functions dict self.initialized_functions[func_hash]["examples"] = examples examples_token_count = sum([approximate_token_count(example) for example in examples]) generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length) model = self.choose_model_from_tokens(teacher_models, examples_token_count + input_prompt_token_count + generation_tokens, len(examples)) if model: examples_with_parsing_tokens = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput:{model.parsing_helper_tokens['start_token']}{align['output']}{model.parsing_helper_tokens['end_token']}" for align in aligns] prompt = self.construct_prompt(f, args, kwargs, examples_with_parsing_tokens, model) return prompt, model, suitable_for_distillation, False else: raise ValueError( "The input content and align statements combined are too long, please shorten it. 
The maximum currently allowed token limit is 32000") def suitable_for_finetuning_token_check(self, args, kwargs, f, distilled_model: BaseModelConfig): """ Check if the inputs are suitable for finetuning, i.e are below the finetuning token count """ # check if finetunable finetuning_prompt = f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:" input_prompt_token_count = approximate_token_count(finetuning_prompt) if distilled_model.system_message_token_count < 0: distilled_model.system_message_token_count = approximate_token_count(distilled_model.system_message) if distilled_model.instruction_token_count < 0: distilled_model.instruction_token_count = approximate_token_count(distilled_model.instructions) suitable_for_finetune = input_prompt_token_count + distilled_model.instruction_token_count + distilled_model.system_message_token_count < distilled_model.context_length return suitable_for_finetune, input_prompt_token_count def construct_prompt(self, f, args, kwargs, examples, model): """ Construct a prompt given the model, function description, args, kwargs and examples Args: model (BaseModelConfig): The model to use for generation f (str): The function description args (tuple): The args of the function kwargs (tuple): The kwargs of the function examples (list): The examples of the function Returns: content (str): The prompt to send to the model """ if examples: final_examples = "\n".join( [f"{align}" for align in examples]) example_input = f"Examples:{final_examples}\n" else: example_input = "" instruction_prompt = model.instructions content = f"{instruction_prompt}\nFunction: {f}\n{example_input}---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:" return content def repair_generate(self, args, kwargs, f, failed_outputs_list, aligns, models, llm_parameters): """ Repair the output given the input, function description, failed outputs list, examples and models """ # get the token counts examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in aligns] examples_token_count = sum([approximate_token_count(example) for example in examples]) failed_examples_token_count = sum([approximate_token_count(failed_output[0]) + approximate_token_count(failed_output[1]) for failed_output in failed_outputs_list]) input_prompt_token_count = approximate_token_count(f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:") generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length) model = self.choose_model_from_tokens(models, examples_token_count+input_prompt_token_count+generation_tokens+failed_examples_token_count, len(examples)) if model: prompt = self.generate_repair_prompt(args, kwargs, f, failed_outputs_list, examples, model) logging.info(f"Previous output failed type validation, attempting to repair with {model.model_name}") choice = self._synthesise_answer(prompt, model, llm_parameters) return choice else: return None def generate_repair_prompt(self, args, kwargs, f, failed_outputs_list, examples, model): """ Generate a repair prompt given the args, kwargs, function description, failed outputs list and examples """ if examples: final_examples = "\n".join( [f"{model.parsing_helper_tokens['start_token']}{align}{model.parsing_helper_tokens['end_token']}" for align in examples]) successful_examples = f"Examples:{final_examples}\n" else: successful_examples = "" failed_examples = "" for failed_output in failed_outputs_list: failed_examples += f"Output: {failed_output[0]}\nError: 
{failed_output[1]}\n\n" end_token_addition = "" if model.parsing_helper_tokens["end_token"]: end_token_addition = f"Make sure to add the {model.parsing_helper_tokens['end_token']} token at the end of the output." prompt = f"{model.repair_instruction}{end_token_addition}\nFUNCTION DESCRIPTION: {f}\n{successful_examples}---{model.parsing_helper_tokens['start_token']}Inputs:\nArgs: {args}\nKwargs: {kwargs}\nFAILED EXAMPLES: {failed_examples}Correct output:" return prompt def choose_model_from_tokens(self, models, input_token_count, nr_of_examples=0): """ Choose a model from the models given the token count and number of examples Args: models (list): The models to choose from input_token_count (int): The token count of the input nr_of_examples (int): The number of examples Returns: model (BaseModelConfig): The chosen model """ for model in models: # check if input token count is less than the context length # If the model config has custom messages, then use those, otherwise use the default ones if model.system_message_token_count < 0: model.system_message_token_count = approximate_token_count(model.system_message) if model.instruction_token_count < 0: model.instruction_token_count = approximate_token_count(model.instructions) if model.parsing_helper_tokens["start_token"]: input_token_count += 2*nr_of_examples if model.parsing_helper_tokens["end_token"]: input_token_count += 2*nr_of_examples total_token_count = input_token_count + model.instruction_token_count + model.system_message_token_count if total_token_count < model.context_length: return model return None def repair_output(self, args: tuple, kwargs: dict, function_description: FunctionDescription, choice, validator: Validator, generation_parameters: dict) -> tuple: """ Repair an output, that failed type validation by generating a new output using the teacher model and the error Args: args (tuple): The args of the function kwargs (dict): The kwargs of the function function_description (FunctionDescription): The function description choice: The output that failed type validation, type is arbitrary validator (Validator): The validator object Returns: choice (str): The choice that was generated by the language model choice_parsed: The parsed choice, type is arbitrary valid (bool): Whether the output was correctly repaired was valid """ # get the teacher models teacher_models = self.function_modeler.get_models(function_description)[1] valid = False retry_index = 5 f = str(function_description.__dict__.__repr__() + "\n") error = f"Output type was not valid. 
Expected an valid object of type {function_description.output_type_hint}, got '{choice}'" # instantiate the failed outputs list failed_outputs_list = [(choice, error)] while retry_index > 0 and not valid: # get the alignments aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=5) # Generate the reparied LLM output choice = self.repair_generate(args, kwargs, f, failed_outputs_list, aligns, teacher_models, generation_parameters) if not choice: # if no choice then the input was too long for the model # no specific error but the retry index goes down retry_index -= 1 continue # start parsing the object try: # json load choice_parsed = json.loads(choice) except: # if it fails, it's not a json object, try eval try: choice_parsed = eval(choice) except: choice_parsed = choice valid = validator.check_type(choice_parsed, function_description.output_type_hint) if not valid: # if it's not valid, add it to the failed outputs list error = f"Output type was not valid. Expected an object of type {function_description.output_type_hint}, got '{choice}'" failed_outputs_list.append((choice, error)) retry_index -= 1 if valid: logging.info(f"Successfully repaired output.") return choice, choice_parsed, valid <fim_middle>examples in
examples in
LINE_COMMENT
complete_current_header_inner_block_completion
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target <fim_suffix> return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>type is a built-in, attempt to instantiate and
type is a built-in, attempt to instantiate and
LINE_COMMENT
complete_current_header_inner_block_completion
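The completion above finishes the header comment for the base-type branch of Validator.instantiate ("If the target type is a built-in, attempt to instantiate and return"). As a rough standalone sketch of what that branch does — the helper name is invented, and because the row above appears to route the float case through int(float(...)), which reads like a slip, the sketch returns a float there instead:

from typing import Any


def coerce_base_type(data: Any, target_type: type) -> Any:
    """Best-effort coercion of parsed JSON data into a built-in type.

    Mirrors the base-type branch shown in the row above: try the
    constructor first, then fall back to routing numeric strings such
    as "3.0" through float before converting.
    """
    if target_type is str and isinstance(data, str):
        return data
    try:
        return target_type(data)
    except (ValueError, TypeError):
        if target_type is int:
            # "3.0" -> 3.0 -> 3
            return int(float(data))
        if target_type is float:
            # hypothetical fix; the row above returns int(float(data)) here
            return float(data)
        raise TypeError(f"Failed to instantiate {target_type} from provided data.")


assert coerce_base_type("3.0", int) == 3
assert coerce_base_type("2.5", float) == 2.5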
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with <fim_suffix> < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, 
list) and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>pydantic
pydantic
LINE_COMMENT
complete_current_header_inner_block_completion
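The target here completes the comment "backwards compatibility with pydantic < 2". The pattern in the row — call model_validate and fall back to parse_obj when the attribute is missing — is the standard way to support both pydantic major versions from one code path. A minimal sketch of that pattern in isolation (the Point model is only an example, not part of the library):

from typing import Any, Dict, Type

from pydantic import BaseModel


def validate_model(model_cls: Type[BaseModel], data: Dict[str, Any]) -> BaseModel:
    """Instantiate a pydantic model from a dict on either pydantic 1.x or 2.x.

    Pydantic 2 exposes model_validate; on pydantic 1 that attribute does not
    exist, so the AttributeError routes the call to the older parse_obj.
    """
    try:
        return model_cls.model_validate(data)
    except AttributeError:
        return model_cls.parse_obj(data)


class Point(BaseModel):
    x: int
    y: int


assert validate_model(Point, {"x": 1, "y": 2}) == Point(x=1, y=2)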
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none <fim_suffix> data as-is return data <fim_middle>of the above, return the
of the above, return the
LINE_COMMENT
complete_current_header_inner_block_completion
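The rows in this section all share the same fill-in-the-middle layout: <fim_prefix> and <fim_suffix> carry the code on either side of the cursor, <fim_middle> marks where the model should write, and the targets column holds the expected comment fragment (here, "of the above, return the"). As a hedged illustration of how such a row could be turned into a prompt string — the tag names come from the markers above; everything else, including the helper name, is assumed:

def build_fim_prompt(prefix: str, suffix: str) -> str:
    """Assemble a fill-in-the-middle prompt from the code around the cursor.

    The model is expected to generate the text that belongs at the
    <fim_middle> position; a training row pairs this prompt with the
    targets field as the reference completion.
    """
    return f"<fim_prefix>{prefix}<fim_suffix>{suffix}<fim_middle>"


prompt = build_fim_prompt(
    prefix="        # If none ",
    suffix=" data as-is\n        return data",
)
# For this row the reference completion is the fragment "of the above, return the".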
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments <fim_suffix> in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>are
are
LINE_COMMENT
complete_current_header_inner_block_completion
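The single-word target "are" completes "check that all required arguments are in value" inside the pydantic branch of check_type. That branch treats a field as required unless its annotation is Optional, i.e. a Union whose arguments include NoneType. A small self-contained sketch of that test, with a throwaway Example class that is not part of the library:

from typing import List, Optional, Union, get_args, get_origin


def required_fields(cls: type) -> List[str]:
    """Return annotated attributes that are not Optional[...].

    Mirrors the check in the row above: a field counts as required unless
    its annotation is a Union whose arguments include NoneType.
    """
    required = []
    for name, annotation in getattr(cls, "__annotations__", {}).items():
        is_optional = get_origin(annotation) is Union and type(None) in get_args(annotation)
        if not is_optional:
            required.append(name)
    return required


class Example:
    a: int
    b: Optional[str]


assert required_fields(Example) == ["a"]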
<filename>tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py<fim_prefix>import os from enum import Enum from typing import Literal, Union, Optional, Dict from appdirs import user_data_dir from tanuki.constants import * from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence from tanuki.persistence.filter.filesystem_bloom import BloomFilterFileSystemDriver from tanuki.trackers.abc_buffered_logger import ABCBufferedLogger class FilesystemBufferedLogger(ABCBufferedLogger): """ A class that handles the reading and writing of patch invocations and align statements. It includes the logic for a bloom filter, to ensure that we only store unique invocations. """ def __init__(self, name, level=15): self.log_directory = self._get_log_directory() super().__init__(name, level) def get_bloom_filter_persistence(self) -> IBloomFilterPersistence: """ Get an instance of the bloom filter persistence provider. Typically this will be a file system provider. :return: A persistence provider """ return BloomFilterFileSystemDriver(log_directory=self.log_directory) def get_patch_location_for_function(self, func_hash, extension: Union[ ALIGN_FILE_EXTENSION_TYPE, PATCH_FILE_EXTENSION_TYPE] = "") -> str: """ Get the local location of the function patch file. :param func_hash: The representation of the function :param extension: Whether this is a patch or an alignment :return: """ return os.path.join(self.log_directory, func_hash + extension) def ensure_persistence_location_exists(self) -> None: """ Ensure that the location on the filesystem we will be writing to actually exists. If not, create it. """ log_directory = self.log_directory # Create the folder if it doesn't exist if not os.path.exists(log_directory): os.makedirs(log_directory) def does_object_exist(self, path: str) -> bool: """ Check to see if a path exists on the filesystem. :param path: :return: """ return os.path.exists(path) def _get_log_directory(self) -> str: """ Find a location on the filesystem to write our logs to. 
:return: """ filename = "functions" # If explicitly defined env_dir = os.getenv(ENVVAR) if env_dir and os.path.isdir(env_dir): return os.path.join(env_dir, filename) # If <fim_suffix> library library_dir = os.path.join(user_data_dir(LIB_NAME), filename) if os.path.isdir(library_dir) or not os.path.exists(library_dir): return library_dir # If installed in a project that contains a git repo - place it in the same folder as the git repo current_dir = os.getcwd() while current_dir != os.path.root: if ".git" in os.listdir(current_dir): return os.path.join(current_dir, filename) current_dir = os.path.dirname(current_dir) return os.path.join(os.getcwd(), filename) def load_dataset(self, dataset_type, func_hash, return_type="both") -> Optional[int]: """ Get the size of the dataset for a function hash """ log_directory = self._get_log_directory() dataset_type_map = {"alignments": ALIGN_FILE_EXTENSION, "positive": POSITIVE_FILE_EXTENSION, "negative": NEGATIVE_FILE_EXTENSION, "patches": PATCH_FILE_EXTENSION} log_file_path = os.path.join(log_directory, func_hash + dataset_type_map[dataset_type]) if not os.path.exists(log_file_path): if return_type == "both": return 0, None elif return_type == "dataset": return None elif return_type == "length": return 0 try: with open(log_file_path, "rb") as f: dataset = f.read() dataset_string = repr(dataset) dataset_length = dataset_string.count("\\n") - dataset_string.count("\\\\n") if return_type == "both": return dataset_length, dataset elif return_type == "dataset": return dataset elif return_type == "length": return dataset_length except Exception as e: if return_type == "both": return 0, None elif return_type == "dataset": return None elif return_type == "length": return 0 def load_existing_datasets(self) -> Dict[str, Dict[str, str]]: log_directory = self.log_directory dataset_lengths = { SYMBOLIC_ALIGNMENTS: {}, POSITIVE_EMBEDDABLE_ALIGNMENTS: {}, NEGATIVE_EMBEDDABLE_ALIGNMENTS: {}, PATCHES: {}, } try: if not os.path.exists(log_directory): os.makedirs(log_directory) # get all the files in the log directory files = os.listdir(log_directory) # discard all .json files files = [x for x in files if ".json" not in x] except Exception as e: return dataset_lengths for file in files: if ALIGN_FILE_EXTENSION not in file \ and PATCH_FILE_EXTENSION not in file \ and POSITIVE_FILE_EXTENSION not in file \ and NEGATIVE_FILE_EXTENSION not in file: continue elif ALIGN_FILE_EXTENSION in file: dataset_type = SYMBOLIC_ALIGNMENTS elif POSITIVE_FILE_EXTENSION in file: dataset_type = POSITIVE_EMBEDDABLE_ALIGNMENTS elif NEGATIVE_FILE_EXTENSION in file: dataset_type = NEGATIVE_EMBEDDABLE_ALIGNMENTS else: dataset_type = PATCHES func_hash = file.replace(ALIGN_FILE_EXTENSION, "").replace(PATCH_FILE_EXTENSION, "") dataset_lengths[dataset_type][func_hash] = -1 return dataset_lengths def write(self, path: str, data: str, mode: Literal["w", "a", "a+b"] = "w") -> None: """ Write data to a file """ with open(path, mode) as f: f.write(data) def read(self, path: str) -> str: """ Read data from a file """ with open(path, "r") as f: return f.read() def get_hash_from_path(self, path) -> str: """ Given a path with a hash, return only the hash :param path: The path to the file :return: The hash """ return path.replace(PATCH_FILE_EXTENSION, ""). \ replace(self.log_directory, ""). \ lstrip("/"). \ lstrip("\\") <fim_middle>installed as a
installed as a
LINE_COMMENT
complete_current_header_inner_block_completion
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except <fim_suffix> TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>(ValueError,
(ValueError,
CATCH
complete_current_header_inner_block_completion
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except <fim_suffix> as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>Exception
Exception
CATCH
complete_current_header_inner_block_completion
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except <fim_suffix> TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>(ValueError,
(ValueError,
CATCH
complete_current_header_inner_block_completion
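The row above completes the `except (ValueError, TypeError):` header that guards the base-type fallback inside `Validator.instantiate`. As a rough illustration of that fallback pattern only — `coerce_base` is a hypothetical helper name, not something taken from the source file — a standalone sketch could look like:

    from typing import Any

    def coerce_base(data: Any, target_type: type) -> Any:
        # Try direct construction first, then handle numeric strings such as '3.0' when an int is wanted.
        try:
            return target_type(data)
        except (ValueError, TypeError):
            if target_type is int:
                try:
                    return int(float(data))
                except (ValueError, TypeError):
                    pass
            raise TypeError(f"Failed to instantiate {target_type} from provided data.")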
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except <fim_suffix> as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>AttributeError
AttributeError
CATCH
complete_current_header_inner_block_completion
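The `AttributeError` target above captures the pydantic version-compatibility idiom used in `Validator.instantiate`: `model_validate` exists only from pydantic 2 onward, so older installs fall back to `parse_obj`. A minimal sketch of that idiom, assuming a toy `User` model purely for illustration:

    from pydantic import BaseModel

    class User(BaseModel):
        name: str
        age: int

    def validate_user(data: dict) -> User:
        # Prefer the pydantic >= 2 entry point; v1 models lack it and raise AttributeError.
        try:
            return User.model_validate(data)
        except AttributeError:
            return User.parse_obj(data)  # pydantic < 2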
<filename>tanuki_py/src/tanuki/language_models/openai_api.py<fim_prefix>from typing import List import logging import time # import abstract base class from openai import OpenAI from openai.types import CreateEmbeddingResponse from openai.types.fine_tuning import FineTuningJob from tanuki.language_models.llm_finetune_api_abc import LLM_Finetune_API from tanuki.models.embedding import Embedding from tanuki.language_models.embedding_api_abc import Embedding_API from tanuki.language_models.llm_api_abc import LLM_API import os from tanuki.constants import DEFAULT_DISTILLED_MODEL_NAME from tanuki.language_models.llm_configs.openai_config import OpenAIConfig from tanuki.models.finetune_job import FinetuneJob import copy OPENAI_URL = "https://api.openai.com/v1/chat/completions" import requests LLM_GENERATION_PARAMETERS = ["temperature", "top_p", "max_new_tokens", "frequency_penalty", "presence_penalty"] class OpenAI_API(LLM_API, Embedding_API, LLM_Finetune_API): def __init__(self) -> None: # initialise the abstract base class super().__init__() self.api_key = os.environ.get("OPENAI_API_KEY") self.client = None def embed(self, texts: List[str], model: OpenAIConfig, **kwargs) -> List[Embedding]: """ Generate embeddings for the provided texts using the specified OpenAI model. Lightweight wrapper over the OpenAI client. :param texts: A list of texts to embed. :param model: The model to use for embeddings. :return: A list of embeddings. """ self.check_api_key() try: response: CreateEmbeddingResponse = self.client.embeddings.create( input=texts, model=model.model_name, **kwargs ) assert response.object == "list" assert len(response.data) == len(texts) embeddings = [] for embedding_response in response.data: assert embedding_response.object == "embedding" embeddings.append(Embedding(embedding_response.embedding)) return embeddings except Exception as e: print(f"An error occurred: {e}") return None def generate(self, model, system_message, prompt, **kwargs): """ The main generation function, given the args, kwargs, function_modeler, function description and model type, generate a response Args model (OpenAIConfig): The model to use for generation. system_message (str): The system message to use for generation. prompt (str): The prompt to use for generation. kwargs (dict): Additional generation parameters. 
""" self.check_api_key() temperature = kwargs.get("temperature", 0.1) top_p = kwargs.get("top_p", 1) frequency_penalty = kwargs.get("frequency_penalty", 0) presence_penalty = kwargs.get("presence_penalty", 0) max_new_tokens = kwargs.get("max_new_tokens") # check if there are any generation parameters that are not supported unsupported_params = [param for param in kwargs.keys() if param not in LLM_GENERATION_PARAMETERS] if len(unsupported_params) > 0: # log warning logging.warning(f"Unused generation parameters sent as input: {unsupported_params}."\ f"For OpenAI, only the following parameters are supported: {LLM_GENERATION_PARAMETERS}") params = { "model": model.model_name, "temperature": temperature, "max_tokens": max_new_tokens, "top_p": top_p, "frequency_penalty": frequency_penalty, "presence_penalty": presence_penalty, } if model.parsing_helper_tokens["start_token"]: prompt += model.parsing_helper_tokens["start_token"] messages = [ { "role": "system", "content": system_message }, { "role": "user", "content": prompt } ] params["messages"] = messages counter = 0 choice = None # initiate response so exception logic doesnt error out when checking for error in response response = {} while counter <= 5: try: openai_headers = { "Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json", } response = requests.post( OPENAI_URL, headers=openai_headers, json=params, timeout=50 ) response = response.json() choice = response["choices"][0]["message"]["content"].strip("'") break except <fim_suffix> e: if ("error" in response and "code" in response["error"] and response["error"]["code"] == 'invalid_api_key'): raise Exception(f"The supplied OpenAI API key {self.api_key} is invalid") if counter == 5: raise Exception(f"OpenAI API failed to generate a response: {e}") counter += 1 time.sleep(2 ** counter) continue if not choice: raise Exception("OpenAI API failed to generate a response") if model.parsing_helper_tokens["end_token"]: # remove the end token from the choice choice = choice.split(model.parsing_helper_tokens["end_token"])[0] # check if starting token is in choice if model.parsing_helper_tokens["start_token"] in choice: # remove the starting token from the choice choice = choice.split(model.parsing_helper_tokens["start_token"])[-1] return choice def list_finetuned(self, model_config, limit=100, **kwargs) -> List[FinetuneJob]: self.check_api_key() response = self.client.fine_tuning.jobs.list(limit=limit) jobs = [] for job in response.data: finetune_job = self.create_finetune_job(job, model_config) jobs.append(finetune_job) return jobs def get_finetuned(self, job_id, model_config: OpenAIConfig) -> FinetuneJob: self.check_api_key() response = self.client.fine_tuning.jobs.retrieve(job_id) finetune_job = self.create_finetune_job(response, model_config= model_config) return finetune_job def finetune(self, file, suffix, model_config, **kwargs) -> FinetuneJob: self.check_api_key() # Use the stream as a file response = self.client.files.create(file=file, purpose='fine-tune') training_file_id = response.id if not model_config.base_model_for_sft: model_config.base_model_for_sft = DEFAULT_DISTILLED_MODEL_NAME # submit the finetuning job finetuning_response: FineTuningJob = self.client.fine_tuning.jobs.create(training_file=training_file_id, model=model_config.base_model_for_sft, suffix=suffix) finetune_job = self.create_finetune_job(finetuning_response, model_config) return finetune_job def create_finetune_job(self, response: FineTuningJob, model_config: OpenAIConfig) -> FinetuneJob: 
finetuned_model_config = copy.deepcopy(model_config) finetuned_model_config.model_name = response.fine_tuned_model finetune_job = FinetuneJob(response.id, response.status, finetuned_model_config) return finetune_job def check_api_key(self): # check if api key is not none if not self.api_key: # try to get the api key from the environment, maybe it has been set later self.api_key = os.getenv("OPENAI_API_KEY") if not self.api_key: raise ValueError("OpenAI API key is not set") if not self.client: self.client = OpenAI(api_key=self.api_key) <fim_middle>Exception as
Exception as
CATCH
complete_current_header_inner_block_completion
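This row exercises the retry loop in `OpenAI_API.generate`, where the completion is the `except Exception as e:` header around the HTTP call. A self-contained sketch of the same retry-with-exponential-backoff shape follows; the function name `post_with_retries` and the attempt limit are assumptions for illustration, not part of the library's API:

    import time
    import requests

    def post_with_retries(url: str, headers: dict, payload: dict, max_attempts: int = 5) -> dict:
        # Retry the request, doubling the wait after each failure.
        for attempt in range(max_attempts):
            try:
                response = requests.post(url, headers=headers, json=payload, timeout=50)
                return response.json()
            except Exception as e:
                if attempt == max_attempts - 1:
                    raise Exception(f"Request failed after {max_attempts} attempts: {e}")
                time.sleep(2 ** (attempt + 1))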
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for <fim_suffix> get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>arg in
arg in
FOR
complete_current_header_inner_block_completion
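The `FOR` example above completes `for arg in get_args(target_type):`, the first-match strategy `Validator.instantiate` applies to `Union` types. A stripped-down version of that strategy, with the hypothetical name `instantiate_union` and plain constructor calls standing in for the recursive `self.instantiate`:

    from typing import Any, Union, get_args

    def instantiate_union(data: Any, target_type: Any) -> Any:
        # Try each member of the Union; return the first one that instantiates cleanly.
        for arg in get_args(target_type):
            try:
                return arg(data)
            except Exception:
                continue
        raise TypeError(f"Failed to instantiate {target_type} from provided data.")

    # e.g. instantiate_union("3", Union[int, str]) returns the int 3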
<filename>tanuki_py/src/tanuki/bloom_filter.py<fim_prefix>import hashlib import logging import math import numpy as np from bitarray import bitarray from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence class BloomFilter: def __init__(self, persistence: IBloomFilterPersistence, size=None, hash_count=None, expected_number_of_elements=None, false_positive_probability=None): if not persistence: raise ValueError("Persistence cannot be None, it must be an instance of IBloomFilterPersistence") if not size and not hash_count and not expected_number_of_elements and not false_positive_probability: raise ValueError("Must specify either (size, hash_count) or (expected_number_of_elements, false_positive_probability") if expected_number_of_elements and false_positive_probability: size, hash_count = BloomFilter.optimal_bloom_filter_params(expected_number_of_elements, false_positive_probability) if not size and not hash_count: raise ValueError("Size and hash_count not set. This should never happen.") self.size = size self.hash_count = hash_count self.bit_array, self.indices = self.init_bit_array(size) self.persistence = persistence def init_bit_array(self, size): _bit_array = bitarray(size) _bit_array.setall(0) _indices = np.zeros(size, dtype=np.int32) return _bit_array, _indices def hash_functions(self, string): # h1(x) hash1 = int(hashlib.sha256(string.encode('utf-8')).hexdigest(), 16) # h2(x) hash2 = int(hashlib.md5(string.encode('utf-8')).hexdigest(), 16) return hash1, hash2 def lookup(self, string): hash1, hash2 = self.hash_functions(string) for seed in range(self.hash_count): index = (hash1 + seed * hash2) % self.size #print(f"Lookup: Seed={seed}, Digest={index}, BitValue={self.bit_array[index]}") if self.bit_array[index] == 0: return False return True def add(self, string): hash1, hash2 = self.hash_functions(string) for <fim_suffix> range(self.hash_count): index = (hash1 + seed * hash2) % self.size self.bit_array[index] = 1 #print(f"Add: Seed={seed}, Digest={index}, BitValue={self.bit_array[index]}") def save(self): self.persistence.save(self.bit_array) def load(self): self.bit_array = self.persistence.load() length_in_bytes = int(len(self.bit_array)/8) expected_length = math.ceil(self.size / 8) if length_in_bytes != expected_length: logging.warning("Bit array length does not match expected size, and so might be corrupted. Reinitializing.") self.bit_array, self.indices = self.init_bit_array(self.size) self.save() @staticmethod def optimal_bloom_filter_params(n, p): """ Calculate the optimal bit array size (m) and number of hash functions (k) for a Bloom filter. n: expected number of items to be stored p: acceptable false positive probability Returns a tuple (m, k) """ m = - (n * math.log(p)) / (math.log(2) ** 2) k = (m / n) * math.log(2) return int(math.ceil(m)), int(math.ceil(k))<fim_middle>seed in
seed in
FOR
complete_current_header_inner_block_completion
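The bloom filter row completes `for seed in range(self.hash_count):` inside `add`, which implements double hashing: each probe index is derived as (h1 + seed * h2) mod size from one SHA-256 and one MD5 digest. A small sketch of just that index computation — the helper name `bloom_indices` is an assumption:

    import hashlib

    def bloom_indices(item: str, size: int, hash_count: int):
        # Kirsch-Mitzenmacher style double hashing: index_i = (h1 + i * h2) mod size.
        h1 = int(hashlib.sha256(item.encode("utf-8")).hexdigest(), 16)
        h2 = int(hashlib.md5(item.encode("utf-8")).hexdigest(), 16)
        return [(h1 + seed * h2) % size for seed in range(hash_count)]

    # lookup() and add() walk the same index sequence, which keeps membership checks consistent.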
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for <fim_suffix> data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>item in
item in
FOR
complete_current_header_inner_block_completion
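The target span above ("item in") closes the `for` header of the set-handling branch in Validator.instantiate. Below is a minimal standalone sketch of that completed loop; the helper name instantiate_set and the default item type of int are illustrative simplifications, not names from the source file.

    def instantiate_set(data, item_type=int):
        # hypothetical helper mirroring the set branch of Validator.instantiate
        instantiated_items = set()
        for item in data:  # the "for item in data:" header the target completes
            instantiated_item = item_type(item)
            if not isinstance(instantiated_item, item_type):
                raise TypeError(
                    f"Item of type {type(item).__name__} does not match "
                    f"expected type {item_type.__name__}.")
            instantiated_items.add(instantiated_item)
        return instantiated_items

    print(instantiate_set(["1", "2", "2"]))  # -> {1, 2}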
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for <fim_suffix> in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>i, item
i, item
FOR
complete_current_header_inner_block_completion
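Here the target ("i, item") completes the `for i, item in enumerate(data):` header of the tuple branch, which pairs each positional element with the subscripted type expected at that index. A simplified sketch under those assumptions follows; instantiate_tuple and its defaults are illustrative rather than taken from the source.

    from typing import Any

    def instantiate_tuple(data, item_types=()):
        # hypothetical helper mirroring the tuple branch: one expected type per position
        if not item_types:
            item_types = (Any,) * len(data)
        instantiated_items = []
        for i, item in enumerate(data):  # the "for i, item in enumerate(data):" header
            expected = item_types[i]
            instantiated_items.append(item if expected is Any else expected(item))
        return tuple(instantiated_items)

    print(instantiate_tuple(["1", 2.5], (int, float)))  # -> (1, 2.5)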
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for <fim_suffix> in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if 
self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>item
item
FOR
complete_current_header_inner_block_completion
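This row's target is the single loop variable ("item") in the list branch, which instantiates each element and, when the target type is a concrete list subclass, wraps the result in that subclass. A toy sketch of that path; IntList and instantiate_list are made-up stand-ins, not part of validator.py.

    class IntList(list):
        """Toy stand-in for a user-defined list subclass used as the target type."""

    def instantiate_list(data, item_type=int, target_type=IntList):
        # hypothetical helper mirroring the list branch: instantiate each element, then wrap
        instantiated_items = []
        for item in data:  # "item" is the loop variable supplied by the target
            instantiated_items.append(item_type(item))
        # concrete list subclasses are instantiated directly, as in the source branch
        return target_type(instantiated_items)

    print(instantiate_list(["1", "2", "3"]))  # -> [1, 2, 3], held in an IntList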
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \ Type, Sequence, Tuple, Optional from pydantic import BaseModel, create_model import datetime class Validator: def __init__(self): # Extract types from collections and collections.abc collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)} abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)} # Filter out types that have dictionary-like methods self.dict_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'keys') and hasattr(cls, 'items') } self.list_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'append') and hasattr(cls, 'pop') } self.set_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, 'add') and hasattr(cls, 'discard') } # Add the general Sequence to list-like types # if python version is 3.9 or above, use collections.abc.Sequence if hasattr(collections.abc, 'Sequence'): self.list_like_types.add(collections.abc.Sequence) else: self.list_like_types.add(collections.Sequence) self.list_like_types.add(typing.List) # Add the general Mapping to dict-like types if hasattr(collections.abc, 'Mapping'): self.dict_like_types.add(collections.abc.Mapping) else: self.dict_like_types.add(collections.Mapping) self.dict_like_types.add(typing.Dict) # Add the general Set to set-like types if hasattr(collections.abc, 'Set'): self.set_like_types.add(collections.abc.Set) else: self.set_like_types.add(collections.Set) self.set_like_types.add(typing.Set) # Add the general Tuple to tuple-like types self.tuple_like_types = { cls for cls in collection_types.union(abc_collection_types) if hasattr(cls, '__getitem__') and hasattr(cls, '__len__') } self.tuple_like_types.add(typing.Tuple) def is_base_type(self, _type: Any) -> bool: """Determine if a type is a base type.""" return _type in {int, float, str, bool, None} def validate_base_type(self, value: Any, typ: Any) -> bool: """Validate base types.""" if typ is None: return value is None return isinstance(value, typ) def validate_output(self, output: str, type_definition: Any) -> bool: try: deserialized_output = json.loads(output) except json.JSONDecodeError: return False return self.check_type(deserialized_output, type_definition) def check_type(self, value: Any, type_definition: Any) -> bool: """ Validate a value against a type definition. 
Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """ if type_definition is Any: return True if self.is_base_type(type_definition): return self.validate_base_type(value, type_definition) origin = get_origin(type_definition) or type_definition args = get_args(type_definition) # Handle base types if self.is_base_type(origin): return self.validate_base_type(value, origin) if origin == Literal: return value in args if origin == Union: return any(self.check_type(value, union_type) for union_type in args) # Handle tuples if origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle lists if origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle more complex types that are collections and list-like if origin is list or issubclass(origin, tuple(self.list_like_types)): if not any(isinstance(value, t) for t in self.list_like_types): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle sets if origin == set: if not isinstance(value, set): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value) # Handle datetime if origin in [datetime.datetime, datetime.date, datetime.time]: # try to instantiate datetime try: obj = origin(**value) return True except: return False # Handle dictionaries if origin is dict or issubclass(origin, tuple(self.dict_like_types)): if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)): return False if args: if len(args) == 1: key_type = args[0] value_type = Any # General assumption; specific dict-like types might differ elif len(args) == 2: key_type, value_type = args else: key_type = value_type = Any else: key_type = value_type = Any return all( self.check_type(k, key_type) and self.check_type(v, value_type) for k, v in value.items() ) # Handle pydantic models if self.is_pydantic_model(origin): try: #temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False # get all required init arguments for origin # required arguments are the ones withouyt default values required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))] # check that all required arguments are in value and do type checking for arg in required_fields: # check if it is in value if arg not in value: return False # get the type of the argument arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False # check that all arguments in value are correct type # this is additional check, because the above check only checks required arguments for arg, obj in value.items(): if arg in required_fields: continue arg_type = origin.__annotations__[arg] if not self.check_type(value[arg], arg_type): return False #origin.parse_obj(value) return True except Exception as e: print(e) return False # Handle dataclasses if self.is_dataclass_instance(origin): try: # for field in dataclasses.fields(origin): # field_name = field.name # field_type = field.type # if field_name not in value or not 
self.check_type(value[field_name], field_type): # return False # return True obj = origin(**value) return dataclasses.asdict(obj) == value except: return False # Handle dataclasses and arbitrary class types if inspect.isclass(origin) and not self.is_base_type(origin): # Ensure the value is an instance of the class if not isinstance(value, origin): return False # Gather type hints from the class and its bases type_hints = {} for cls in reversed(origin.__mro__): type_hints.update(get_type_hints(cls)) # Validate each attribute of the class for attr, attr_type in type_hints.items(): attr_value = getattr(value, attr, None) if not self.check_type(attr_value, attr_type): return False return True return False @staticmethod def is_pydantic_model(cls): return hasattr(cls, 'parse_obj') @staticmethod def is_dataclass_instance(cls): return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__') @staticmethod def _is_subclass_of_generic(cls: Type, generic: Type) -> bool: """Determine if the class is a subclass of a generic type.""" try: return issubclass(cls, generic) and cls is not generic except TypeError: if not hasattr(cls, '__origin__'): return False return cls.__origin__ is generic @staticmethod def _is_generic(cls: Type) -> bool: """Check if the provided type is a generic.""" return hasattr(cls, "__origin__") def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]: """ Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that retains the type arguments. :return: Type chain """ if get_args(target_type): return get_args(target_type) for base in target_type.__bases__: args = self._get_recursive_args(base) if args: return args return () def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]: """ Navigate up the MRO to find the first generic base and its arguments. """ # First, check if target_type is a type annotation. # If so, directly return its origin and arguments. origin = get_origin(target_type) args = get_args(target_type) if origin and args: return origin, args # If target_type is a real class, then navigate its MRO. 
if hasattr(target_type, '__mro__'): if hasattr(target_type, '__orig_bases__'): for base <fim_suffix> target_type.__orig_bases__: if get_args(base): return base, get_args(base) for base in target_type.__mro__: if get_args(base): return base, get_args(base) return None, () def _is_list_like(self, target_type: Type) -> bool: """Determine if the target type is list-like.""" if target_type in {list, typing.List}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}: return True return False def _is_tuple_like(self, target_type: Type) -> bool: """Determine if the target type is tuple-like.""" if target_type in {tuple, typing.Tuple}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}: return True return False def _is_dict_like(self, target_type: Type) -> bool: """Determine if the target type is dict-like.""" if target_type in {dict, typing.Dict}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}: return True return False def _is_set_like(self, target_type: Type) -> bool: """Determine if the target type is set-like.""" if target_type in {set, typing.Set}: return True if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}: return True return False def instantiate(self, data: Any, target_type: Type) -> Any: """ Attempts to convert a JSON-compatible data structure into an instance of the specified type. Args: data: JSON-compatible data structure to instantiate the target type. target_type: The type to instantiate from the given data. Returns: An instance of the target type initialized with the data. """ # Handle None type if data is None: return None origin = get_origin(target_type) or target_type # If the target type is a built-in, attempt to instantiate and return if self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: return data try: return target_type(data) except (ValueError, TypeError): # Handle the special case where the string represents a float but we want an integer if target_type is int: try: return int(float(data)) except (ValueError, TypeError): pass if target_type is float: try: return int(float(data)) except (ValueError, TypeError): pass raise TypeError(f"Failed to instantiate {target_type} from provided data.") # special handling for datetime if origin == datetime.datetime: # try to instantiate datetime try: return datetime.datetime(**data) except: raise TypeError(f"Failed to instantiate {target_type} from provided data.") # check if origin is Union, if so, instantiate the first type that works if origin == Union: for arg in get_args(target_type): try: return self.instantiate(data, arg) except: continue raise TypeError(f"Failed to instantiate {target_type} from provided data.") # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary. 
if isinstance(data, dict): if inspect.isclass(target_type) and not self.is_base_type(target_type): # Special handling for dataclasses if is_dataclass(target_type): fields = [f.name for f in dataclasses.fields(target_type)] type_hints = get_type_hints(target_type) filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if k in fields} return target_type(**filtered_data) # Special handling for Pydantic models if issubclass(target_type, BaseModel): # instantiate the sub attributes for attr, attr_type in target_type.__annotations__.items(): if attr in data: data[attr] = self.instantiate(data[attr], attr_type) try: return target_type.model_validate(data) except AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data) # For general classes, attempt instantiation try: return target_type(**data) except TypeError: raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") # Handle dictionary-like types # Check if the target type is or inherits from defaultdict if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity, # but you might want to adapt this based on your needs. return defaultdict(int, instantiated_items) # Handle set-like dict types like OrderedDict # the first check needs to be done to ensure origin has the __mro__ attribute elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} return origin(instantiated_items) # Handle other dictionary-like types elif origin is dict or self._is_subclass_of_generic(origin, dict): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()} # If the target_type is a subclass of dict, return an instance of target_type if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type): return target_type(instantiated_dict) else: return dict(instantiated_dict) # Tuples aren't supported in JSONable types, so we look for lists instead if isinstance(data, list): try: # If the origin or target type is a list-like type, or if it implements a list-like collections type # e.g Sequence[int] if origin is list or self._is_subclass_of_generic(origin, list): base, item_types = self._find_generic_base_and_args(target_type) item_type = item_types[0] if item_types else Any instantiated_items = [] for item in data: # For each item, validate and instantiate it try: instantiated_item = self.instantiate(item, item_type) except ValueError: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") safe = self.check_type(instantiated_item, item_type) if not safe: raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") instantiated_items.append(instantiated_item) # If target_type is a subclass of list, return an instance of target_type if self._is_subclass_of_generic(target_type, list) 
and not self._is_generic(target_type): return target_type(instantiated_items) return instantiated_items # Handle tuples if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)): base, item_types = self._find_generic_base_and_args(target_type) instantiated_items = [] # If there are no subscripted types, assume Any if not item_types: item_types = (Any,) * len(data) for i, item in enumerate(data): # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_types[i]) instantiated_items.append(instantiated_item) # If the instantiated item does not match the expected type, raise an exception _type = item_types[i] if not isinstance(instantiated_item, _type): raise TypeError( f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.") # Convert the list of instantiated items to a tuple instantiated_tuple = tuple(instantiated_items) # If target_type is a subclass of tuple, return an instance of target_type if self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple) return instantiated_tuple # Handle sets if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)): base, item_type = self._find_generic_base_and_args(target_type) if not item_type: item_type = Any instantiated_items = set() for item in data: # For each item, validate and instantiate it instantiated_item = self.instantiate(item, item_type[0]) instantiated_items.add(instantiated_item) # If the instantiated item does not match the expected type, raise an exception if not isinstance(instantiated_item, item_type[0]): raise TypeError( f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") # If target_type is a subclass of set, return an instance of target_type if self._is_subclass_of_generic(target_type, set): return target_type(instantiated_items) return instantiated_items # Handle deques if origin is deque or (isinstance(origin, type) and issubclass(origin, set)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return deque(self.instantiate(item, item_type) for item in data) if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)): item_type = get_args(target_type)[0] if get_args(target_type) else Any return frozenset(self.instantiate(item, item_type) for item in data) except TypeError as e: print(e) raise TypeError(f"Failed to instantiate {target_type} from list. {e}") # If none of the above, return the data as-is return data <fim_middle>in
in
FOR
complete_current_header_inner_block_completion
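The target here is just the `in` keyword of the loop in _find_generic_base_and_args, which walks __orig_bases__ (and then the MRO) looking for the first base that still carries its type arguments. A small self-contained sketch, assuming Python 3.8+ for typing.get_args; TypedList is an illustrative class, not one from the source.

    from typing import List, get_args

    class TypedList(List[int]):
        """Concrete subclass that keeps List[int] in __orig_bases__."""

    def find_generic_base_and_args(target_type):
        # Prefer __orig_bases__, which preserves subscripted generics like List[int]
        if hasattr(target_type, '__orig_bases__'):
            for base in target_type.__orig_bases__:  # the "in" keyword is the target token
                if get_args(base):
                    return base, get_args(base)
        return None, ()

    print(find_generic_base_and_args(TypedList))  # -> (typing.List[int], (<class 'int'>,))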
<filename>tanuki_py/src/tanuki/language_models/openai_api.py<fim_prefix>from typing import List import logging import time # import abstract base class from openai import OpenAI from openai.types import CreateEmbeddingResponse from openai.types.fine_tuning import FineTuningJob from tanuki.language_models.llm_finetune_api_abc import LLM_Finetune_API from tanuki.models.embedding import Embedding from tanuki.language_models.embedding_api_abc import Embedding_API from tanuki.language_models.llm_api_abc import LLM_API import os from tanuki.constants import DEFAULT_DISTILLED_MODEL_NAME from tanuki.language_models.llm_configs.openai_config import OpenAIConfig from tanuki.models.finetune_job import FinetuneJob import copy OPENAI_URL = "https://api.openai.com/v1/chat/completions" import requests LLM_GENERATION_PARAMETERS = ["temperature", "top_p", "max_new_tokens", "frequency_penalty", "presence_penalty"] class OpenAI_API(LLM_API, Embedding_API, LLM_Finetune_API): def __init__(self) -> None: # initialise the abstract base class super().__init__() self.api_key = os.environ.get("OPENAI_API_KEY") self.client = None def embed(self, texts: List[str], model: OpenAIConfig, **kwargs) -> List[Embedding]: """ Generate embeddings for the provided texts using the specified OpenAI model. Lightweight wrapper over the OpenAI client. :param texts: A list of texts to embed. :param model: The model to use for embeddings. :return: A list of embeddings. """ self.check_api_key() try: response: CreateEmbeddingResponse = self.client.embeddings.create( input=texts, model=model.model_name, **kwargs ) assert response.object == "list" assert len(response.data) == len(texts) embeddings = [] for embedding_response in response.data: assert embedding_response.object == "embedding" embeddings.append(Embedding(embedding_response.embedding)) return embeddings except Exception as e: print(f"An error occurred: {e}") return None def generate(self, model, system_message, prompt, **kwargs): """ The main generation function, given the args, kwargs, function_modeler, function description and model type, generate a response Args model (OpenAIConfig): The model to use for generation. system_message (str): The system message to use for generation. prompt (str): The prompt to use for generation. kwargs (dict): Additional generation parameters. 
""" self.check_api_key() temperature = kwargs.get("temperature", 0.1) top_p = kwargs.get("top_p", 1) frequency_penalty = kwargs.get("frequency_penalty", 0) presence_penalty = kwargs.get("presence_penalty", 0) max_new_tokens = kwargs.get("max_new_tokens") # check if there are any generation parameters that are not supported unsupported_params = [param for param in kwargs.keys() if param not in LLM_GENERATION_PARAMETERS] if len(unsupported_params) > 0: # log warning logging.warning(f"Unused generation parameters sent as input: {unsupported_params}."\ f"For OpenAI, only the following parameters are supported: {LLM_GENERATION_PARAMETERS}") params = { "model": model.model_name, "temperature": temperature, "max_tokens": max_new_tokens, "top_p": top_p, "frequency_penalty": frequency_penalty, "presence_penalty": presence_penalty, } if model.parsing_helper_tokens["start_token"]: prompt += model.parsing_helper_tokens["start_token"] messages = [ { "role": "system", "content": system_message }, { "role": "user", "content": prompt } ] params["messages"] = messages counter = 0 choice = None # initiate response so exception logic doesnt error out when checking for error in response response = {} while <fim_suffix> 5: try: openai_headers = { "Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json", } response = requests.post( OPENAI_URL, headers=openai_headers, json=params, timeout=50 ) response = response.json() choice = response["choices"][0]["message"]["content"].strip("'") break except Exception as e: if ("error" in response and "code" in response["error"] and response["error"]["code"] == 'invalid_api_key'): raise Exception(f"The supplied OpenAI API key {self.api_key} is invalid") if counter == 5: raise Exception(f"OpenAI API failed to generate a response: {e}") counter += 1 time.sleep(2 ** counter) continue if not choice: raise Exception("OpenAI API failed to generate a response") if model.parsing_helper_tokens["end_token"]: # remove the end token from the choice choice = choice.split(model.parsing_helper_tokens["end_token"])[0] # check if starting token is in choice if model.parsing_helper_tokens["start_token"] in choice: # remove the starting token from the choice choice = choice.split(model.parsing_helper_tokens["start_token"])[-1] return choice def list_finetuned(self, model_config, limit=100, **kwargs) -> List[FinetuneJob]: self.check_api_key() response = self.client.fine_tuning.jobs.list(limit=limit) jobs = [] for job in response.data: finetune_job = self.create_finetune_job(job, model_config) jobs.append(finetune_job) return jobs def get_finetuned(self, job_id, model_config: OpenAIConfig) -> FinetuneJob: self.check_api_key() response = self.client.fine_tuning.jobs.retrieve(job_id) finetune_job = self.create_finetune_job(response, model_config= model_config) return finetune_job def finetune(self, file, suffix, model_config, **kwargs) -> FinetuneJob: self.check_api_key() # Use the stream as a file response = self.client.files.create(file=file, purpose='fine-tune') training_file_id = response.id if not model_config.base_model_for_sft: model_config.base_model_for_sft = DEFAULT_DISTILLED_MODEL_NAME # submit the finetuning job finetuning_response: FineTuningJob = self.client.fine_tuning.jobs.create(training_file=training_file_id, model=model_config.base_model_for_sft, suffix=suffix) finetune_job = self.create_finetune_job(finetuning_response, model_config) return finetune_job def create_finetune_job(self, response: FineTuningJob, model_config: OpenAIConfig) -> FinetuneJob: 
finetuned_model_config = copy.deepcopy(model_config) finetuned_model_config.model_name = response.fine_tuned_model finetune_job = FinetuneJob(response.id, response.status, finetuned_model_config) return finetune_job def check_api_key(self): # check if api key is not none if not self.api_key: # try to get the api key from the environment, maybe it has been set later self.api_key = os.getenv("OPENAI_API_KEY") if not self.api_key: raise ValueError("OpenAI API key is not set") if not self.client: self.client = OpenAI(api_key=self.api_key) <fim_middle>counter <=
counter <=
WHILE
complete_current_header_inner_block_completion
<filename>UHGEval/uhgeval/dataset/truthfulqa.py<fim_prefix># @Author : YeZhaohui Wang # @Email : [email protected] import csv import json import os import random from uhgeval.dataset.base import BaseDataset class TruthfunQAGeneration(BaseDataset): def __init__(self, path: str, shuffle: bool = False, seed: int = 22): self.data = [] if os.path.isfile(path): with open(path, 'r', encoding='utf-8-sig') as file: csv_reader = csv.DictReader(file) id = 1 for row in csv_reader: row['id'] = id id += 1 self.data.append(row) if shuffle: random.seed(seed) random.shuffle(self.data) def __len__(self) -> int: return len(self.data) def __getitem__(self, key: int | slice) -> dict | list[dict]: return self.data[key] def load(self) -> list[dict]: return self.data[:] class TruthfunQAMC1(BaseDataset): def __init__(self, path: str, shuffle: bool = False, seed: int = 22): self.data = [<fim_suffix>] id = 1 if os.path.isfile(path): with open(path, encoding='utf-8') as f: self.data = json.load(f) for row in self.data: row['id'] = id id += 1 if shuffle: random.seed(seed) random.shuffle(self.data) def __len__(self) -> int: return len(self.data) def __getitem__(self, key: int | slice) -> dict | list[dict]: return self.data[key] def load(self) -> list[dict]: return self.data[:] class TruthfunQAMC2(BaseDataset): def __init__(self, path: str, shuffle: bool = False, seed: int = 22): self.data = [] id = 1 if os.path.isfile(path): with open(path, encoding='utf-8') as f: self.data = json.load(f) for row in self.data: row['id'] = id id += 1 if shuffle: random.seed(seed) random.shuffle(self.data) def __len__(self) -> int: return len(self.data) def __getitem__(self, key: int | slice) -> dict | list[dict]: return self.data[key] def load(self) -> list[dict]: return self.data[:] <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song # @Email : [email protected] from typing import Callable import evaluate import jieba from loguru import logger from text2vec import Similarity def catch_all_exceptions(func): def wrapper(*args, **kwargs): try: result = func(*args, **kwargs) return result except Exception as e: logger.warning(repr(e)) return wrapper @catch_all_exceptions def bleu4_score( continuation: str, reference: str, with_penalty = False ) -> float: import math from nltk.translate.bleu_score import sentence_bleu # Tokenize the continuation and reference texts using the custom tokenizer function continuation_tokens = custom_tokenizer(continuation) reference_tokens = custom_tokenizer(reference) # Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25)) # If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty if with_penalty: # Calculate the length of the reference and continuation texts reference_length = len(reference_tokens) continuation_length = len(continuation_tokens) # Calculate the brevity penalty factor if continuation_length > reference_length: brevity_penalty = 1 else: brevity_penalty = math.exp(1 - (reference_length / continuation_length)) # Adjust the BLEU score with the brevity penalty bleu_score = bleu_score * brevity_penalty return bleu_score @catch_all_exceptions def rougeL_score( continuation: str, reference: str ) -> float: f = lambda text: list(jieba.cut(text)) rouge = evaluate.load('uhgeval/.cache/huggingface/rouge') results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL']) score = results['rougeL'] return score @catch_all_exceptions def kw_precision( continuation: str, reference: str, kw_extracter: Callable[[str], list[str]], with_kw_list: bool = True ) -> float | tuple[float, list[str], list[str]]: """Measure the rationality of a generated continuation sentence with respect to the original news object.""" kws = kw_extracter(continuation) if len(kws) == 0: return 0, [], [] if with_kw_list else 0 appeared_kws = [kw for kw in kws if kw in reference] precision = len(appeared_kws) / len(kws) return precision, appeared_kws, kws if with_kw_list else precision @catch_all_exceptions def bert_score( continuation: str, reference: str ) -> float: """ Note: Requesting the network to connect to Hugging Face. """ sim = Similarity() score = sim.get_score(continuation, reference) return score def classifications( predictions: list[bool], references: list[bool] ) -> tuple[float, float, float, float]: """ Calculate accuracy, precision, recall, and F1 in a binary classification problem. Args: predictions (list[bool]): List of predicted values (0 or 1). references (list[bool]): List of true values (0 or 1). Returns: tuple: Accuracy, precision, recall, and F1 scores. 
""" true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1) false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1) false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0) precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0 recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0 if precision + recall == 0: f1 = 0 else: f1 = 2 * (precision * recall) / (precision + recall) accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0 return accuracy, pre<fim_suffix>cision, recall, f1 <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song # @Email : [email protected] from typing import Callable import evaluate import jieba from loguru import logger from text2vec import Similarity def catch_all_exceptions(func): def wrapper(*args, **kwargs): try: result = func(*args, **kwargs) return result except Exception as e: logger.warning(repr(e)) return wrapper @catch_all_exceptions def bleu4_score( continuation: str, reference: str, with_penalty = False ) -> float: import math from nltk.translate.bleu_score import sentence_bleu # Tokenize the continuation and reference texts using the custom tokenizer function continuation_tokens = custom_tokenizer(continuation) reference_tokens = custom_tokenizer(reference) # Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25)) # If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty if with_penalty: # Calculate the length of the reference and continuation texts reference_length = len(reference_tokens) continuation_length = len(continuation_tokens) # Calculate the brevity penalty factor if continuation_length > reference_length: brevity_penalty = 1 else: brevity_penalty = math.exp(1 - (reference_length / continuation_length)) # Adjust the BLEU score with the brevity penalty bleu_score = bleu_score * brevity_penalty return bleu_score @catch_all_exceptions def rougeL_score( continuation: str, reference: str ) -> float: f = lambda text: list(jieba.cut(text)) rouge = evaluate.load('uhgeval/.cache/huggingface/rouge') results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL']) score = results['rougeL'] return score @catch_all_exceptions def kw_precision( continuation: str, reference: str, kw_extracter: Callable[[str], list[str]], with_kw_list: bool = True ) -> float | tuple[float, list[str], list[str]]: """Measure the rationality of a generated continuation sentence with respect to the original news object.""" kws = kw_extracter(continuation) if len(kws) == 0: return 0, [], [] if with_kw_list else 0 appeared_kws = [kw for kw in kws if kw in reference] precision = len(appeared_kws) / len(kws) return precision, appeared_kws, kws if with_kw_list else precision @catch_all_exceptions def bert_score( continuation: str, reference: str ) -> float: """ Note: Requesting the network to connect to Hugging Face. """ sim = Similarity() score = sim.get_score(continuation, reference) return score def classifications( predictions: list[bool], references: list[bool] ) -> tuple[float, float, float, float]: """ Calculate accuracy, precision, recall, and F1 in a binary classification problem. Args: predictions (list[bool]): List of predicted values (0 or 1). references (list[bool]): List of true values (0 or 1). Returns: tuple: Accuracy, precision, recall, and F1 scores. 
""" true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1) false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1) false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0) precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0 recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0 if precision + recall == 0: f1 = 0 else: f1 = 2 * (precision * recall) / (precision +<fim_suffix> recall) accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0 return accuracy, precision, recall, f1 <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song # @Email : [email protected] from typing import Callable import evaluate import jieba from loguru import logger from text2vec import Similarity def catch_all_exceptions(func): def wrapper(*args, **kwargs): try: result = func(*args, **kwargs) return result except Exception as e: logg<fim_suffix>er.warning(repr(e)) return wrapper @catch_all_exceptions def bleu4_score( continuation: str, reference: str, with_penalty = False ) -> float: import math from nltk.translate.bleu_score import sentence_bleu # Tokenize the continuation and reference texts using the custom tokenizer function continuation_tokens = custom_tokenizer(continuation) reference_tokens = custom_tokenizer(reference) # Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25)) # If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty if with_penalty: # Calculate the length of the reference and continuation texts reference_length = len(reference_tokens) continuation_length = len(continuation_tokens) # Calculate the brevity penalty factor if continuation_length > reference_length: brevity_penalty = 1 else: brevity_penalty = math.exp(1 - (reference_length / continuation_length)) # Adjust the BLEU score with the brevity penalty bleu_score = bleu_score * brevity_penalty return bleu_score @catch_all_exceptions def rougeL_score( continuation: str, reference: str ) -> float: f = lambda text: list(jieba.cut(text)) rouge = evaluate.load('uhgeval/.cache/huggingface/rouge') results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL']) score = results['rougeL'] return score @catch_all_exceptions def kw_precision( continuation: str, reference: str, kw_extracter: Callable[[str], list[str]], with_kw_list: bool = True ) -> float | tuple[float, list[str], list[str]]: """Measure the rationality of a generated continuation sentence with respect to the original news object.""" kws = kw_extracter(continuation) if len(kws) == 0: return 0, [], [] if with_kw_list else 0 appeared_kws = [kw for kw in kws if kw in reference] precision = len(appeared_kws) / len(kws) return precision, appeared_kws, kws if with_kw_list else precision @catch_all_exceptions def bert_score( continuation: str, reference: str ) -> float: """ Note: Requesting the network to connect to Hugging Face. """ sim = Similarity() score = sim.get_score(continuation, reference) return score def classifications( predictions: list[bool], references: list[bool] ) -> tuple[float, float, float, float]: """ Calculate accuracy, precision, recall, and F1 in a binary classification problem. Args: predictions (list[bool]): List of predicted values (0 or 1). references (list[bool]): List of true values (0 or 1). Returns: tuple: Accuracy, precision, recall, and F1 scores. 
""" true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1) false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1) false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0) precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0 recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0 if precision + recall == 0: f1 = 0 else: f1 = 2 * (precision * recall) / (precision + recall) accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0 return accuracy, precision, recall, f1 <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song # @Email : [email protected] from typing import Callable import evaluate import jieba from loguru import logger from text2vec import Similarity def catch_all_exceptions(func): def wrapper(*args, **kwargs): try: result = func(*args, **kwargs) return result except Exception as e: logger.warning(repr(e)) return wrapper @catch_all_exceptions def bleu4_score( continuation: str, reference: str, with_penalty = False ) -> float: import math from nltk.translate.bleu_score import sentence_bleu # Tokenize the continuation and reference texts using the custom tokenizer function continuation_tokens = custom_tokenizer(continuation) reference_tokens = custom_tokenizer(reference) # Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25)) # If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty if with_penalty: # Calculate the length of the reference and continuation texts reference_length = len(reference_tokens) continuation_length = len(continuation_tokens) # Calculate the brevity penalty factor if continuation_length > reference_length: brevity_penalty = 1 else: brevity_penalty = math.exp(1 - (reference_length / continuation_length)) # Adjust the BLEU score with the brevity penalty bleu_score = bleu_score * brevity_penalty return bleu_score @catch_all_exceptions def rougeL_score( continuation: str, reference: str ) -> float: f = lambda text: list(jieba.cut(text)) rouge = evaluate.load('uhgeval/.cache/huggingface/rouge') results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL']) score = results['rougeL'] return score @catch_all_exceptions def kw_precision( continuation: str, reference: str, kw_extracter: Callable[[str], list[str]], with_kw_list: bool = True ) -> float | tuple[float, list[str], list[str]]: """Measure the rationality of a generated continuation sentence with respect to the original news object.""" kws = kw_extracter(continuation) if len(kws) == 0: return 0, [], [] if with_kw_list else 0 appeared_kws = [kw for kw in kws if kw in reference] precision = len(appeared_kws) / len(kws) return precision, appeared_kws, kws if with_kw_list else precision @catch_all_exceptions def bert_score( continuation: str, reference: str ) -> float: """ Note: Requesting the network to connect to Hugging Face. """ sim = Similarity() score = sim.get_score(continuation, reference) return score def classifications( predictions: list[bool], references: list[bool] ) -> tuple[float, float, float, float]: """ Calculate accuracy, precision, recall, and F1 in a binary classification problem. Args: predictions (list[bool]): List of predicted values (0 or 1). references (list[bool]): List of true values (0 or 1). Returns: tuple: Accuracy, precision, recall, and F1 scores. 
""" true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1) false_positi<fim_suffix>ve = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1) false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0) precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0 recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0 if precision + recall == 0: f1 = 0 else: f1 = 2 * (precision * recall) / (precision + recall) accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0 return accuracy, precision, recall, f1 <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song # @Email : [email protected] from typing import Callable import evaluate import jieba from loguru import logger from text2vec import Similarity def catch_all_exceptions(func): def wrapper(*args, **kwargs): try: result = func(*args, **kwargs) return result except Exception as e: logger.warning(repr(e)) return wrapper @catch_all_exceptions def bleu4_score( continuation: str, reference: str, with_penalty = False ) -> float: import math from nltk.translate.bleu_score import sentence_bleu # Tokenize the continuation and reference texts using the custom tokenizer function continuation_tokens = custom_tokenizer(continuation) reference_tokens = custom_tokenizer(reference) # Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25)) # If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty if with_penalty: # Calculate the length of the reference and continuation texts reference_length = len(reference_tokens) continuation_length = len(continuation_tokens) # Calculate the brevity penalty factor if continuation_length > reference_length: brevity_penalty = 1 else: brevity_penalty = math.exp(1 - (reference_length / continuation_length)) # Adjust the BLEU score with the brevity penalty bleu_score = bleu_score * brevity_penalty return bleu_score @catch_all_exceptions def rougeL_score( continuation: str, reference: str ) -> float: f = lambda text: list(jieba.cut(text)) rouge = evaluate.load('uhgeval/.cache/huggingface/rouge') results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL']) score = results['rougeL'] return score @catch_all_exceptions def kw_precision( continuation: str, reference: str, kw_extracter: Callable[[str], list[str]], with_kw_list: bool = True ) -> float | tuple[float, list[str], list[str]]: """Measure the rationality of a generated continuation sentence with respect to the original news object.""" kws = kw_extracter(continuation) if len(kws) == 0: return 0, [], [] if with_kw_list else 0 appeared_kws = [kw for kw in kws if kw in reference] precision = len(appeared_kws) / len(kws) return precision, appeared_kws, kws if with_kw_list else precision @catch_all_exceptions def bert_score( continuation: str, reference: str ) -> float: """ Note: Requesting the network to connect to Hugging Face. """ sim = Similarity() score = sim.get_score(continuation, reference) return score def classifications( predictions: list[bool], references: list[bool] ) -> tuple[float, float, float, float]: """ Calculate accuracy, precision, recall, and F1 in a binary classification problem. Args: predictions (list[bool]): List of predicted values (0 or 1). references (list[bool]): List of true values (0 or 1). Returns: tuple: Accuracy, precision, recall, and F1 scores. 
""" true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1) false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1) false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0) precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0 recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0 if precision + recall == 0: f1 = 0 else: f1 = 2 * (precision * recall) / (precision + recall) accuracy = sum(1 for a, b in zip(references, predictions) if <fim_suffix>a == b) / len(predictions) if len(predictions) > 0 else 0 return accuracy, precision, recall, f1 <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song # @Email : [email protected] from typing import Callable import evaluate import jieba from loguru import logger from text2vec import Similarity def catch_all_exceptions(func): def wrapper(*args, **kwargs): try: result = func(*args, **kwargs) return result except Exception as e: logger.warning(repr(e)) return wrapper @catch_all_exceptions def bleu4_score( continuation: str, reference: str, with_penalty = False ) -> float: import math from nltk.translate.bleu_score import sentence_bleu # Tokenize the continuation and reference texts using the custom tokenizer function continuation_tokens = custom_tokenizer(continuation) reference_tokens = custom_tokenizer(reference) # Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25)) # If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty if with_penalty: # Calculate the length of the reference and continuation texts reference_length = len(reference_tokens) continuation_length = len(continuation_tokens) # Calculate the brevity penalty factor if continuation_length > reference_length: brevity_penalty = 1 else: brevity_penalty = math.exp(1 - (reference_length / continuation_length)) # Adjust the BLEU score with the brevity penalty bleu_score = bleu_score * brevity_penalty return bleu_score @catch_all_exceptions def rougeL_score( continuation: str, reference: str ) -> float: f = lambda text: list(jieba.cut(text)) rouge = evaluate.load('uhgeval/.cache/huggingface/rouge') results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL']) score = results['rougeL'] return score @catch_all_exceptions def kw_precision( continuation: str, reference: str, kw_extracter: Callable[[str], list[str]], with_kw_list: bool = True ) -> float | tuple[float, list[str], list[str]]: """Measure the rationality of a generated continuation sentence with respect to the original news object.""" kws = kw_extracter(continuation) if len(kws) == 0: return 0, [], [] if with_kw_list else 0 appeared_kws = [kw for kw in kws if kw in reference] precision = len(appeared_kws) / len(kws) return precision, appeared_kws, kws if with_kw_list else precision @catch_all_exceptions def bert_score( continuation: str, reference: str ) -> float: """ Note: Requesting the network to connect to Hugging Face. """ sim = Similarity() score = sim.get_score(continuation, reference) return score def classifications( predictions: list[bool], references: list[bool] ) -> tuple[float, float, float, float]: """ Calculate accuracy, precision, recall, and F1 in a binary classification problem. Args: predictions (list[bool]): List of predicted values (0 or 1). references (list[bool]): List of true values (0 or 1). Returns: tuple: Accuracy, precision, recall, and F1 scores. 
""" true_positive = sum(1 fo<fim_suffix>r a, b in zip(references, predictions) if a == 1 and b == 1) false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1) false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0) precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0 recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0 if precision + recall == 0: f1 = 0 else: f1 = 2 * (precision * recall) / (precision + recall) accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0 return accuracy, precision, recall, f1 <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song # @Email : [email protected] from typing import Callable import evaluate import jieba from loguru import logger from text2vec import Similarity def catch_all_exceptions(func): def wrapper(*args, **kwargs): try: result = func(*args, **kwargs) return result except Exception as e: logger.warning(repr(e)) return wrapper @catch_all_exceptions def bleu4_score( continuation: str, reference: str, with_penalty = False ) -> float: import math from nltk.translate.bleu_score import sentence_bleu # Tokenize the continuation and reference texts using the custom tokenizer function continuation_tokens = custom_tokenizer(continuation) reference_tokens = custom_tokenizer(reference) # Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25)) # If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty if with_penalty: # Calculate the length of the reference and continuation texts reference_length = len(reference_tokens) continuation_length = len(continuation_tokens) # Calculate the brevity penalty factor if continuation_length > reference_length: brevity_penalty = 1 else: brevity_penalty = math.exp(1 - (reference_length / continuation_length)) # Adjust the BLEU score with the brevity penalty bleu_score = bleu_score * brevity_penalty return bleu_score @catch_all_exceptions def rougeL_score( continuation: str, reference: str ) -> float: f = lambda text: list(jieba.cut(text)) rouge = evaluate.load('uhgeval/.cache/huggingface/rouge') results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL']) score = results['rougeL'] return score @catch_all_exceptions def kw_precision( continuation: str, reference: str, kw_extracter: Callable[[str], list[str]], with_kw_list: bool = True ) -> float | tuple[float, list[str], list[str]]: """Measure the rationality of a generated continuation sentence with respect to the original news object.""" kws = kw_extracter(continuation) if len(kws) == 0: return 0, [], [] if with_kw_list else 0 appeared_kws = [kw for kw in kws if kw in reference] precision = len(appeared_kws) / len(kws) return precision, appeared_kws, kws if with_kw_list else precision @catch_all_exceptions def bert_score( continuation: str, reference: str ) -> float: """ Note: Requesting the network to connect to Hugging Face. """ sim = Similarity() score = sim.get_score(continuation, reference) return score def classifications( predictions: list[bool], references: list[bool] ) -> tuple[float, float, float, float]: """ Calculate accuracy, precision, recall, and F1 in a binary classification problem. Args: predictions (list[bool]): List of predicted values (0 or 1). references (list[bool]): List of true values (0 or 1). Returns: tuple: Accuracy, precision, recall, and F1 scores. 
""" true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1) false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1) false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0) precision = true_positive / (true_positive + false_positive) if (tru<fim_suffix>e_positive + false_positive) > 0 else 0 recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0 if precision + recall == 0: f1 = 0 else: f1 = 2 * (precision * recall) / (precision + recall) accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0 return accuracy, precision, recall, f1 <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
<filename>UHGEval/uhgeval/dataset/truthfulqa.py<fim_prefix># @Author : YeZhaohui Wang # @Email : [email protected] import csv import json import os import random from uhgeval.dataset.base import BaseDataset class TruthfunQAGeneration(BaseDataset): def __init__(self, path: str, shuffle: bool = False, seed: int = 22): self.data = [] if os.path.isfile(path): with open(path, 'r', encoding='utf-8-sig') as file: csv_reader = csv.DictReader(file) id = 1 for row in csv_reader: row['id'] = id id += 1 self.data.append(row) if shuffle: random.seed(seed) random.shuffle(self.data) def __len__(self) -> int: return len(self.data) def __getitem__(self, key: int | slice) -> dict | list[dict]: return self.data[key] def load(self) -> list[dict]: return self.data[:] class TruthfunQAMC1(BaseDataset): def __init__(self, path: str, shuffle: bool = False, seed: int = 22): self.data = [] id = 1 if os.path.isfile(path): with open(path, encoding='utf-8') as f: self.data = json.load(f) for row in self.data: row['id'] = id id += 1 if shuffle: random.seed(seed) random.shuffle(self.data) def __len__(self) -> int: return len(self.data) def __getitem__(self, key: int | slice) -> dict | list[dict]: return self.data[key] def load(self) -> list[dict]: return self.data[<fim_suffix>:] class TruthfunQAMC2(BaseDataset): def __init__(self, path: str, shuffle: bool = False, seed: int = 22): self.data = [] id = 1 if os.path.isfile(path): with open(path, encoding='utf-8') as f: self.data = json.load(f) for row in self.data: row['id'] = id id += 1 if shuffle: random.seed(seed) random.shuffle(self.data) def __len__(self) -> int: return len(self.data) def __getitem__(self, key: int | slice) -> dict | list[dict]: return self.data[key] def load(self) -> list[dict]: return self.data[:] <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song # @Email : [email protected] from typing import Callable import evaluate import jieba from loguru import logger from text2vec import Similarity def catch_all_exceptions(func): def wrapper(*args, **kwargs): try: result = func(*args, **kwargs) retu<fim_suffix>rn result except Exception as e: logger.warning(repr(e)) return wrapper @catch_all_exceptions def bleu4_score( continuation: str, reference: str, with_penalty = False ) -> float: import math from nltk.translate.bleu_score import sentence_bleu # Tokenize the continuation and reference texts using the custom tokenizer function continuation_tokens = custom_tokenizer(continuation) reference_tokens = custom_tokenizer(reference) # Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25)) # If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty if with_penalty: # Calculate the length of the reference and continuation texts reference_length = len(reference_tokens) continuation_length = len(continuation_tokens) # Calculate the brevity penalty factor if continuation_length > reference_length: brevity_penalty = 1 else: brevity_penalty = math.exp(1 - (reference_length / continuation_length)) # Adjust the BLEU score with the brevity penalty bleu_score = bleu_score * brevity_penalty return bleu_score @catch_all_exceptions def rougeL_score( continuation: str, reference: str ) -> float: f = lambda text: list(jieba.cut(text)) rouge = evaluate.load('uhgeval/.cache/huggingface/rouge') results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL']) score = results['rougeL'] return score @catch_all_exceptions def kw_precision( continuation: str, reference: str, kw_extracter: Callable[[str], list[str]], with_kw_list: bool = True ) -> float | tuple[float, list[str], list[str]]: """Measure the rationality of a generated continuation sentence with respect to the original news object.""" kws = kw_extracter(continuation) if len(kws) == 0: return 0, [], [] if with_kw_list else 0 appeared_kws = [kw for kw in kws if kw in reference] precision = len(appeared_kws) / len(kws) return precision, appeared_kws, kws if with_kw_list else precision @catch_all_exceptions def bert_score( continuation: str, reference: str ) -> float: """ Note: Requesting the network to connect to Hugging Face. """ sim = Similarity() score = sim.get_score(continuation, reference) return score def classifications( predictions: list[bool], references: list[bool] ) -> tuple[float, float, float, float]: """ Calculate accuracy, precision, recall, and F1 in a binary classification problem. Args: predictions (list[bool]): List of predicted values (0 or 1). references (list[bool]): List of true values (0 or 1). Returns: tuple: Accuracy, precision, recall, and F1 scores. 
""" true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1) false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1) false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0) precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0 recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0 if precision + recall == 0: f1 = 0 else: f1 = 2 * (precision * recall) / (precision + recall) accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0 return accuracy, precision, recall, f1 <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song # @Email : [email protected] from typing import Callable import evaluate import jieba from loguru import logger from text2vec import Similarity def catch_all_exceptions(func): def wrapper(*args, **kwargs): try: result = func(*args, **kwargs) return result except Exception as e: logger.warning(repr(e)) return wrapper @catch_all_exceptions def bleu4_score( continuation: str, reference: str, with_penalty = False ) -> float: import math from nltk.translate.bleu_score import sentence_bleu # Tokenize the continuation and reference texts using the custom tokenizer function continuation_tokens = custom_tokenizer(continuation) reference_tokens = custom_tokenizer(reference) # Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25)) # If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty if with_penalty: # Calculate the length of the reference and continuation texts reference_length = len(reference_tokens) continuation_length = len(continuation_tokens) # Calculate the brevity penalty factor if continuation_length > reference_length: brevity_penalty = 1 else: brevity_penalty = math.exp(1 - (reference_length / continuation_length)) # Adjust the BLEU score with the brevity penalty bleu_score = bleu_score * brevity_penalty return bleu_score @catch_all_exceptions def rougeL_score( continuation: str, reference: str ) -> float: f = lambda text: list(jieba.cut(text)) rouge = evaluate.load('uhgeval/.cache/huggingface/rouge') results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL']) score = results['rougeL'] return score @catch_all_exceptions def kw_precision( continuation: str, reference: str, kw_extracter: Callable[[str], list[str]], with_kw_list: bool = True ) -> float | tuple[float, list[str], list[str]]: """Measure the rationality of a generated continuation sentence with respect to the original news object.""" kws = kw_extracter(continuation) if len(kws) == 0: return 0, [], [] if with_kw_list else 0 appeared_kws = [kw for kw in kws if kw in reference] precision = len(appeared_kws) / len(kws) return precision, appeared_kws, kws if with_kw_list else precision @catch_all_exceptions def bert_score( continuation: str, reference: str ) -> float: """ Note: Requesting the network to connect to Hugging Face. """ sim = Similarity() score = sim.get_score(continuation, reference) return score def classifications( predictions: list[bool], references: list[bool] ) -> tuple[float, float, float, float]: """ Calculate accuracy, precision, recall, and F1 in a binary classification problem. Args: predictions (list[bool]): List of predicted values (0 or 1). references (list[bool]): List of true values (0 or 1). Returns: tuple: Accuracy, precision, recall, and F1 scores. 
""" true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1) false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1) false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0) precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0 recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0 if pr<fim_suffix>ecision + recall == 0: f1 = 0 else: f1 = 2 * (precision * recall) / (precision + recall) accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0 return accuracy, precision, recall, f1 <fim_middle>
null
IF
complete_current_header_empty_completion
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song # @Email : [email protected] from typing import Callable import evaluate import jieba from loguru import logger from text2vec import Similarity def catch_all_exceptions(func): def wrapper(*args, **kwargs): tr<fim_suffix>y: result = func(*args, **kwargs) return result except Exception as e: logger.warning(repr(e)) return wrapper @catch_all_exceptions def bleu4_score( continuation: str, reference: str, with_penalty = False ) -> float: import math from nltk.translate.bleu_score import sentence_bleu # Tokenize the continuation and reference texts using the custom tokenizer function continuation_tokens = custom_tokenizer(continuation) reference_tokens = custom_tokenizer(reference) # Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25)) # If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty if with_penalty: # Calculate the length of the reference and continuation texts reference_length = len(reference_tokens) continuation_length = len(continuation_tokens) # Calculate the brevity penalty factor if continuation_length > reference_length: brevity_penalty = 1 else: brevity_penalty = math.exp(1 - (reference_length / continuation_length)) # Adjust the BLEU score with the brevity penalty bleu_score = bleu_score * brevity_penalty return bleu_score @catch_all_exceptions def rougeL_score( continuation: str, reference: str ) -> float: f = lambda text: list(jieba.cut(text)) rouge = evaluate.load('uhgeval/.cache/huggingface/rouge') results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL']) score = results['rougeL'] return score @catch_all_exceptions def kw_precision( continuation: str, reference: str, kw_extracter: Callable[[str], list[str]], with_kw_list: bool = True ) -> float | tuple[float, list[str], list[str]]: """Measure the rationality of a generated continuation sentence with respect to the original news object.""" kws = kw_extracter(continuation) if len(kws) == 0: return 0, [], [] if with_kw_list else 0 appeared_kws = [kw for kw in kws if kw in reference] precision = len(appeared_kws) / len(kws) return precision, appeared_kws, kws if with_kw_list else precision @catch_all_exceptions def bert_score( continuation: str, reference: str ) -> float: """ Note: Requesting the network to connect to Hugging Face. """ sim = Similarity() score = sim.get_score(continuation, reference) return score def classifications( predictions: list[bool], references: list[bool] ) -> tuple[float, float, float, float]: """ Calculate accuracy, precision, recall, and F1 in a binary classification problem. Args: predictions (list[bool]): List of predicted values (0 or 1). references (list[bool]): List of true values (0 or 1). Returns: tuple: Accuracy, precision, recall, and F1 scores. 
""" true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1) false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1) false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0) precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0 recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0 if precision + recall == 0: f1 = 0 else: f1 = 2 * (precision * recall) / (precision + recall) accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0 return accuracy, precision, recall, f1 <fim_middle>
null
TRY
complete_current_header_empty_completion
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song # @Email : [email protected] from typing import Callable import evaluate import jieba from loguru import logger from text2vec import Similarity def catch_all_exceptions(func): def wrapper(*args, **kwargs): try: result = func(*args, **kwargs) return result except Exception as <fim_suffix>e: logger.warning(repr(e)) return wrapper @catch_all_exceptions def bleu4_score( continuation: str, reference: str, with_penalty = False ) -> float: import math from nltk.translate.bleu_score import sentence_bleu # Tokenize the continuation and reference texts using the custom tokenizer function continuation_tokens = custom_tokenizer(continuation) reference_tokens = custom_tokenizer(reference) # Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25)) # If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty if with_penalty: # Calculate the length of the reference and continuation texts reference_length = len(reference_tokens) continuation_length = len(continuation_tokens) # Calculate the brevity penalty factor if continuation_length > reference_length: brevity_penalty = 1 else: brevity_penalty = math.exp(1 - (reference_length / continuation_length)) # Adjust the BLEU score with the brevity penalty bleu_score = bleu_score * brevity_penalty return bleu_score @catch_all_exceptions def rougeL_score( continuation: str, reference: str ) -> float: f = lambda text: list(jieba.cut(text)) rouge = evaluate.load('uhgeval/.cache/huggingface/rouge') results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL']) score = results['rougeL'] return score @catch_all_exceptions def kw_precision( continuation: str, reference: str, kw_extracter: Callable[[str], list[str]], with_kw_list: bool = True ) -> float | tuple[float, list[str], list[str]]: """Measure the rationality of a generated continuation sentence with respect to the original news object.""" kws = kw_extracter(continuation) if len(kws) == 0: return 0, [], [] if with_kw_list else 0 appeared_kws = [kw for kw in kws if kw in reference] precision = len(appeared_kws) / len(kws) return precision, appeared_kws, kws if with_kw_list else precision @catch_all_exceptions def bert_score( continuation: str, reference: str ) -> float: """ Note: Requesting the network to connect to Hugging Face. """ sim = Similarity() score = sim.get_score(continuation, reference) return score def classifications( predictions: list[bool], references: list[bool] ) -> tuple[float, float, float, float]: """ Calculate accuracy, precision, recall, and F1 in a binary classification problem. Args: predictions (list[bool]): List of predicted values (0 or 1). references (list[bool]): List of true values (0 or 1). Returns: tuple: Accuracy, precision, recall, and F1 scores. 
""" true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1) false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1) false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0) precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0 recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0 if precision + recall == 0: f1 = 0 else: f1 = 2 * (precision * recall) / (precision + recall) accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0 return accuracy, precision, recall, f1 <fim_middle>
null
CATCH
complete_current_header_empty_completion
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix># @Author : Shichao Song # @Email : [email protected] from typing import Callable import evaluate import jieba from loguru import logger from text2vec import Similarity def catch_all_exceptions(func): def wrapper(*args, **kwargs): try: result = func(*args, **kwargs) return result except Exception as e: logger.warning(repr(e)) return wrapper @catch_all_exceptions def bleu4_score( continuation: str, reference: str, with_penalty = False ) -> float: import math from nltk.translate.bleu_score import sentence_bleu # Tokenize the continuation and reference texts using the custom tokenizer function continuation_tokens = custom_tokenizer(continuation) reference_tokens = custom_tokenizer(reference) # Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25)) # If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty if with_penalty: # Calculate the length of the reference and continuation texts reference_length = len(reference_tokens) continuation_length = len(continuation_tokens) # Calculate the brevity penalty factor if continuation_length > reference_length: brevity_penalty = 1 else: brevity_penalty = math.exp(1 - (reference_length / continuation_length)) # Adjust the BLEU score with the brevity penalty bleu_score = bleu_score * brevity_penalty return bleu_score @catch_all_exceptions def rougeL_score( continuation: str, reference: str ) -> float: f = lambda text: list(jieba.cut(text)) rouge = evaluate.load('uhgeval/.cache/huggingface/rouge') results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL']) score = results['rougeL'] return score @catch_all_exceptions def kw_precision( continuation: str, reference: str, kw_extracter: Callable[[str], list[str]], with_kw_list: bool = True ) -> float | tuple[float, list[str], list[str]]: """Measure the rationality of a generated continuation sentence with respect to the original news object.""" kws = kw_extracter(continuation) if len(kws) == 0: return 0, [], [] if with_kw_list else 0 appeared_kws = [kw for kw in kws if kw in reference] precision = len(appeared_kws) / len(kws) return precision, appeared_kws, kws if with_kw_list else precision @catch_all_exceptions def bert_score( continuation: str, reference: str ) -> float: """ Note: Requesting the network to connect to Hugging Face. """ sim = Similarity() score = sim.get_score(continuation, reference) return score def classifications( predictions: list[bool], references: list[bool] ) -> tuple[float, float, float, float]: "<fim_suffix>"" Calculate accuracy, precision, recall, and F1 in a binary classification problem. Args: predictions (list[bool]): List of predicted values (0 or 1). references (list[bool]): List of true values (0 or 1). Returns: tuple: Accuracy, precision, recall, and F1 scores. 
""" true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1) false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1) false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0) precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0 recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0 if precision + recall == 0: f1 = 0 else: f1 = 2 * (precision * recall) / (precision + recall) accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0 return accuracy, precision, recall, f1 <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
<filename>UniRef/detectron2/structures/image_list.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. from __future__ import division from typing import Any, List, Tuple import torch from torch import device from torch.nn import functional as F from detectron2.layers.wrappers import shapes_to_tensor class ImageList(object): """ Structure that holds a list of images (of possibly varying sizes) as a single tensor. This works by padding the images to the same size. The original sizes of each image is stored in `image_sizes`. Attributes: image_sizes (list[tuple[int, int]]): each tuple is (h, w). During tracing, it becomes list[Tensor] instead. """ def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]): """ Arguments: tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1 image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can be smaller than (H, W) due to padding. """ self.tensor = tensor self.image_sizes = image_sizes def __len__(self) -> int: return len(self.image_sizes) def __getitem__(self, idx) -> torch.Tensor: "<fim_suffix>"" Access the individual image in its original size. Args: idx: int or slice Returns: Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1 """ size = self.image_sizes[idx] return self.tensor[idx, ..., : size[0], : size[1]] @torch.jit.unused def to(self, *args: Any, **kwargs: Any) -> "ImageList": cast_tensor = self.tensor.to(*args, **kwargs) return ImageList(cast_tensor, self.image_sizes) @property def device(self) -> device: return self.tensor.device @staticmethod def from_tensors( tensors: List[torch.Tensor], size_divisibility: int = 0, pad_value: float = 0.0 ) -> "ImageList": """ Args: tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or (C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded to the same shape with `pad_value`. size_divisibility (int): If `size_divisibility > 0`, add padding to ensure the common height and width is divisible by `size_divisibility`. This depends on the model and many models need a divisibility of 32. pad_value (float): value to pad Returns: an `ImageList`. """ assert len(tensors) > 0 assert isinstance(tensors, (tuple, list)) for t in tensors: assert isinstance(t, torch.Tensor), type(t) assert t.shape[:-2] == tensors[0].shape[:-2], t.shape image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors] image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes] max_size = torch.stack(image_sizes_tensor).max(0).values if size_divisibility > 1: stride = size_divisibility # the last two dims are H,W, both subject to divisibility requirement max_size = (max_size + (stride - 1)).div(stride, rounding_mode="floor") * stride # handle weirdness of scripting and tracing ... if torch.jit.is_scripting(): max_size: List[int] = max_size.to(dtype=torch.long).tolist() else: if torch.jit.is_tracing(): image_sizes = image_sizes_tensor if len(tensors) == 1: # This seems slightly (2%) faster. 
# TODO: check whether it's faster for multiple images as well image_size = image_sizes[0] padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]] batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0) else: # max_size can be a tensor in tracing mode, therefore convert to list batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size) batched_imgs = tensors[0].new_full(batch_shape, pad_value) for img, pad_img in zip(tensors, batched_imgs): pad_img[..., : img.shape[-2], : img.shape[-1]].copy_(img) return ImageList(batched_imgs.contiguous(), image_sizes) <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
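The row above embeds detectron2's ImageList, whose from_tensors pads variable-size images into one batched tensor. As a minimal usage sketch (assuming detectron2 and torch are installed; the image sizes and variable names are illustrative, not taken from the dataset):

import torch
from detectron2.structures import ImageList

# Two images with different spatial sizes, each (C, H, W).
imgs = [torch.rand(3, 480, 640), torch.rand(3, 500, 380)]

# Pad to a common size whose H and W are divisible by 32, filling with 0.0.
batched = ImageList.from_tensors(imgs, size_divisibility=32, pad_value=0.0)

print(batched.tensor.shape)   # torch.Size([2, 3, 512, 640]) -- the padded batch
print(batched.image_sizes)    # [(480, 640), (500, 380)] -- original (h, w) per image
img1 = batched[1]             # un-padded view of the second image: (3, 500, 380)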
<filename>UniRef/detectron2/solver/build.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import itertools import logging from collections import defaultdict from enum import Enum from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Type, Union import torch from fvcore.common.param_scheduler import CosineParamScheduler, MultiStepParamScheduler from detectron2.config import CfgNode from .lr_scheduler import LRMultiplier, WarmupParamScheduler _GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]] _GradientClipper = Callable[[_GradientClipperInput], None] class GradientClipType(Enum): VALUE = "value" NORM = "norm" def _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper: """ Creates gradient clipping closure to clip by value or by norm, according to the provided config. """ cfg = copy.deepcopy(cfg) def clip_grad_norm(p: _GradientClipperInput): torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE) def clip_grad_value(p: _GradientClipperInput): torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE) _GRADIENT_CLIP_TYPE_TO_CLIPPER = { GradientClipType.VALUE: clip_grad_value, GradientClipType.NORM: clip_grad_norm, } return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)] def _generate_optimizer_class_with_gradient_clipping( optimizer: Type[torch.optim.Optimizer], *, per_param_clipper: Optional[_GradientClipper] = None, global_clipper: Optional[_GradientClipper] = None, ) -> Type[torch.optim.Optimizer]: """ Dynamically creates a new type that inherits the type of a given instance and overrides the `step` method to add gradient clipping """ assert ( per_param_clipper is None or global_clipper is None ), "Not allowed to use both per-parameter clipping and global clipping" def optimizer_wgc_step(self, closure=None): if per_param_clipper is not None: for group in self.param_groups: for p in group["params"]: per_param_clipper(p) else: # global clipper for future use with detr # (https://github.com/facebookresearch/detr/pull/287) all_params = itertools.chain(*[g["params"] for g in self.param_groups]) global_clipper(all_params) super(type(self), self).step(closure) OptimizerWithGradientClip = type( optimizer.__name__ + "WithGradientClip", (optimizer,), {"step": optimizer_wgc_step}, ) return OptimizerWithGradientClip def maybe_add_gradient_clipping( cfg: CfgNode, optimizer: Type[torch.optim.Optimizer] ) -> Type[torch.optim.Optimizer]: """ If gradient clipping is enabled through config options, wraps the existing optimizer type to become a new dynamically created class OptimizerWithGradientClip that inherits the given optimizer and overrides the `step` method to include gradient clipping. Args: cfg: CfgNode, configuration options optimizer: type. A subclass of torch.optim.Optimizer Return: type: either the input `optimizer` (if gradient clipping is disabled), or a subclass of it with gradient clipping included in the `step` method. 
""" if not cfg.SOLVER.CLIP_GRADIENTS.ENABLED: return optimizer if isinstance(optimizer, torch.optim.Optimizer): optimizer_type = type(optimizer) else: assert issubclass(optimizer, torch.optim.Optimizer), optimizer optimizer_type = optimizer grad_clipper = _create_gradient_clipper(cfg.SOLVER.CLIP_GRADIENTS) OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping( optimizer_type, per_param_clipper=grad_clipper ) if isinstance(optimizer, torch.optim.Optimizer): optimizer.__class__ = OptimizerWithGradientClip # a bit hacky, not recommended return optimizer else: return OptimizerWithGradientClip def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer: """ Build an optimizer from config. """ params = get_default_optimizer_params( model, base_lr=cfg.SOLVER.BASE_LR, weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM, bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR, weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS, ) return maybe_add_gradient_clipping(cfg, torch.optim.SGD)( params, lr=cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM, nesterov=cfg.SOLVER.NESTEROV, weight_decay=cfg.SOLVER.WEIGHT_DECAY, ) def get_default_optimizer_params( model: torch.nn.Module, base_lr: Optional[float] = None, weight_decay: Optional[float] = None, weight_decay_norm: Optional[float] = None, bias_lr_factor: Optional[float] = 1.0, weight_decay_bias: Optional[float] = None, overrides: Optional[Dict[str, Dict[str, float]]] = None, ) -> List[Dict[str, Any]]: """ Get default param list for optimizer, with support for a few types of overrides. If no overrides needed, this is equivalent to `model.parameters()`. Args: base_lr: lr for every group by default. Can be omitted to use the one in optimizer. weight_decay: weight decay for every group by default. Can be omitted to use the one in optimizer. weight_decay_norm: override weight decay for params in normalization layers bias_lr_factor: multiplier of lr for bias parameters. weight_decay_bias: override weight decay for bias parameters overrides: if not `None`, provides values for optimizer hyperparameters (LR, weight decay) for module parameters with a given name; e.g. ``{"embedding": {"lr": 0.01, "weight_decay": 0.1}}`` will set the LR and weight decay values for all module parameters named `embedding`. For common detection models, ``weight_decay_norm`` is the only option needed to be set. ``bias_lr_factor,weight_decay_bias`` are legacy settings from Detectron1 that are not found useful. Example: :: torch.optim.SGD(get_default_optimizer_params(model, weight_decay_norm=0), lr=0.01, weight_decay=1e-4, momentum=0.9) """ if overrides is None: overrides = {} defaults = {} if base_lr is not None: defaults["lr"] = base_lr if weight_decay is not None: defaults["weight_decay"] = weight_decay bias_overrides = {} if bias_lr_factor is not None and bias_lr_factor != 1.0: # NOTE: unlike Detectron v1, we now by default make bias hyperparameters # exactly the same as regular weights. 
if base_lr is None: raise ValueError("bias_lr_factor requires base_lr") bias_overrides["lr"] = base_lr * bias_lr_factor if weight_decay_bias is not None: bias_overrides["weight_decay"] = weight_decay_bias if len(bias_overrides): if "bias" in overrides: raise ValueError("Conflicting overrides for 'bias'") overrides["bias"] = bias_overrides norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module in model.modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if isinstance(module, norm_module_types) and weight_decay_norm is not None: hyperparams["weight_decay"] = weight_decay_norm hyperparams.update(overrides.get(module_param_name, {})) params.append({"params": [value], **hyperparams}) return reduce_param_groups(params) def _expand_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]: # Transform parameter groups into per-parameter structure. # Later items in `params` can overwrite parameters set in previous items. ret = defaultdict(dict) for item in params: assert "params" in item cur_params = {x: y for x, y in item.items() if x != "params"} for param in item["params"]: ret[param].update({"params": [param], **cur_params}) return list(ret.values()) def reduce_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]: # Reorganize the parameter groups and merge duplicated groups. # The number of parameter groups needs to be as small as possible in order # to efficiently use the PyTorch multi-tensor optimizer. Therefore instead # of using a parameter_group per single parameter, we reorganize the # parameter groups and merge duplicated groups. This approach speeds # up multi-tensor optimizer significantly. params = _expand_param_groups(params) groups = defaultdict(list) # re-group all parameter groups by their hyperparams for item in params: cur_params = tuple((x, y) for x, y in item.items() if x != "params") groups[cur_params].extend(item["params"]) ret = [] for param_keys, param_values in groups.items(): cur = {kv[0]: kv[1] for kv in param_keys} cur["params"] = param_values ret.append(cur) return ret def build_lr_scheduler( cfg: CfgNode, optimizer: torch.optim.Optimizer ) -> torch.optim.lr_scheduler._LRScheduler: "<fim_suffix>"" Build a LR scheduler from config. """ name = cfg.SOLVER.LR_SCHEDULER_NAME if name == "WarmupMultiStepLR": steps = [x for x in cfg.SOLVER.STEPS if x <= cfg.SOLVER.MAX_ITER] if len(steps) != len(cfg.SOLVER.STEPS): logger = logging.getLogger(__name__) logger.warning( "SOLVER.STEPS contains values larger than SOLVER.MAX_ITER. " "These values will be ignored." 
) sched = MultiStepParamScheduler( values=[cfg.SOLVER.GAMMA ** k for k in range(len(steps) + 1)], milestones=steps, num_updates=cfg.SOLVER.MAX_ITER, ) elif name == "WarmupCosineLR": end_value = cfg.SOLVER.BASE_LR_END / cfg.SOLVER.BASE_LR assert end_value >= 0.0 and end_value <= 1.0, end_value sched = CosineParamScheduler(1, end_value) else: raise ValueError("Unknown LR scheduler: {}".format(name)) sched = WarmupParamScheduler( sched, cfg.SOLVER.WARMUP_FACTOR, min(cfg.SOLVER.WARMUP_ITERS / cfg.SOLVER.MAX_ITER, 1.0), cfg.SOLVER.WARMUP_METHOD, ) return LRMultiplier(optimizer, multiplier=sched, max_iter=cfg.SOLVER.MAX_ITER) <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
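The row above carries detectron2's solver builders (build_optimizer, build_lr_scheduler, and the gradient-clipping wrapper). A minimal sketch of how they are typically wired together, assuming the default config from get_cfg(); the toy Linear model and the iteration count are placeholders, not a real training loop:

import torch
from detectron2.config import get_cfg
from detectron2.solver import build_optimizer, build_lr_scheduler

cfg = get_cfg()                          # defaults: SGD + WarmupMultiStepLR
model = torch.nn.Linear(16, 4)           # stand-in for a real detection model

optimizer = build_optimizer(cfg, model)  # SGD over merged per-parameter groups
scheduler = build_lr_scheduler(cfg, optimizer)

for _ in range(10):                      # toy iterations
    optimizer.zero_grad()
    model(torch.rand(2, 16)).sum().backward()
    optimizer.step()
    scheduler.step()                     # warmup + multi-step LR multiplier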
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy from typing import List import numpy as np import torch from detectron2.config import configurable from detectron2.structures import Boxes, Instances from detectron2.structures.boxes import pairwise_iou from ..config.config import CfgNode as CfgNode_ from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY @TRACKER_HEADS_REGISTRY.register() class BBoxIOUTracker(BaseTracker): """ A bounding box tracker to assign ID based on IoU between current and previous instances """ @configurable def __init__( self, *, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, track_iou_threshold: float = 0.5, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video track_iou_threshold: iou threshold, below this number a bbox pair is removed from tracking """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period self._track_iou_threshold = track_iou_threshold @classmethod def from_config(cls, cfg: CfgNode_): """ Old style initialization using CfgNode Args: cfg: D2 CfgNode, config file Return: dictionary storing arguments for __init__ method """ assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) return { "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", "video_height": video_height, "video_width": video_width, "max_num_instances": max_num_instances, "max_lost_frame_count": max_lost_frame_count, "min_box_rel_dim": min_box_rel_dim, "min_instance_period": min_instance_period, "track_iou_threshold": track_iou_threshold } def update(self, instances: Instances) -> Instances: """ See BaseTracker description """ if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: # calculate IoU of all bbox pairs iou_all = pairwise_iou( boxes1=instances.pred_boxes, boxes2=self._prev_instances.pred_boxes, ) # sort IoU in descending order bbox_pairs = self._create_prediction_pairs(instances, iou_all) # assign previous ID to current bbox if IoU > track_iou_threshold self._reset_fields() for bbox_pair in bbox_pairs: idx = bbox_pair["idx"] prev_id = bbox_pair["prev_id"] if idx in self._matched_idx \ or 
prev_id in self._matched_ID \ or bbox_pair["IoU"] < self._track_iou_threshold: continue instances.ID[idx] = prev_id instances.ID_period[idx] = bbox_pair["prev_period"] + 1 instances.lost_frame_count[idx] = 0 self._matched_idx.add(idx) self._matched_ID.add(prev_id) self._untracked_prev_idx.remove(bbox_pair["prev_idx"]) instances = self._assign_new_id(instances) instances = self._merge_untracked_instances(instances) self._prev_instances = copy.deepcopy(instances) return instances def _create_prediction_pairs( self, instances: Instances, iou_all: np.ndarray ) -> List: """ For all instances in previous and current frames, create pairs. For each pair, store index of the instance in current frame predcitions, index in previous predictions, ID in previous predictions, IoU of the bboxes in this pair, period in previous predictions. Args: instances: D2 Instances, for predictions of the current frame iou_all: IoU for all bboxes pairs Return: A list of IoU for all pairs """ bbox_pairs = [] for i in range(len(instances)): for j in range(len(self._prev_instances)): bbox_pairs.append( { "idx": i, "prev_idx": j, "prev_id": self._prev_instances.ID[j], "IoU": iou_all[i, j], "prev_period": self._prev_instances.ID_period[j], } ) return bbox_pairs def _initialize_extra_fields(self, instances: Instances) -> Instances: """ If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instances.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_period"): instances.set("ID_period", [None] * len(instances)) if not instances.has("lost_frame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is None: instances.ID = list(range(len(instances))) self._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _reset_fields(self): """ Before each uodate call, reset fields first """ self._matched_idx = set() self._matched_ID = set() self._untracked_prev_idx = set(range(len(self._prev_instances))) def _assign_new_id(self, instances: Instances) -> Instances: "<fim_suffix>"" For each untracked instance, assign a new id Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with new ID assigned """ untracked_idx = set(range(len(instances))).difference(self._matched_idx) for idx in untracked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _merge_untracked_instances(self, instances: Instances) -> Instances: """ For untracked previous instances, under certain condition, still keep them in tracking and merge with the current instances. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances merging current instances and instances from previous frame decided to keep tracking """ untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) for idx in self._untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._prev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pred_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
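The row above shows BBoxIOUTracker, which propagates instance IDs across frames by bounding-box IoU. A small sketch of ID assignment on a first frame (the boxes, classes, and scores are made up, and the printed IDs are indicative only):

import torch
from detectron2.structures import Boxes, Instances
from detectron2.tracking.bbox_iou_tracker import BBoxIOUTracker

tracker = BBoxIOUTracker(video_height=480, video_width=640, track_iou_threshold=0.5)

# First frame: two detections. With no previous frame, both get fresh IDs.
frame = Instances(
    (480, 640),
    pred_boxes=Boxes(torch.tensor([[10., 10., 100., 100.], [200., 200., 300., 300.]])),
    pred_classes=torch.tensor([0, 1]),
    scores=torch.tensor([0.9, 0.8]),
)
frame = tracker.update(frame)
print(frame.ID, frame.ID_period)   # e.g. [0, 1] [1, 1]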
<filename>UniRef/detectron2/structures/masks.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import itertools import numpy as np from typing import Any, Iterator, List, Union import pycocotools.mask as mask_util import torch from torch import device from detectron2.layers.roi_align import ROIAlign from detectron2.utils.memory import retry_if_cuda_oom from .boxes import Boxes def polygon_area(x, y): # Using the shoelace formula # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray: """ Args: polygons (list[ndarray]): each array has shape (Nx2,) height, width (int) Returns: ndarray: a bool mask of shape (height, width) """ if len(polygons) == 0: # COCOAPI does not support empty polygons return np.zeros((height, width)).astype(np.bool) rles = mask_util.frPyObjects(polygons, height, width) rle = mask_util.merge(rles) return mask_util.decode(rle).astype(np.bool) def rasterize_polygons_within_box( polygons: List[np.ndarray], box: np.ndarray, mask_size: int ) -> torch.Tensor: """ Rasterize the polygons into a mask image and crop the mask content in the given box. The cropped mask is resized to (mask_size, mask_size). This function is used when generating training targets for mask head in Mask R-CNN. Given original ground-truth masks for an image, new ground-truth mask training targets in the size of `mask_size x mask_size` must be provided for each predicted box. This function will be called to produce such targets. Args: polygons (list[ndarray[float]]): a list of polygons, which represents an instance. box: 4-element numpy array mask_size (int): Returns: Tensor: BoolTensor of shape (mask_size, mask_size) """ # 1. Shift the polygons w.r.t the boxes w, h = box[2] - box[0], box[3] - box[1] polygons = copy.deepcopy(polygons) for p in polygons: p[0::2] = p[0::2] - box[0] p[1::2] = p[1::2] - box[1] # 2. Rescale the polygons to the new box size # max() to avoid division by small number ratio_h = mask_size / max(h, 0.1) ratio_w = mask_size / max(w, 0.1) if ratio_h == ratio_w: for p in polygons: p *= ratio_h else: for p in polygons: p[0::2] *= ratio_w p[1::2] *= ratio_h # 3. Rasterize the polygons with coco api mask = polygons_to_bitmask(polygons, mask_size, mask_size) mask = torch.from_numpy(mask) return mask class BitMasks: """ This class stores the segmentation masks for all objects in one image, in the form of bitmaps. Attributes: tensor: bool Tensor of N,H,W, representing N instances in the image. """ def __init__(self, tensor: Union[torch.Tensor, np.ndarray]): """ Args: tensor: bool Tensor of N,H,W, representing N instances in the image. """ device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device) assert tensor.dim() == 3, tensor.size() self.image_size = tensor.shape[1:] self.tensor = tensor @torch.jit.unused def to(self, *args: Any, **kwargs: Any) -> "BitMasks": return BitMasks(self.tensor.to(*args, **kwargs)) @property def device(self) -> torch.device: return self.tensor.device @torch.jit.unused def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks": """ Returns: BitMasks: Create a new :class:`BitMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask. 2. 
`new_masks = masks[2:10]`: return a slice of masks. 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ if isinstance(item, int): return BitMasks(self.tensor[item].unsqueeze(0)) m = self.tensor[item] assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format( item, m.shape ) return BitMasks(m) @torch.jit.unused def __iter__(self) -> torch.Tensor: yield from self.tensor @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s def __len__(self) -> int: return self.tensor.shape[0] def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or non-empty (True). """ return self.tensor.flatten(1).any(dim=1) @staticmethod def from_polygon_masks( polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int ) -> "BitMasks": """ Args: polygon_masks (list[list[ndarray]] or PolygonMasks) height, width (int) """ if isinstance(polygon_masks, PolygonMasks): polygon_masks = polygon_masks.polygons masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks] if len(masks): return BitMasks(torch.stack([torch.from_numpy(x) for x in masks])) else: return BitMasks(torch.empty(0, height, width, dtype=torch.bool)) @staticmethod def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks": """ Args: roi_masks: height, width (int): """ return roi_masks.to_bitmasks(height, width) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each bitmask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. It has less reconstruction error compared to rasterization with polygons. However we observe no difference in accuracy, but BitMasks requires more memory to store all the masks. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. """ assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = self.tensor.device batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None] rois = torch.cat([batch_inds, boxes], dim=1) # Nx5 bit_masks = self.tensor.to(dtype=torch.float32) rois = rois.to(device=device) output = ( ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True) .forward(bit_masks[:, None, :, :], rois) .squeeze(1) ) output = output >= 0.5 return output def get_bounding_boxes(self) -> Boxes: ""<fim_suffix>" Returns: Boxes: tight bounding boxes around bitmasks. If a mask is empty, it's bounding box will be all zero. 
""" boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32) x_any = torch.any(self.tensor, dim=1) y_any = torch.any(self.tensor, dim=2) for idx in range(self.tensor.shape[0]): x = torch.where(x_any[idx, :])[0] y = torch.where(y_any[idx, :])[0] if len(x) > 0 and len(y) > 0: boxes[idx, :] = torch.as_tensor( [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32 ) return Boxes(boxes) @staticmethod def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks": """ Concatenates a list of BitMasks into a single BitMasks Arguments: bitmasks_list (list[BitMasks]) Returns: BitMasks: the concatenated BitMasks """ assert isinstance(bitmasks_list, (list, tuple)) assert len(bitmasks_list) > 0 assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list) cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0)) return cat_bitmasks class PolygonMasks: """ This class stores the segmentation masks for all objects in one image, in the form of polygons. Attributes: polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon. """ def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]): """ Arguments: polygons (list[list[np.ndarray]]): The first level of the list correspond to individual instances, the second level to all the polygons that compose the instance, and the third level to the polygon coordinates. The third level array should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). """ if not isinstance(polygons, list): raise ValueError( "Cannot create PolygonMasks: Expect a list of list of polygons per image. " "Got '{}' instead.".format(type(polygons)) ) def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray: # Use float64 for higher precision, because why not? # Always put polygons on CPU (self.to is a no-op) since they # are supposed to be small tensors. # May need to change this assumption if GPU placement becomes useful if isinstance(t, torch.Tensor): t = t.cpu().numpy() return np.asarray(t).astype("float64") def process_polygons( polygons_per_instance: List[Union[torch.Tensor, np.ndarray]] ) -> List[np.ndarray]: if not isinstance(polygons_per_instance, list): raise ValueError( "Cannot create polygons: Expect a list of polygons per instance. " "Got '{}' instead.".format(type(polygons_per_instance)) ) # transform each polygon to a numpy array polygons_per_instance = [_make_array(p) for p in polygons_per_instance] for polygon in polygons_per_instance: if len(polygon) % 2 != 0 or len(polygon) < 6: raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.") return polygons_per_instance self.polygons: List[List[np.ndarray]] = [ process_polygons(polygons_per_instance) for polygons_per_instance in polygons ] def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks": return self @property def device(self) -> torch.device: return torch.device("cpu") def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around polygon masks. 
""" boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32) for idx, polygons_per_instance in enumerate(self.polygons): minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32) maxxy = torch.zeros(2, dtype=torch.float32) for polygon in polygons_per_instance: coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32) minxy = torch.min(minxy, torch.min(coords, dim=0).values) maxxy = torch.max(maxxy, torch.max(coords, dim=0).values) boxes[idx, :2] = minxy boxes[idx, 2:] = maxxy return Boxes(boxes) def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or not (True). """ keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons] return torch.from_numpy(np.asarray(keep, dtype=np.bool)) def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks": """ Support indexing over the instances and return a `PolygonMasks` object. `item` can be: 1. An integer. It will return an object with only one instance. 2. A slice. It will return an object with the selected instances. 3. A list[int]. It will return an object with the selected instances, correpsonding to the indices in the list. 4. A vector mask of type BoolTensor, whose length is num_instances. It will return an object with the instances whose mask is nonzero. """ if isinstance(item, int): selected_polygons = [self.polygons[item]] elif isinstance(item, slice): selected_polygons = self.polygons[item] elif isinstance(item, list): selected_polygons = [self.polygons[i] for i in item] elif isinstance(item, torch.Tensor): # Polygons is a list, so we have to move the indices back to CPU. if item.dtype == torch.bool: assert item.dim() == 1, item.shape item = item.nonzero().squeeze(1).cpu().numpy().tolist() elif item.dtype in [torch.int32, torch.int64]: item = item.cpu().numpy().tolist() else: raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype)) selected_polygons = [self.polygons[i] for i in item] return PolygonMasks(selected_polygons) def __iter__(self) -> Iterator[List[np.ndarray]]: """ Yields: list[ndarray]: the polygons for one instance. Each Tensor is a float64 vector representing a polygon. """ return iter(self.polygons) def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.polygons)) return s def __len__(self) -> int: return len(self.polygons) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each mask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. 
""" assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = boxes.device # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise # (several small tensors for representing a single instance mask) boxes = boxes.to(torch.device("cpu")) results = [ rasterize_polygons_within_box(poly, box.numpy(), mask_size) for poly, box in zip(self.polygons, boxes) ] """ poly: list[list[float]], the polygons for one instance box: a tensor of shape (4,) """ if len(results) == 0: return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device) return torch.stack(results, dim=0).to(device=device) def area(self): """ Computes area of the mask. Only works with Polygons, using the shoelace formula: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates Returns: Tensor: a vector, area for each instance """ area = [] for polygons_per_instance in self.polygons: area_per_instance = 0 for p in polygons_per_instance: area_per_instance += polygon_area(p[0::2], p[1::2]) area.append(area_per_instance) return torch.tensor(area) @staticmethod def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks": """ Concatenates a list of PolygonMasks into a single PolygonMasks Arguments: polymasks_list (list[PolygonMasks]) Returns: PolygonMasks: the concatenated PolygonMasks """ assert isinstance(polymasks_list, (list, tuple)) assert len(polymasks_list) > 0 assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list) cat_polymasks = type(polymasks_list[0])( list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list)) ) return cat_polymasks class ROIMasks: """ Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given, full-image bitmask can be obtained by "pasting" the mask on the region defined by the corresponding ROI box. """ def __init__(self, tensor: torch.Tensor): """ Args: tensor: (N, M, M) mask tensor that defines the mask within each ROI. """ if tensor.dim() != 3: raise ValueError("ROIMasks must take a masks of 3 dimension.") self.tensor = tensor def to(self, device: torch.device) -> "ROIMasks": return ROIMasks(self.tensor.to(device)) @property def device(self) -> device: return self.tensor.device def __len__(self): return self.tensor.shape[0] def __getitem__(self, item) -> "ROIMasks": """ Returns: ROIMasks: Create a new :class:`ROIMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[2:10]`: return a slice of masks. 2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ t = self.tensor[item] if t.dim() != 3: raise ValueError( f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!" ) return ROIMasks(t) @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s @torch.jit.unused def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5): """ Args: see documentation of :func:`paste_masks_in_image`. 
""" from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape if torch.jit.is_tracing(): if isinstance(height, torch.Tensor): paste_func = _paste_masks_tensor_shape else: paste_func = paste_masks_in_image else: paste_func = retry_if_cuda_oom(paste_masks_in_image) bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold) return BitMasks(bitmasks) <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
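The row above embeds the BitMasks/PolygonMasks structures. A short sketch of the two representations for the same square instance (sizes and coordinates are arbitrary; the printed values follow from the formulas in the row):

import numpy as np
import torch
from detectron2.structures import BitMasks, PolygonMasks

# BitMasks: N boolean H x W maps. One 64x64 mask with a filled 40x40 square.
bitmap = torch.zeros(1, 64, 64, dtype=torch.bool)
bitmap[0, 10:50, 10:50] = True
bmasks = BitMasks(bitmap)
print(bmasks.get_bounding_boxes().tensor)   # tensor([[10., 10., 50., 50.]])

# PolygonMasks: the same square as a flat [x0, y0, x1, y1, ...] polygon.
pmasks = PolygonMasks([[np.array([10., 10., 50., 10., 50., 50., 10., 50.])]])
print(pmasks.area())                        # area 1600.0 via the shoelace formula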
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy from typing import List import numpy as np import torch from detectron2.config import configurable from detectron2.structures import Boxes, Instances from detectron2.structures.boxes import pairwise_iou from ..config.config import CfgNode as CfgNode_ from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY @TRACKER_HEADS_REGISTRY.register() class BBoxIOUTracker(BaseTracker): """ A bounding box tracker to assign ID based on IoU between current and previous instances """ @configurable def __init__( self, *, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, track_iou_threshold: float = 0.5, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video track_iou_threshold: iou threshold, below this number a bbox pair is removed from tracking """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period self._track_iou_threshold = track_iou_threshold @classmethod def from_config(cls, cfg: CfgNode_): """ Old style initialization using CfgNode Args: cfg: D2 CfgNode, config file Return: dictionary storing arguments for __init__ method """ assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) return { "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", "video_height": video_height, "video_width": video_width, "max_num_instances": max_num_instances, "max_lost_frame_count": max_lost_frame_count, "min_box_rel_dim": min_box_rel_dim, "min_instance_period": min_instance_period, "track_iou_threshold": track_iou_threshold } def update(self, instances: Instances) -> Instances: """ See BaseTracker description """ if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: # calculate IoU of all bbox pairs iou_all = pairwise_iou( boxes1=instances.pred_boxes, boxes2=self._prev_instances.pred_boxes, ) # sort IoU in descending order bbox_pairs = self._create_prediction_pairs(instances, iou_all) # assign previous ID to current bbox if IoU > track_iou_threshold self._reset_fields() for bbox_pair in bbox_pairs: idx = bbox_pair["idx"] prev_id = bbox_pair["prev_id"] if idx in self._matched_idx \ or 
prev_id in self._matched_ID \ or bbox_pair["IoU"] < self._track_iou_threshold: continue instances.ID[idx] = prev_id instances.ID_period[idx] = bbox_pair["prev_period"] + 1 instances.lost_frame_count[idx] = 0 self._matched_idx.add(idx) self._matched_ID.add(prev_id) self._untracked_prev_idx.remove(bbox_pair["prev_idx"]) instances = self._assign_new_id(instances) instances = self._merge_untracked_instances(instances) self._prev_instances = copy.deepcopy(instances) return instances def _create_prediction_pairs( self, instances: Instances, iou_all: np.ndarray ) -> List: """ For all instances in previous and current frames, create pairs. For each pair, store index of the instance in current frame predcitions, index in previous predictions, ID in previous predictions, IoU of the bboxes in this pair, period in previous predictions. Args: instances: D2 Instances, for predictions of the current frame iou_all: IoU for all bboxes pairs Return: A list of IoU for all pairs """ bbox_pairs = [] for i in range(len(instances)): for j in range(len(self._prev_instances)): bbox_pairs.append( { "idx": i, "prev_idx": j, "prev_id": self._prev_instances.ID[j], "IoU": iou_all[i, j], "prev_period": self._prev_instances.ID_period[j], } ) return bbox_pairs def _initialize_extra_fields(self, instances: Instances) -> Instances: """ If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instances.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_period"): instances.set("ID_period", [None] * len(instances)) if not instances.has("lost_frame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is None: instances.ID = list(range(len(instances))) self._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _reset_fields(self): ""<fim_suffix>" Before each uodate call, reset fields first """ self._matched_idx = set() self._matched_ID = set() self._untracked_prev_idx = set(range(len(self._prev_instances))) def _assign_new_id(self, instances: Instances) -> Instances: """ For each untracked instance, assign a new id Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with new ID assigned """ untracked_idx = set(range(len(instances))).difference(self._matched_idx) for idx in untracked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _merge_untracked_instances(self, instances: Instances) -> Instances: """ For untracked previous instances, under certain condition, still keep them in tracking and merge with the current instances. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances merging current instances and instances from previous frame decided to keep tracking """ untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) for idx in self._untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._prev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pred_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
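The same tracker file appears again in the row above, split at a different point. To complement the earlier sketch, here is a two-frame sketch showing an ID being carried over when the IoU with the previous frame exceeds the threshold (the boxes and the detections() helper are made up for illustration; exact ID values are indicative):

import torch
from detectron2.structures import Boxes, Instances
from detectron2.tracking.bbox_iou_tracker import BBoxIOUTracker


def detections(boxes):
    # Hypothetical helper: wrap raw box coordinates as a D2 Instances object.
    return Instances(
        (480, 640),
        pred_boxes=Boxes(torch.tensor(boxes)),
        pred_classes=torch.zeros(len(boxes), dtype=torch.int64),
        scores=torch.full((len(boxes),), 0.9),
    )


tracker = BBoxIOUTracker(video_height=480, video_width=640)
f1 = tracker.update(detections([[10., 10., 100., 100.]]))  # fresh ID on frame 1
f2 = tracker.update(detections([[12., 12., 102., 102.]]))  # IoU ~0.92 with frame 1
print(f1.ID, f2.ID)  # the box in frame 2 should keep frame 1's ID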
<filename>UniRef/detectron2/structures/boxes.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import math import numpy as np from enum import IntEnum, unique from typing import List, Tuple, Union import torch from torch import device _RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray] @unique class BoxMode(IntEnum): """ Enum of different ways to represent a box. """ XYXY_ABS = 0 """ (x0, y0, x1, y1) in absolute floating points coordinates. The coordinates in range [0, width or height]. """ XYWH_ABS = 1 """ (x0, y0, w, h) in absolute floating points coordinates. """ XYXY_REL = 2 """ Not yet supported! (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image. """ XYWH_REL = 3 """ Not yet supported! (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image. """ XYWHA_ABS = 4 """ (xc, yc, w, h, a) in absolute floating points coordinates. (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw. """ @staticmethod def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType: """ Args: box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5 from_mode, to_mode (BoxMode) Returns: The converted box of the same type. """ if from_mode == to_mode: return box original_type = type(box) is_numpy = isinstance(box, np.ndarray) single_box = isinstance(box, (list, tuple)) if single_box: assert len(box) == 4 or len(box) == 5, ( "BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor," " where k == 4 or 5" ) arr = torch.tensor(box)[None, :] else: # avoid modifying the input box if is_numpy: arr = torch.from_numpy(np.asarray(box)).clone() else: arr = box.clone() assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [ BoxMode.XYXY_REL, BoxMode.XYWH_REL, ], "Relative mode not yet supported!" if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS: assert ( arr.shape[-1] == 5 ), "The last dimension of input shape must be 5 for XYWHA format" original_dtype = arr.dtype arr = arr.double() w = arr[:, 2] h = arr[:, 3] a = arr[:, 4] c = torch.abs(torch.cos(a * math.pi / 180.0)) s = torch.abs(torch.sin(a * math.pi / 180.0)) # This basically computes the horizontal bounding rectangle of the rotated box new_w = c * w + s * h new_h = c * h + s * w # convert center to top-left corner arr[:, 0] -= new_w / 2.0 arr[:, 1] -= new_h / 2.0 # bottom-right corner arr[:, 2] = arr[:, 0] + new_w arr[:, 3] = arr[:, 1] + new_h arr = arr[:, :4].to(dtype=original_dtype) elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS: original_dtype = arr.dtype arr = arr.double() arr[:, 0] += arr[:, 2] / 2.0 arr[:, 1] += arr[:, 3] / 2.0 angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype) arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype) else: if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS: arr[:, 2] += arr[:, 0] arr[:, 3] += arr[:, 1] elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS: arr[:, 2] -= arr[:, 0] arr[:, 3] -= arr[:, 1] else: raise NotImplementedError( "Conversion from BoxMode {} to {} is not supported yet".format( from_mode, to_mode ) ) if single_box: return original_type(arr.flatten().tolist()) if is_numpy: return arr.numpy() else: return arr class Boxes: """ This structure stores a list of boxes as a Nx4 torch.Tensor. 
It supports some common methods about boxes (`area`, `clip`, `nonempty`, etc), and also behaves like a Tensor (support indexing, `to(device)`, `.device`, and iteration over all boxes) Attributes: tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2). """ def __init__(self, tensor: torch.Tensor): """ Args: tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2). """ device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) if tensor.numel() == 0: # Use reshape, so we don't end up creating a new tensor that does not depend on # the inputs (and consequently confuses jit) tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device) assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size() self.tensor = tensor def clone(self) -> "Boxes": """ Clone the Boxes. Returns: Boxes """ return Boxes(self.tensor.clone()) def to(self, device: torch.device): # Boxes are assumed float32 and does not support to(dtype) return Boxes(self.tensor.to(device=device)) def area(self) -> torch.Tensor: """ Computes the area of all the boxes. Returns: torch.Tensor: a vector with areas of each box. """ box = self.tensor area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]) return area def clip(self, box_size: Tuple[int, int]) -> None: """ Clip (in place) the boxes by limiting x coordinates to the range [0, width] and y coordinates to the range [0, height]. Args: box_size (height, width): The clipping box's size. """ assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!" h, w = box_size x1 = self.tensor[:, 0].clamp(min=0, max=w) y1 = self.tensor[:, 1].clamp(min=0, max=h) x2 = self.tensor[:, 2].clamp(min=0, max=w) y2 = self.tensor[:, 3].clamp(min=0, max=h) self.tensor = torch.stack((x1, y1, x2, y2), dim=-1) def nonempty(self, threshold: float = 0.0) -> torch.Tensor: """ Find boxes that are non-empty. A box is considered empty, if either of its side is no larger than threshold. Returns: Tensor: a binary vector which represents whether each box is empty (False) or non-empty (True). """ box = self.tensor widths = box[:, 2] - box[:, 0] heights = box[:, 3] - box[:, 1] keep = (widths > threshold) & (heights > threshold) return keep def __getitem__(self, item) -> "Boxes": ""<fim_suffix>" Args: item: int, slice, or a BoolTensor Returns: Boxes: Create a new :class:`Boxes` by indexing. The following usage are allowed: 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box. 2. `new_boxes = boxes[2:10]`: return a slice of boxes. 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor with `length = len(boxes)`. Nonzero elements in the vector will be selected. Note that the returned Boxes might share storage with this Boxes, subject to Pytorch's indexing semantics. """ if isinstance(item, int): return Boxes(self.tensor[item].view(1, -1)) b = self.tensor[item] assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item) return Boxes(b) def __len__(self) -> int: return self.tensor.shape[0] def __repr__(self) -> str: return "Boxes(" + str(self.tensor) + ")" def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor: """ Args: box_size (height, width): Size of the reference box. boundary_threshold (int): Boxes that extend beyond the reference box boundary by more than boundary_threshold are considered "outside". 
Returns: a binary vector, indicating whether each box is inside the reference box. """ height, width = box_size inds_inside = ( (self.tensor[..., 0] >= -boundary_threshold) & (self.tensor[..., 1] >= -boundary_threshold) & (self.tensor[..., 2] < width + boundary_threshold) & (self.tensor[..., 3] < height + boundary_threshold) ) return inds_inside def get_centers(self) -> torch.Tensor: """ Returns: The box centers in a Nx2 array of (x, y). """ return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2 def scale(self, scale_x: float, scale_y: float) -> None: """ Scale the box with horizontal and vertical scaling factors """ self.tensor[:, 0::2] *= scale_x self.tensor[:, 1::2] *= scale_y @classmethod def cat(cls, boxes_list: List["Boxes"]) -> "Boxes": """ Concatenates a list of Boxes into a single Boxes Arguments: boxes_list (list[Boxes]) Returns: Boxes: the concatenated Boxes """ assert isinstance(boxes_list, (list, tuple)) if len(boxes_list) == 0: return cls(torch.empty(0)) assert all([isinstance(box, Boxes) for box in boxes_list]) # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0)) return cat_boxes @property def device(self) -> device: return self.tensor.device # type "Iterator[torch.Tensor]", yield, and iter() not supported by torchscript # https://github.com/pytorch/pytorch/issues/18627 @torch.jit.unused def __iter__(self): """ Yield a box as a Tensor of shape (4,) at a time. """ yield from self.tensor def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: """ Given two lists of boxes of size N and M, compute the intersection area between __all__ N x M pairs of boxes. The box order must be (xmin, ymin, xmax, ymax) Args: boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. Returns: Tensor: intersection, sized [N,M]. """ boxes1, boxes2 = boxes1.tensor, boxes2.tensor width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max( boxes1[:, None, :2], boxes2[:, :2] ) # [N,M,2] width_height.clamp_(min=0) # [N,M,2] intersection = width_height.prod(dim=2) # [N,M] return intersection # implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py # with slight modifications def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: """ Given two lists of boxes of size N and M, compute the IoU (intersection over union) between **all** N x M pairs of boxes. The box order must be (xmin, ymin, xmax, ymax). Args: boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. Returns: Tensor: IoU, sized [N,M]. """ area1 = boxes1.area() # [N] area2 = boxes2.area() # [M] inter = pairwise_intersection(boxes1, boxes2) # handle empty boxes iou = torch.where( inter > 0, inter / (area1[:, None] + area2 - inter), torch.zeros(1, dtype=inter.dtype, device=inter.device), ) return iou def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: """ Similar to :func:`pariwise_iou` but compute the IoA (intersection over boxes2 area). Args: boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. Returns: Tensor: IoA, sized [N,M]. """ area2 = boxes2.area() # [M] inter = pairwise_intersection(boxes1, boxes2) # handle empty boxes ioa = torch.where( inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device) ) return ioa def pairwise_point_box_distance(points: torch.Tensor, boxes: Boxes): """ Pairwise distance between N points and M boxes. 
The distance between a point and a box is represented by the distance from the point to 4 edges of the box. Distances are all positive when the point is inside the box. Args: points: Nx2 coordinates. Each row is (x, y) boxes: M boxes Returns: Tensor: distances of size (N, M, 4). The 4 values are distances from the point to the left, top, right, bottom of the box. """ x, y = points.unsqueeze(dim=2).unbind(dim=1) # (N, 1) x0, y0, x1, y1 = boxes.tensor.unsqueeze(dim=0).unbind(dim=2) # (1, M) return torch.stack([x - x0, y - y0, x1 - x, y1 - y], dim=2) def matched_pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: """ Compute pairwise intersection over union (IOU) of two sets of matched boxes that have the same number of boxes. Similar to :func:`pairwise_iou`, but computes only diagonal elements of the matrix. Args: boxes1 (Boxes): bounding boxes, sized [N,4]. boxes2 (Boxes): same length as boxes1 Returns: Tensor: iou, sized [N]. """ assert len(boxes1) == len( boxes2 ), "boxlists should have the same" "number of entries, got {}, {}".format( len(boxes1), len(boxes2) ) area1 = boxes1.area() # [N] area2 = boxes2.area() # [N] box1, box2 = boxes1.tensor, boxes2.tensor lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2] rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2] wh = (rb - lt).clamp(min=0) # [N,2] inter = wh[:, 0] * wh[:, 1] # [N] iou = inter / (area1 + area2 - inter) # [N] return iou <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
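The row above carries the Boxes structure, BoxMode conversion, and the pairwise IoU helpers. A compact sketch (box coordinates are arbitrary; the printed numbers follow from the area and intersection formulas in the row):

import torch
from detectron2.structures import Boxes, BoxMode, pairwise_iou

# Convert a single (x0, y0, w, h) box to (x0, y0, x1, y1).
print(BoxMode.convert([10.0, 10.0, 40.0, 40.0], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS))
# -> [10.0, 10.0, 50.0, 50.0]

a = Boxes(torch.tensor([[10., 10., 50., 50.]]))
b = Boxes(torch.tensor([[10., 10., 50., 50.], [30., 30., 70., 70.]]))
print(pairwise_iou(a, b))        # roughly tensor([[1.0000, 0.1429]])
print(a.area(), b.get_centers()) # per-box areas and (x, y) centers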
<filename>UniRef/detectron2/structures/instances.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import itertools from typing import Any, Dict, List, Tuple, Union import torch class Instances: """ This class represents a list of instances in an image. It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields". All fields must have the same ``__len__`` which is the number of instances. All other (non-field) attributes of this class are considered private: they must start with '_' and are not modifiable by a user. Some basic usage: 1. Set/get/check a field: .. code-block:: python instances.gt_boxes = Boxes(...) print(instances.pred_masks) # a tensor of shape (N, H, W) print('gt_masks' in instances) 2. ``len(instances)`` returns the number of instances 3. Indexing: ``instances[indices]`` will apply the indexing on all the fields and returns a new :class:`Instances`. Typically, ``indices`` is a integer vector of indices, or a binary mask of length ``num_instances`` .. code-block:: python category_3_detections = instances[instances.pred_classes == 3] confident_detections = instances[instances.scores > 0.9] """ def __init__(self, image_size: Tuple[int, int], **kwargs: Any): "<fim_suffix>"" Args: image_size (height, width): the spatial size of the image. kwargs: fields to add to this `Instances`. """ self._image_size = image_size self._fields: Dict[str, Any] = {} for k, v in kwargs.items(): self.set(k, v) @property def image_size(self) -> Tuple[int, int]: """ Returns: tuple: height, width """ return self._image_size def __setattr__(self, name: str, val: Any) -> None: if name.startswith("_"): super().__setattr__(name, val) else: self.set(name, val) def __getattr__(self, name: str) -> Any: if name == "_fields" or name not in self._fields: raise AttributeError("Cannot find field '{}' in the given Instances!".format(name)) return self._fields[name] def set(self, name: str, value: Any) -> None: """ Set the field named `name` to `value`. The length of `value` must be the number of instances, and must agree with other existing fields in this object. """ data_len = len(value) if len(self._fields): assert ( len(self) == data_len ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self)) self._fields[name] = value def has(self, name: str) -> bool: """ Returns: bool: whether the field called `name` exists. """ return name in self._fields def remove(self, name: str) -> None: """ Remove the field called `name`. """ del self._fields[name] def get(self, name: str) -> Any: """ Returns the field called `name`. """ return self._fields[name] def get_fields(self) -> Dict[str, Any]: """ Returns: dict: a dict which maps names (str) to data of the fields Modifying the returned dict will modify this instance. """ return self._fields # Tensor-like methods def to(self, *args: Any, **kwargs: Any) -> "Instances": """ Returns: Instances: all fields are called with a `to(device)`, if the field has this method. """ ret = Instances(self._image_size) for k, v in self._fields.items(): if hasattr(v, "to"): v = v.to(*args, **kwargs) ret.set(k, v) return ret def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances": """ Args: item: an index-like object and will be used to index all the fields. Returns: If `item` is a string, return the data in the corresponding field. Otherwise, returns an `Instances` where all fields are indexed by `item`. 
""" if type(item) == int: if item >= len(self) or item < -len(self): raise IndexError("Instances index out of range!") else: item = slice(item, None, len(self)) ret = Instances(self._image_size) for k, v in self._fields.items(): ret.set(k, v[item]) return ret def __len__(self) -> int: for v in self._fields.values(): # use __len__ because len() has to be int and is not friendly to tracing return v.__len__() raise NotImplementedError("Empty Instances does not support __len__!") def __iter__(self): raise NotImplementedError("`Instances` object is not iterable!") @staticmethod def cat(instance_lists: List["Instances"]) -> "Instances": """ Args: instance_lists (list[Instances]) Returns: Instances """ assert all(isinstance(i, Instances) for i in instance_lists) assert len(instance_lists) > 0 if len(instance_lists) == 1: return instance_lists[0] image_size = instance_lists[0].image_size if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing for i in instance_lists[1:]: assert i.image_size == image_size ret = Instances(image_size) for k in instance_lists[0]._fields.keys(): values = [i.get(k) for i in instance_lists] v0 = values[0] if isinstance(v0, torch.Tensor): values = torch.cat(values, dim=0) elif isinstance(v0, list): values = list(itertools.chain(*values)) elif hasattr(type(v0), "cat"): values = type(v0).cat(values) else: raise ValueError("Unsupported type {} for concatenation".format(type(v0))) ret.set(k, values) return ret def __str__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={}, ".format(len(self)) s += "image_height={}, ".format(self._image_size[0]) s += "image_width={}, ".format(self._image_size[1]) s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items()))) return s __repr__ = __str__ <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
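The row above is built from detectron2's Instances container. A short usage sketch of the API shown in that source (set/get fields, length checks, boolean indexing, cat), assuming detectron2 and torch are installed; the field names and threshold are illustrative:

import torch
from detectron2.structures import Instances

inst = Instances((480, 640))                 # (height, width)
inst.scores = torch.tensor([0.9, 0.3, 0.7])
inst.pred_classes = torch.tensor([1, 2, 1])
print(len(inst))                             # 3 -- every field must share this length
confident = inst[inst.scores > 0.5]          # boolean-mask indexing applies to all fields
print(confident.pred_classes)                # tensor([1, 1])
merged = Instances.cat([inst, confident])
print(len(merged))                           # 5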
<filename>UniRef/detectron2/structures/masks.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import itertools import numpy as np from typing import Any, Iterator, List, Union import pycocotools.mask as mask_util import torch from torch import device from detectron2.layers.roi_align import ROIAlign from detectron2.utils.memory import retry_if_cuda_oom from .boxes import Boxes def polygon_area(x, y): # Using the shoelace formula # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray: """ Args: polygons (list[ndarray]): each array has shape (Nx2,) height, width (int) Returns: ndarray: a bool mask of shape (height, width) """ if len(polygons) == 0: # COCOAPI does not support empty polygons return np.zeros((height, width)).astype(np.bool) rles = mask_util.frPyObjects(polygons, height, width) rle = mask_util.merge(rles) return mask_util.decode(rle).astype(np.bool) def rasterize_polygons_within_box( polygons: List[np.ndarray], box: np.ndarray, mask_size: int ) -> torch.Tensor: """ Rasterize the polygons into a mask image and crop the mask content in the given box. The cropped mask is resized to (mask_size, mask_size). This function is used when generating training targets for mask head in Mask R-CNN. Given original ground-truth masks for an image, new ground-truth mask training targets in the size of `mask_size x mask_size` must be provided for each predicted box. This function will be called to produce such targets. Args: polygons (list[ndarray[float]]): a list of polygons, which represents an instance. box: 4-element numpy array mask_size (int): Returns: Tensor: BoolTensor of shape (mask_size, mask_size) """ # 1. Shift the polygons w.r.t the boxes w, h = box[2] - box[0], box[3] - box[1] polygons = copy.deepcopy(polygons) for p in polygons: p[0::2] = p[0::2] - box[0] p[1::2] = p[1::2] - box[1] # 2. Rescale the polygons to the new box size # max() to avoid division by small number ratio_h = mask_size / max(h, 0.1) ratio_w = mask_size / max(w, 0.1) if ratio_h == ratio_w: for p in polygons: p *= ratio_h else: for p in polygons: p[0::2] *= ratio_w p[1::2] *= ratio_h # 3. Rasterize the polygons with coco api mask = polygons_to_bitmask(polygons, mask_size, mask_size) mask = torch.from_numpy(mask) return mask class BitMasks: """ This class stores the segmentation masks for all objects in one image, in the form of bitmaps. Attributes: tensor: bool Tensor of N,H,W, representing N instances in the image. """ def __init__(self, tensor: Union[torch.Tensor, np.ndarray]): """ Args: tensor: bool Tensor of N,H,W, representing N instances in the image. """ device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device) assert tensor.dim() == 3, tensor.size() self.image_size = tensor.shape[1:] self.tensor = tensor @torch.jit.unused def to(self, *args: Any, **kwargs: Any) -> "BitMasks": return BitMasks(self.tensor.to(*args, **kwargs)) @property def device(self) -> torch.device: return self.tensor.device @torch.jit.unused def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks": """ Returns: BitMasks: Create a new :class:`BitMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask. 2. 
`new_masks = masks[2:10]`: return a slice of masks. 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ if isinstance(item, int): return BitMasks(self.tensor[item].unsqueeze(0)) m = self.tensor[item] assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format( item, m.shape ) return BitMasks(m) @torch.jit.unused def __iter__(self) -> torch.Tensor: yield from self.tensor @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s def __len__(self) -> int: return self.tensor.shape[0] def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or non-empty (True). """ return self.tensor.flatten(1).any(dim=1) @staticmethod def from_polygon_masks( polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int ) -> "BitMasks": """ Args: polygon_masks (list[list[ndarray]] or PolygonMasks) height, width (int) """ if isinstance(polygon_masks, PolygonMasks): polygon_masks = polygon_masks.polygons masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks] if len(masks): return BitMasks(torch.stack([torch.from_numpy(x) for x in masks])) else: return BitMasks(torch.empty(0, height, width, dtype=torch.bool)) @staticmethod def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks": """ Args: roi_masks: height, width (int): """ return roi_masks.to_bitmasks(height, width) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each bitmask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. It has less reconstruction error compared to rasterization with polygons. However we observe no difference in accuracy, but BitMasks requires more memory to store all the masks. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. """ assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = self.tensor.device batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None] rois = torch.cat([batch_inds, boxes], dim=1) # Nx5 bit_masks = self.tensor.to(dtype=torch.float32) rois = rois.to(device=device) output = ( ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True) .forward(bit_masks[:, None, :, :], rois) .squeeze(1) ) output = output >= 0.5 return output def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around bitmasks. If a mask is empty, it's bounding box will be all zero. 
""" boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32) x_any = torch.any(self.tensor, dim=1) y_any = torch.any(self.tensor, dim=2) for idx in range(self.tensor.shape[0]): x = torch.where(x_any[idx, :])[0] y = torch.where(y_any[idx, :])[0] if len(x) > 0 and len(y) > 0: boxes[idx, :] = torch.as_tensor( [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32 ) return Boxes(boxes) @staticmethod def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks": """ Concatenates a list of BitMasks into a single BitMasks Arguments: bitmasks_list (list[BitMasks]) Returns: BitMasks: the concatenated BitMasks """ assert isinstance(bitmasks_list, (list, tuple)) assert len(bitmasks_list) > 0 assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list) cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0)) return cat_bitmasks class PolygonMasks: """ This class stores the segmentation masks for all objects in one image, in the form of polygons. Attributes: polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon. """ def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]): "<fim_suffix>"" Arguments: polygons (list[list[np.ndarray]]): The first level of the list correspond to individual instances, the second level to all the polygons that compose the instance, and the third level to the polygon coordinates. The third level array should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). """ if not isinstance(polygons, list): raise ValueError( "Cannot create PolygonMasks: Expect a list of list of polygons per image. " "Got '{}' instead.".format(type(polygons)) ) def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray: # Use float64 for higher precision, because why not? # Always put polygons on CPU (self.to is a no-op) since they # are supposed to be small tensors. # May need to change this assumption if GPU placement becomes useful if isinstance(t, torch.Tensor): t = t.cpu().numpy() return np.asarray(t).astype("float64") def process_polygons( polygons_per_instance: List[Union[torch.Tensor, np.ndarray]] ) -> List[np.ndarray]: if not isinstance(polygons_per_instance, list): raise ValueError( "Cannot create polygons: Expect a list of polygons per instance. " "Got '{}' instead.".format(type(polygons_per_instance)) ) # transform each polygon to a numpy array polygons_per_instance = [_make_array(p) for p in polygons_per_instance] for polygon in polygons_per_instance: if len(polygon) % 2 != 0 or len(polygon) < 6: raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.") return polygons_per_instance self.polygons: List[List[np.ndarray]] = [ process_polygons(polygons_per_instance) for polygons_per_instance in polygons ] def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks": return self @property def device(self) -> torch.device: return torch.device("cpu") def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around polygon masks. 
""" boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32) for idx, polygons_per_instance in enumerate(self.polygons): minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32) maxxy = torch.zeros(2, dtype=torch.float32) for polygon in polygons_per_instance: coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32) minxy = torch.min(minxy, torch.min(coords, dim=0).values) maxxy = torch.max(maxxy, torch.max(coords, dim=0).values) boxes[idx, :2] = minxy boxes[idx, 2:] = maxxy return Boxes(boxes) def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or not (True). """ keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons] return torch.from_numpy(np.asarray(keep, dtype=np.bool)) def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks": """ Support indexing over the instances and return a `PolygonMasks` object. `item` can be: 1. An integer. It will return an object with only one instance. 2. A slice. It will return an object with the selected instances. 3. A list[int]. It will return an object with the selected instances, correpsonding to the indices in the list. 4. A vector mask of type BoolTensor, whose length is num_instances. It will return an object with the instances whose mask is nonzero. """ if isinstance(item, int): selected_polygons = [self.polygons[item]] elif isinstance(item, slice): selected_polygons = self.polygons[item] elif isinstance(item, list): selected_polygons = [self.polygons[i] for i in item] elif isinstance(item, torch.Tensor): # Polygons is a list, so we have to move the indices back to CPU. if item.dtype == torch.bool: assert item.dim() == 1, item.shape item = item.nonzero().squeeze(1).cpu().numpy().tolist() elif item.dtype in [torch.int32, torch.int64]: item = item.cpu().numpy().tolist() else: raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype)) selected_polygons = [self.polygons[i] for i in item] return PolygonMasks(selected_polygons) def __iter__(self) -> Iterator[List[np.ndarray]]: """ Yields: list[ndarray]: the polygons for one instance. Each Tensor is a float64 vector representing a polygon. """ return iter(self.polygons) def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.polygons)) return s def __len__(self) -> int: return len(self.polygons) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each mask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. 
""" assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = boxes.device # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise # (several small tensors for representing a single instance mask) boxes = boxes.to(torch.device("cpu")) results = [ rasterize_polygons_within_box(poly, box.numpy(), mask_size) for poly, box in zip(self.polygons, boxes) ] """ poly: list[list[float]], the polygons for one instance box: a tensor of shape (4,) """ if len(results) == 0: return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device) return torch.stack(results, dim=0).to(device=device) def area(self): """ Computes area of the mask. Only works with Polygons, using the shoelace formula: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates Returns: Tensor: a vector, area for each instance """ area = [] for polygons_per_instance in self.polygons: area_per_instance = 0 for p in polygons_per_instance: area_per_instance += polygon_area(p[0::2], p[1::2]) area.append(area_per_instance) return torch.tensor(area) @staticmethod def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks": """ Concatenates a list of PolygonMasks into a single PolygonMasks Arguments: polymasks_list (list[PolygonMasks]) Returns: PolygonMasks: the concatenated PolygonMasks """ assert isinstance(polymasks_list, (list, tuple)) assert len(polymasks_list) > 0 assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list) cat_polymasks = type(polymasks_list[0])( list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list)) ) return cat_polymasks class ROIMasks: """ Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given, full-image bitmask can be obtained by "pasting" the mask on the region defined by the corresponding ROI box. """ def __init__(self, tensor: torch.Tensor): """ Args: tensor: (N, M, M) mask tensor that defines the mask within each ROI. """ if tensor.dim() != 3: raise ValueError("ROIMasks must take a masks of 3 dimension.") self.tensor = tensor def to(self, device: torch.device) -> "ROIMasks": return ROIMasks(self.tensor.to(device)) @property def device(self) -> device: return self.tensor.device def __len__(self): return self.tensor.shape[0] def __getitem__(self, item) -> "ROIMasks": """ Returns: ROIMasks: Create a new :class:`ROIMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[2:10]`: return a slice of masks. 2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ t = self.tensor[item] if t.dim() != 3: raise ValueError( f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!" ) return ROIMasks(t) @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s @torch.jit.unused def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5): """ Args: see documentation of :func:`paste_masks_in_image`. 
""" from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape if torch.jit.is_tracing(): if isinstance(height, torch.Tensor): paste_func = _paste_masks_tensor_shape else: paste_func = paste_masks_in_image else: paste_func = retry_if_cuda_oom(paste_masks_in_image) bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold) return BitMasks(bitmasks) <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
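polygon_area in the masks example above uses the shoelace formula. A self-contained NumPy check on a unit square (coordinates chosen here purely for illustration):

import numpy as np

def polygon_area(x, y):
    # Shoelace formula: 0.5 * |sum(x_i * y_{i-1} - y_i * x_{i-1})|
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

x = np.array([0.0, 1.0, 1.0, 0.0])   # unit square, counter-clockwise
y = np.array([0.0, 0.0, 1.0, 1.0])
print(polygon_area(x, y))            # 1.0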
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import dataclasses import logging from collections import abc from typing import Any from detectron2.utils.registry import _convert_target_to_string, locate __all__ = ["dump_dataclass", "instantiate"] def dump_dataclass(obj: Any): """ Dump a dataclass recursively into a dict that can be later instantiated. Args: obj: a dataclass object Returns: dict """ assert dataclasses.is_dataclass(obj) and not isinstance( obj, type ), "dump_dataclass() requires an instance of a dataclass." ret = {"_target_": _convert_target_to_string(type(obj))} for f in dataclasses.fields(obj): v = getattr(obj, f.name) if dataclasses.is_dataclass(v): v = dump_dataclass(v) if isinstance(v, (list, tuple)): v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v] ret[f.name] = v return ret def instantiate(cfg): "<fim_suffix>"" Recursively instantiate objects defined in dictionaries by "_target_" and arguments. Args: cfg: a dict-like object with "_target_" that defines the caller, and other keys that define the arguments Returns: object instantiated by cfg """ from omegaconf import ListConfig if isinstance(cfg, ListConfig): lst = [instantiate(x) for x in cfg] return ListConfig(lst, flags={"allow_objects": True}) if isinstance(cfg, list): # Specialize for list, because many classes take # list[objects] as arguments, such as ResNet, DatasetMapper return [instantiate(x) for x in cfg] if isinstance(cfg, abc.Mapping) and "_target_" in cfg: # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all, # but faster: https://github.com/facebookresearch/hydra/issues/1200 cfg = {k: instantiate(v) for k, v in cfg.items()} cls = cfg.pop("_target_") cls = instantiate(cls) if isinstance(cls, str): cls_name = cls cls = locate(cls_name) assert cls is not None, cls_name else: try: cls_name = cls.__module__ + "." + cls.__qualname__ except Exception: # target could be anything, so the above could fail cls_name = str(cls) assert callable(cls), f"_target_ {cls} does not define a callable object" try: return cls(**cfg) except TypeError: logger = logging.getLogger(__name__) logger.error(f"Error when instantiating {cls_name}!") raise return cfg # return as-is if don't know what to do <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
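instantiate in the row above resolves a "_target_" dotted path with locate() and calls it with the remaining keys, recursing into nested dicts and lists. A usage sketch assuming the module shown above is importable at its stated path and that torch is installed; the target and arguments are made up for illustration:

from detectron2.config.instantiate import instantiate

cfg = {
    "_target_": "torch.nn.Linear",   # dotted path resolved via locate()
    "in_features": 4,
    "out_features": 2,
}
layer = instantiate(cfg)             # behaves like torch.nn.Linear(in_features=4, out_features=2)
print(layer)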
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy from typing import List import numpy as np import torch from detectron2.config import configurable from detectron2.structures import Boxes, Instances from detectron2.structures.boxes import pairwise_iou from ..config.config import CfgNode as CfgNode_ from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY @TRACKER_HEADS_REGISTRY.register() class BBoxIOUTracker(BaseTracker): """ A bounding box tracker to assign ID based on IoU between current and previous instances """ @configurable def __init__( self, *, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, track_iou_threshold: float = 0.5, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video track_iou_threshold: iou threshold, below this number a bbox pair is removed from tracking """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period self._track_iou_threshold = track_iou_threshold @classmethod def from_config(cls, cfg: CfgNode_): """ Old style initialization using CfgNode Args: cfg: D2 CfgNode, config file Return: dictionary storing arguments for __init__ method """ assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) return { "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", "video_height": video_height, "video_width": video_width, "max_num_instances": max_num_instances, "max_lost_frame_count": max_lost_frame_count, "min_box_rel_dim": min_box_rel_dim, "min_instance_period": min_instance_period, "track_iou_threshold": track_iou_threshold } def update(self, instances: Instances) -> Instances: """ See BaseTracker description """ if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: # calculate IoU of all bbox pairs iou_all = pairwise_iou( boxes1=instances.pred_boxes, boxes2=self._prev_instances.pred_boxes, ) # sort IoU in descending order bbox_pairs = self._create_prediction_pairs(instances, iou_all) # assign previous ID to current bbox if IoU > track_iou_threshold self._reset_fields() for bbox_pair in bbox_pairs: idx = bbox_pair["idx"] prev_id = bbox_pair["prev_id"] if idx in self._matched_idx \ or 
prev_id in self._matched_ID \ or bbox_pair["IoU"] < self._track_iou_threshold: continue instances.ID[idx] = prev_id instances.ID_period[idx] = bbox_pair["prev_period"] + 1 instances.lost_frame_count[idx] = 0 self._matched_idx.add(idx) self._matched_ID.add(prev_id) self._untracked_prev_idx.remove(bbox_pair["prev_idx"]) instances = self._assign_new_id(instances) instances = self._merge_untracked_instances(instances) self._prev_instances = copy.deepcopy(instances) return instances def _create_prediction_pairs( self, instances: Instances, iou_all: np.ndarray ) -> List: """ For all instances in previous and current frames, create pairs. For each pair, store index of the instance in current frame predcitions, index in previous predictions, ID in previous predictions, IoU of the bboxes in this pair, period in previous predictions. Args: instances: D2 Instances, for predictions of the current frame iou_all: IoU for all bboxes pairs Return: A list of IoU for all pairs """ bbox_pairs = [] for i in range(len(instances)): for j in range(len(self._prev_instances)): bbox_pairs.append( { "idx": i, "prev_idx": j, "prev_id": self._prev_instances.ID[j], "IoU": iou_all[i, j], "prev_period": self._prev_instances.ID_period[j], } ) return bbox_pairs def _initialize_extra_fields(self, instances: Instances) -> Instances: ""<fim_suffix>" If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instances.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_period"): instances.set("ID_period", [None] * len(instances)) if not instances.has("lost_frame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is None: instances.ID = list(range(len(instances))) self._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _reset_fields(self): """ Before each uodate call, reset fields first """ self._matched_idx = set() self._matched_ID = set() self._untracked_prev_idx = set(range(len(self._prev_instances))) def _assign_new_id(self, instances: Instances) -> Instances: """ For each untracked instance, assign a new id Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with new ID assigned """ untracked_idx = set(range(len(instances))).difference(self._matched_idx) for idx in untracked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _merge_untracked_instances(self, instances: Instances) -> Instances: """ For untracked previous instances, under certain condition, still keep them in tracking and merge with the current instances. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances merging current instances and instances from previous frame decided to keep tracking """ untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) for idx in self._untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._prev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pred_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
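BBoxIOUTracker above matches current detections to previous ones via pairwise_iou, whose implementation is imported rather than shown here. A self-contained sketch of the standard pairwise IoU it relies on, written directly over (N, 4) and (M, 4) XYXY tensors (not detectron2's implementation; names and sample boxes are illustrative):

import torch

def pairwise_iou_xyxy(boxes1: torch.Tensor, boxes2: torch.Tensor) -> torch.Tensor:
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])   # (N,)
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])   # (M,)
    lt = torch.max(boxes1[:, None, :2], boxes2[None, :, :2])                # (N, M, 2)
    rb = torch.min(boxes1[:, None, 2:], boxes2[None, :, 2:])                # (N, M, 2)
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]                                         # (N, M)
    return inter / (area1[:, None] + area2[None, :] - inter)

cur = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
prev = torch.tensor([[5.0, 5.0, 15.0, 15.0], [20.0, 20.0, 30.0, 30.0]])
print(pairwise_iou_xyxy(cur, prev))   # roughly tensor([[0.1429, 0.0000]])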
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy from typing import List import numpy as np import torch from detectron2.config import configurable from detectron2.structures import Boxes, Instances from detectron2.structures.boxes import pairwise_iou from ..config.config import CfgNode as CfgNode_ from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY @TRACKER_HEADS_REGISTRY.register() class BBoxIOUTracker(BaseTracker): """ A bounding box tracker to assign ID based on IoU between current and previous instances """ @configurable def __init__( self, *, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, track_iou_threshold: float = 0.5, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video track_iou_threshold: iou threshold, below this number a bbox pair is removed from tracking """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period self._track_iou_threshold = track_iou_threshold @classmethod def from_config(cls, cfg: CfgNode_): """ Old style initialization using CfgNode Args: cfg: D2 CfgNode, config file Return: dictionary storing arguments for __init__ method """ assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) return { "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", "video_height": video_height, "video_width": video_width, "max_num_instances": max_num_instances, "max_lost_frame_count": max_lost_frame_count, "min_box_rel_dim": min_box_rel_dim, "min_instance_period": min_instance_period, "track_iou_threshold": track_iou_threshold } def update(self, instances: Instances) -> Instances: """ See BaseTracker description """ if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: # calculate IoU of all bbox pairs iou_all = pairwise_iou( boxes1=instances.pred_boxes, boxes2=self._prev_instances.pred_boxes, ) # sort IoU in descending order bbox_pairs = self._create_prediction_pairs(instances, iou_all) # assign previous ID to current bbox if IoU > track_iou_threshold self._reset_fields() for bbox_pair in bbox_pairs: idx = bbox_pair["idx"] prev_id = bbox_pair["prev_id"] if idx in self._matched_idx \ or 
prev_id in self._matched_ID \ or bbox_pair["IoU"] < self._track_iou_threshold: continue instances.ID[idx] = prev_id instances.ID_period[idx] = bbox_pair["prev_period"] + 1 instances.lost_frame_count[idx] = 0 self._matched_idx.add(idx) self._matched_ID.add(prev_id) self._untracked_prev_idx.remove(bbox_pair["prev_idx"]) instances = self._assign_new_id(instances) instances = self._merge_untracked_instances(instances) self._prev_instances = copy.deepcopy(instances) return instances def _create_prediction_pairs( self, instances: Instances, iou_all: np.ndarray ) -> List: """ For all instances in previous and current frames, create pairs. For each pair, store index of the instance in current frame predcitions, index in previous predictions, ID in previous predictions, IoU of the bboxes in this pair, period in previous predictions. Args: instances: D2 Instances, for predictions of the current frame iou_all: IoU for all bboxes pairs Return: A list of IoU for all pairs """ bbox_pairs = [] for i in range(len(instances)): for j in range(len(self._prev_instances)): bbox_pairs.append( { "idx": i, "prev_idx": j, "prev_id": self._prev_instances.ID[j], "IoU": iou_all[i, j], "prev_period": self._prev_instances.ID_period[j], } ) return bbox_pairs def _initialize_extra_fields(self, instances: Instances) -> Instances: """ If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instances.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_period"): instances.set("ID_period", [None] * len(instances)) if not instances.has("lost_frame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is None: instances.ID = list(range(len(instances))) se<fim_suffix>lf._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _reset_fields(self): """ Before each uodate call, reset fields first """ self._matched_idx = set() self._matched_ID = set() self._untracked_prev_idx = set(range(len(self._prev_instances))) def _assign_new_id(self, instances: Instances) -> Instances: """ For each untracked instance, assign a new id Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with new ID assigned """ untracked_idx = set(range(len(instances))).difference(self._matched_idx) for idx in untracked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _merge_untracked_instances(self, instances: Instances) -> Instances: """ For untracked previous instances, under certain condition, still keep them in tracking and merge with the current instances. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances merging current instances and instances from previous frame decided to keep tracking """ untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) for idx in self._untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._prev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pred_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>import math import numpy as np import cv2 def db_eval_iou(annotation, segmentation, void_pixels=None): """ Compute region similarity as the Jaccard Index. Arguments: annotation (ndarray): binary annotation map. segmentation (ndarray): binary segmentation map. void_pixels (ndarray): optional mask with void pixels Return: jaccard (float): region similarity """ assert annotation.shape == segmentation.shape, \ f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.' annotation = annotation.astype(np.bool) segmentation = segmentation.astype(np.bool) if void_pixels is not None: assert annotation.shape == void_pixels.shape, \ f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.' void_pixels = void_pixels.astype(np.bool) else: void_pixels = np.zeros_like(segmentation) # Intersection between all sets inters = np.<fim_suffix>sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1)) union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1)) j = inters / union if j.ndim == 0: j = 1 if np.isclose(union, 0) else j else: j[np.isclose(union, 0)] = 1 return j def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008): assert annotation.shape == segmentation.shape if void_pixels is not None: assert annotation.shape == void_pixels.shape if annotation.ndim == 3: n_frames = annotation.shape[0] f_res = np.zeros(n_frames) for frame_id in range(n_frames): void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ] f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th) elif annotation.ndim == 2: f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th) else: raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions') return f_res def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008): """ Compute mean,recall and decay from per-frame evaluation. Calculates precision/recall for boundaries between foreground_mask and gt_mask using morphological operators to speed it up. Arguments: foreground_mask (ndarray): binary segmentation image. gt_mask (ndarray): binary annotated image. 
void_pixels (ndarray): optional mask with void pixels Returns: F (float): boundaries F-measure """ assert np.atleast_3d(foreground_mask).shape[2] == 1 if void_pixels is not None: void_pixels = void_pixels.astype(np.bool) else: void_pixels = np.zeros_like(foreground_mask).astype(np.bool) bound_pix = bound_th if bound_th >= 1 else \ np.ceil(bound_th * np.linalg.norm(foreground_mask.shape)) # Get the pixel boundaries of both masks fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels)) gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels)) from skimage.morphology import disk # fg_dil = binary_dilation(fg_boundary, disk(bound_pix)) fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8)) # gt_dil = binary_dilation(gt_boundary, disk(bound_pix)) gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8)) # Get the intersection gt_match = gt_boundary * fg_dil fg_match = fg_boundary * gt_dil # Area of the intersection n_fg = np.sum(fg_boundary) n_gt = np.sum(gt_boundary) # % Compute precision and recall if n_fg == 0 and n_gt > 0: precision = 1 recall = 0 elif n_fg > 0 and n_gt == 0: precision = 0 recall = 1 elif n_fg == 0 and n_gt == 0: precision = 1 recall = 1 else: precision = np.sum(fg_match) / float(n_fg) recall = np.sum(gt_match) / float(n_gt) # Compute F measure if precision + recall == 0: F = 0 else: F = 2 * precision * recall / (precision + recall) return F def _seg2bmap(seg, width=None, height=None): """ From a segmentation, compute a binary boundary map with 1 pixel wide boundaries. The boundary pixels are offset by 1/2 pixel towards the origin from the actual segment boundary. Arguments: seg : Segments labeled from 1..k. width : Width of desired bmap <= seg.shape[1] height : Height of desired bmap <= seg.shape[0] Returns: bmap (ndarray): Binary boundary map. David Martin <[email protected]> January 2003 """ seg = seg.astype(np.bool) seg[seg > 0] = 1 assert np.atleast_3d(seg).shape[2] == 1 width = seg.shape[1] if width is None else width height = seg.shape[0] if height is None else height h, w = seg.shape[:2] ar1 = float(width) / float(height) ar2 = float(w) / float(h) assert not ( width > w | height > h | abs(ar1 - ar2) > 0.01 ), "Can" "t convert %dx%d seg to %dx%d bmap." 
% (w, h, width, height) e = np.zeros_like(seg) s = np.zeros_like(seg) se = np.zeros_like(seg) e[:, :-1] = seg[:, 1:] s[:-1, :] = seg[1:, :] se[:-1, :-1] = seg[1:, 1:] b = seg ^ e | seg ^ s | seg ^ se b[-1, :] = seg[-1, :] ^ e[-1, :] b[:, -1] = seg[:, -1] ^ s[:, -1] b[-1, -1] = 0 if w == width and h == height: bmap = b else: bmap = np.zeros((height, width)) for x in range(w): for y in range(h): if b[y, x]: j = 1 + math.floor((y - 1) + height / h) i = 1 + math.floor((x - 1) + width / h) bmap[j, i] = 1 return bmap if __name__ == '__main__': from davis2017.davis import DAVIS from davis2017.results import Results dataset = DAVIS(root='input_dir/ref', subset='val', sequences='aerobatics') results = Results(root_dir='examples/osvos') # Test timing F measure for seq in dataset.get_sequences(): all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True) all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1] all_res_masks = results.read_masks(seq, all_masks_id) f_metrics_res = np.zeros(all_gt_masks.shape[:2]) for ii in range(all_gt_masks.shape[0]): f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...]) # Run using to profile code: python -m cProfile -o f_measure.prof metrics.py # snakeviz f_measure.prof <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
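db_eval_iou in the row above is the Jaccard index over binary masks, with optional void-pixel exclusion and batching. A minimal NumPy check for the single-mask, no-void case; the arrays are made up for illustration:

import numpy as np

annotation = np.array([[1, 1, 0],
                       [1, 1, 0],
                       [0, 0, 0]], dtype=bool)
segmentation = np.array([[1, 0, 0],
                         [1, 1, 1],
                         [0, 0, 0]], dtype=bool)

inters = np.sum(segmentation & annotation)   # 3 overlapping foreground pixels
union = np.sum(segmentation | annotation)    # 5 pixels in either mask
print(inters / union)                        # 0.6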
<filename>UniRef/detectron2/layers/losses.py<fim_prefix>import math import torch def diou_loss( boxes1: torch.Tensor, boxes2: torch.Tensor, reduction: str = "none", eps: float = 1e-7, ) -> torch.Tensor: """ Distance Intersection over Union Loss (Zhaohui Zheng et. al) https://arxiv.org/abs/1911.08287 Args: boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). reduction: 'none' | 'mean' | 'sum' 'none': No reduction will be applied to the output. 'mean': The output will be averaged. 'sum': The output will be summed. eps (float): small number to prevent division by zero """ x1, y1, x2, y2 = boxes1.unbind(dim=-1) x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) # TODO: use torch._assert_async() when pytorch 1.8 support is dropped assert (x2 >= x1).all(), "bad box: x1 larger than x2" assert (y2 >= y1).all(), "bad box: y1 larger than y2" # Intersection keypoints xkis1 = torch.max(x1, x1g) ykis1 = torch.max(y1, y1g) xkis2 = torch.min(x2, x2g) ykis2 = torch.min(y2, y2g) intsct = torch.zeros_like(x1) mask = (ykis2 > ykis1) & (xkis2 > xkis1) intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps iou = intsct / union # smallest enclosing box xc1 = torch.min(x1, x1g) yc1 = torch.min(y1, y1g) xc2 = torch.max(x2, x2g) yc2 = torch.max(y2, y2g) diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps # centers of boxes x_p = (x2 + x1) / 2 y_p = (y2 + y1) / 2 x_g = (x1g + x2g) / 2 y_g = (y1g + y2g) / 2 distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2) # Eqn. (7) loss = 1 - iou + (distance / diag_len) if reduction == "mean": loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() elif reduction == "sum": loss = loss.sum() return loss def ciou_loss( boxes1: torch.Tensor, boxes2: torch.Tensor, reduction: str = "none", eps: float = 1e-7, ) -> torch.Tensor: """ Complete Intersection over Union Loss (Zhaohui Zheng et. al) https://arxiv.org/abs/1911.08287 Args: boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). reduction: 'none' | 'mean' | 'sum' 'none': No reduction will be applied to the output. 'mean': The output will be averaged. 'sum': The output will be summed. eps (float): small number to prevent division by zero """ x1, y1, x2, y2 = boxes1.unbind(dim=-1) x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) # TODO: use torch._assert_async() when pytorch 1.8 support is dropped assert (x2 >= x1).all(), "bad box: x1 larger than x2" assert (y2 >= y1).all(), "bad box: y1 larger than y2" # Intersection keypoints xkis1 = torch.max(x1, x1g) ykis1 = torch.max(y1, y1g) xkis2 = torch.min(x2, x2g) ykis2 = torch.min(y2, y2g) intsct = torch.zeros_like(x1) mask = (ykis2 > ykis1) & (xkis2 > xkis1) ints<fim_suffix>ct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps iou = intsct / union # smallest enclosing box xc1 = torch.min(x1, x1g) yc1 = torch.min(y1, y1g) xc2 = torch.max(x2, x2g) yc2 = torch.max(y2, y2g) diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps # centers of boxes x_p = (x2 + x1) / 2 y_p = (y2 + y1) / 2 x_g = (x1g + x2g) / 2 y_g = (y1g + y2g) / 2 distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2) # width and height of boxes w_pred = x2 - x1 h_pred = y2 - y1 w_gt = x2g - x1g h_gt = y2g - y1g v = (4 / (math.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2) with torch.no_grad(): alpha = v / (1 - iou + v + eps) # Eqn. 
(10) loss = 1 - iou + (distance / diag_len) + alpha * v if reduction == "mean": loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() elif reduction == "sum": loss = loss.sum() return loss <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
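diou_loss above is 1 - IoU plus the squared center distance divided by the squared diagonal of the smallest enclosing box (Eqn. 7 in the cited paper). A self-contained single-pair check that mirrors those steps; the function name and sample boxes are illustrative:

import torch

def diou_single(b1: torch.Tensor, b2: torch.Tensor, eps: float = 1e-7) -> torch.Tensor:
    x1, y1, x2, y2 = b1
    x1g, y1g, x2g, y2g = b2
    # Intersection and union
    iw = (torch.min(x2, x2g) - torch.max(x1, x1g)).clamp(min=0)
    ih = (torch.min(y2, y2g) - torch.max(y1, y1g)).clamp(min=0)
    inter = iw * ih
    union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - inter + eps
    iou = inter / union
    # Squared diagonal of the smallest enclosing box
    diag = (torch.max(x2, x2g) - torch.min(x1, x1g)) ** 2 + \
           (torch.max(y2, y2g) - torch.min(y1, y1g)) ** 2 + eps
    # Squared distance between box centers
    dist = ((x1 + x2) / 2 - (x1g + x2g) / 2) ** 2 + ((y1 + y2) / 2 - (y1g + y2g) / 2) ** 2
    return 1 - iou + dist / diag

b1 = torch.tensor([0.0, 0.0, 4.0, 4.0])
b2 = torch.tensor([2.0, 2.0, 6.0, 6.0])
print(diou_single(b1, b2))   # approximately tensor(0.9683)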
<filename>UniRef/detectron2/layers/losses.py<fim_prefix>import math import torch def diou_loss( boxes1: torch.Tensor, boxes2: torch.Tensor, reduction: str = "none", eps: float = 1e-7, ) -> torch.Tensor: """ Distance Intersection over Union Loss (Zhaohui Zheng et. al) https://arxiv.org/abs/1911.08287 Args: boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). reduction: 'none' | 'mean' | 'sum' 'none': No reduction will be applied to the output. 'mean': The output will be averaged. 'sum': The output will be summed. eps (float): small number to prevent division by zero """ x1, y1, x2, y2 = boxes1.unbind(dim=-1) x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) # TODO: use torch._assert_async() when pytorch 1.8 support is dropped assert (x2 >= x1).all(), "bad box: x1 larger than x2" assert (y2 >= y1).all(), "bad box: y1 larger than y2" # Intersection keypoints xkis1 = torch.max(x1, x1g) ykis1 = torch.max(y1, y1g) xkis2 = torch.min(x2, x2g) ykis2 = torch.min(y2, y2g) intsct = torch.zeros_like(x1) mask = (ykis2 > ykis1) & (xkis2 > xkis1) intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps iou = intsct / union # smallest enclosing box xc1 = torch.min(x1, x1g) yc1 = torch.min(y1, y1g) xc2 = torch.max(x2, x2g) yc2 = torch.max(y2, y2g) diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps # centers of boxes x_p = (x2 + x1) / 2 y_p = (y2 + y1) / 2 x_g = (x1g + x2g) / 2 y_g = (y1g + y2g) / 2 distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2) # Eqn. (7) loss = 1 - iou + (distance / diag_len) if reduction == "mean": loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() elif reduction == "sum": loss = loss.sum() return loss def ciou_loss( boxes1: torch.Tensor, boxes2: torch.Tensor, reduction: str = "none", eps: float = 1e-7, ) -> torch.Tensor: """ Complete Intersection over Union Loss (Zhaohui Zheng et. al) https://arxiv.org/abs/1911.08287 Args: boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). reduction: 'none' | 'mean' | 'sum' 'none': No reduction will be applied to the output. 'mean': The output will be averaged. 'sum': The output will be summed. eps (float): small number to prevent division by zero """ x1, y1, x2, y2 = boxes1.unbind(dim=-1) x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) # TODO: use torch._assert_async() when pytorch 1.8 support is dropped assert (x2 >= x1).all(), "bad box: x1 larger than x2" assert (y2 >= y1).all(), "bad box: y1 larger than y2" # Intersection keypoints xkis1 = torch.max(x1, x1g) ykis1 = torch.max(y1, y1g) xkis2 = torch.min(x2, x2g) ykis2 = torch.min(y2, y2g) intsct = torch.zeros_like(x1) mask = (ykis2 > ykis1) & (xkis2 > xkis1) intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps iou = intsct / union # smallest enclosing box xc1 = tor<fim_suffix>ch.min(x1, x1g) yc1 = torch.min(y1, y1g) xc2 = torch.max(x2, x2g) yc2 = torch.max(y2, y2g) diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps # centers of boxes x_p = (x2 + x1) / 2 y_p = (y2 + y1) / 2 x_g = (x1g + x2g) / 2 y_g = (y1g + y2g) / 2 distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2) # width and height of boxes w_pred = x2 - x1 h_pred = y2 - y1 w_gt = x2g - x1g h_gt = y2g - y1g v = (4 / (math.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2) with torch.no_grad(): alpha = v / (1 - iou + v + eps) # Eqn. 
(10) loss = 1 - iou + (distance / diag_len) + alpha * v if reduction == "mean": loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() elif reduction == "sum": loss = loss.sum() return loss <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy import numpy as np import torch from detectron2.structures import Boxes, Instances from .base_tracker import BaseTracker from scipy.optimize import linear_sum_assignment from ..config.config import CfgNode as CfgNode_ from typing import Dict from detectron2.config import configurable class BaseHungarianTracker(BaseTracker): """ A base class for all Hungarian trackers """ @configurable def __init__( self, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period @classmethod def from_config(cls, cfg: CfgNode_) -> Dict: raise NotImplementedError("Calling HungarianTracker::from_config") def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray: raise NotImplementedError("Calling HungarianTracker::build_matrix") def update(self, instances: Instances) -> Instances: if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: self._untracked_prev_idx = set(range(len(self._prev_instances))) cost_matrix = self.build_cost_matrix(instances, self._prev_instances) matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix) instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx) instances = self._process_unmatched_idx(instances, matched_idx) instances = self._process_unmatched_prev_idx(instances, matched_prev_idx) self._prev_instances = copy.deepcopy(instances) return instances def _initialize_extra_fields(self, instances: Instances) -> Instances: """ If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instances.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_period"): instances.set("ID_period", [None] * len(instances)) if not instances.has("lost_frame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is None: instances.ID = list(range(len(instances))) self._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _process_matched_idx( self, instances: Instances, matched_idx: np.ndarray, matched_prev_idx: np.ndarray ) -> Instances: assert matched_idx.size == matched_prev_idx.size for i in range(matched_idx.size): instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]] instances.ID_period[matched_idx[i]] = \ self._prev_instances.ID_period[matched_prev_idx[i]] + 1 instances.lost_frame_count[matched_idx[i]] = 0 return instances def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances: untracked_idx = set(range(len(instances))).difference(set(matched_idx)) for idx in untracked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _process_unmatched_prev_idx( self, instances: Instances, matched_prev_idx: np.ndarray ) -> Instances: untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx)) for idx in untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._prev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pred_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untrack<fim_suffix>ed_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
targets: null
block_type: STATEMENT
scenario: complete_current_header_empty_completion
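The tracker example above matches current detections to previously tracked instances by running scipy.optimize.linear_sum_assignment on a cost matrix. A minimal sketch of that call, using an invented 2x3 toy cost matrix (rows are current detections, columns are previous instances; the values are not taken from the example):

import numpy as np
from scipy.optimize import linear_sum_assignment

# Toy cost matrix: lower cost means a detection/previous-instance pair is
# more likely to be the same object. Values here are made up.
cost = np.array([
    [0.1, 0.9, 0.8],
    [0.7, 0.2, 0.9],
])

matched_idx, matched_prev_idx = linear_sum_assignment(cost)
print(matched_idx, matched_prev_idx)  # [0 1] [0 1]: detection 0 pairs with previous 0, detection 1 with previous 1

The returned index pairs are exactly what _process_matched_idx in the snippet consumes to carry IDs forward from the previous frame.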
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import logging import re from typing import Dict, List import torch from tabulate import tabulate def convert_basic_c2_names(original_keys): """ Apply some basic name conversion to names in C2 weights. It only deals with typical backbone models. Args: original_keys (list[str]): Returns: list[str]: The same number of strings matching those in original_keys. """ layer_keys = copy.deepcopy(original_keys) layer_keys = [ {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys ] # some hard-coded mappings layer_keys = [k.replace("_", ".") for k in layer_keys] layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys] layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys] # Uniform both bn and gn names to "norm" layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys] # stem layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys] # to avoid mis-matching with "conv1" in other components (e.g. detection head) layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys] # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5) # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys] # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys] # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys] # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys] # blocks layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys] layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys] layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys] layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys] # DensePose substitutions layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys] layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys] layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys] layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys] layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys] return layer_keys def convert_c2_detectron_names(weights): """ Map Caffe2 Detectron weight names to Detectron2 names. 
Args: weights (dict): name -> tensor Returns: dict: detectron2 names -> tensor dict: detectron2 names -> C2 names """ logger = logging.getLogger(__name__) logger.info("Renaming Caffe2 weights ......") original_keys = sorted(weights.keys()) layer_keys = copy.deepcopy(original_keys) layer_keys = convert_basic_c2_names(layer_keys) # -------------------------------------------------------------------------- # RPN hidden representation conv # -------------------------------------------------------------------------- # FPN case # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then # shared for all other levels, hence the appearance of "fpn2" layer_keys = [ k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys ] # Non-FPN case layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys] # -------------------------------------------------------------------------- # RPN box transformation conv # -------------------------------------------------------------------------- # FPN case (see note above about "fpn2") layer_keys = [ k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # Non-FPN case layer_keys = [ k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # -------------------------------------------------------------------------- # Fast R-CNN box head # -------------------------------------------------------------------------- layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys] layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys] layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys] layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys] # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys] # -------------------------------------------------------------------------- # FPN lateral and output convolutions # -------------------------------------------------------------------------- def fpn_map(name): """ Look for keys with the following patterns: 1) Starts with "fpn.inner." 
Example: "fpn.inner.res2.2.sum.lateral.weight" Meaning: These are lateral pathway convolutions 2) Starts with "fpn.res" Example: "fpn.res2.2.sum.weight" Meaning: These are FPN output convolutions """ splits = name.split(".") norm = ".norm" if "norm" in splits else "" if name.startswith("fpn.inner."): # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight'] stage = int(splits[2][len("res") :]) return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1]) elif name.startswith("fpn.res"): # splits example: ['fpn', 'res2', '2', 'sum', 'weight'] stage = int(splits[1][len("res") :]) return "fpn_output{}{}.{}".format(stage, norm, splits[-1]) return name layer_keys = [fpn_map(k) for k in layer_keys] # -------------------------------------------------------------------------- # Mask R-CNN mask head # -------------------------------------------------------------------------- # roi_heads.StandardROIHeads case layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys] layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys] layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys] # roi_heads.Res5ROIHeads case layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys] # -------------------------------------------------------------------------- # Keypoint R-CNN head # -------------------------------------------------------------------------- # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX" layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys] layer_keys = [ k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys ] layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys] # -------------------------------------------------------------------------- # Done with replacements # -------------------------------------------------------------------------- assert len(set(layer_keys)) == len(layer_keys) assert len(original_keys) == len(layer_keys) new_weights = {} new_keys_to_original_keys = {} for orig, renamed in zip(original_keys, layer_keys): new_keys_to_original_keys[renamed] = orig if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."): # remove the meaningless prediction weight for background class new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1 new_weights[renamed] = weights[orig][new_start_idx:] logger.info( "Remove prediction weight for background class in {}. The shape changes from " "{} to {}.".format( renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape) ) ) elif renamed.startswith("cls_score."): # move weights of bg class from original index 0 to last index logger.info( "Move classification weights for background class in {} from index 0 to " "index {}.".format(renamed, weights[orig].shape[0] - 1) ) new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]]) else: new_weights[renamed] = weights[orig] return new_weights, new_keys_to_original_keys # Note the current matching is not symmetric. # it assumes model_state_dict will have longer names. def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True): """ Match names between the two state-dict, and returns a new chkpt_state_dict with names converted to match model_state_dict with heuristics. The returned dict can be later loaded with fvcore checkpointer. 
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 model and will be renamed at first. Strategy: suppose that the models that we will create will have prefixes appended to each of its keys, for example due to an extra level of nesting that the original pre-trained weights from ImageNet won't contain. For example, model.state_dict() might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains res2.conv1.weight. We thus want to match both parameters together. For that, we look for each model weight, look among all loaded keys if there is one that is a suffix of the current weight name, and use it if that's the case. If multiple matches exist, take the one with longest size of the corresponding name. For example, for the same model as before, the pretrained weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, we want to match backbone[0].body.conv1.weight to conv1.weight, and backbone[0].body.res2.conv1.weight to res2.conv1.weight. """ model_keys = sorted(model_state_dict.keys()) if c2_conversion: ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict) # original_keys: the name in the original dict (before renaming) else: original_keys = {x: x for x in ckpt_state_dict.keys()} ckpt_keys = sorted(ckpt_state_dict.keys()) def match(a, b): # Matched ckpt_key should be a complete (starts with '.') suffix. # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1, # but matches whatever_conv1 or mesh_head.whatever_conv1. return a == b or a.endswith("." + b) # get a matrix of string matches, where each (i, j) entry correspond to the size of the # ckpt_key string, if it matches match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys] match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys)) # use the matched one with longest size in case of multiple matches max_match_size, idxs = match_matrix.max(1) # remove indices that correspond to no-match idxs[max_match_size == 0] = -1 logger = logging.getLogger(__name__) # matched_pairs (matched checkpoint key --> matched model key) matched_keys = {} result_state_dict = {} for idx_model, idx_ckpt in enumerate(idxs.tolist()): if idx_ckpt == -1: continue key_model = model_keys[idx_model] key_ckpt = ckpt_keys[idx_ckpt] value_ckpt = ckpt_state_dict[key_ckpt] shape_in_model = model_state_dict[key_model].shape if shape_in_model != value_ckpt.shape: logger.warning( "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( key_ckpt, value_ckpt.shape, key_model, shape_in_model ) ) logger.warning( "{} will not be loaded. Please double check and see if this is desired.".format( key_ckpt ) ) continue assert key_model not in result_state_dict result_state_dict[key_model] = value_ckpt if key_ckpt in matched_keys: # already added to matched_keys logger.error( "Ambiguity found for {} in checkpoint!" 
"It matches at least two keys in the model ({} and {}).".format( key_ckpt, key_model, matched_keys[key_ckpt] ) ) raise ValueError("Cannot match one checkpoint key to multiple keys in the model.") matched_keys[key_ckpt] = key_model # logging: matched_model_keys = sorted(matched_keys.values()) if len(matched_model_keys) == 0: logger.warning("No weights in checkpoint matched with model.") return ckpt_state_dict common_prefix = _longest_common_prefix(matched_model_keys) rev_matched_keys = {v: k for k, v in matched_keys.items()} original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys} model_key_groups = _group_keys_by_module(matched_model_keys, original_keys) table = [] memo = set() for key_model in matched_model_keys: if key_model in memo: continue if key_model in model_key_groups: group = model_key_groups[key_model] memo |= set(group) shapes = [tuple(model_state_dict[k].shape) for k in group] table.append( ( _longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*", _group_str([original_keys[k] for k in group]), " ".join([str(x).replace(" ", "") for x in shapes]), ) ) else: key_checkpoint = original_keys[key_model] shape = str(tuple(model_state_dict[key_model].shape)) table.append((key_model[len(common_prefix) :], key_checkpoint, shape)) table_str = tabulate( table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"] ) logger.info( "Following weights matched with " + (f"submodule {common_prefix[:-1]}" if common_prefix else "model") + ":\n" + table_str ) unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())] for k in unmatched_ckpt_keys: result_state_dict[k] = ckpt_state_dict[k] return result_state_dict def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]): """ Params in the same submodule are grouped together. Args: keys: names of all parameters original_names: mapping from parameter name to their name in the checkpoint Returns: dict[name -> all other names in the same group] """ def _submodule_name(key): pos = key.rfind(".") if pos < 0: return None prefix = key[: pos + 1] return prefix all_submodules = [_submodule_name(k) for k in keys] all_submodules = [x for x in all_submodules if x] all_submodules = sorted(all_submodules, key=len) ret = {} for prefix in all_submodules: group = [k for k in keys if k.startswith(prefix)] if len(group) <= 1: continue original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group]) if len(original_name_lcp) == 0: # don't group weights if original names don't share prefix continue for k in group: if k in ret: continue ret[k] = group return ret def _longest_common_prefix(names: List[str]) -> str: """ ["abc.zfg", "abc.zef"] -> "abc." """ names = [n.split(".") for n in names] m1, m2 = min(names), <fim_suffix>max(names) ret = [a for a, b in zip(m1, m2) if a == b] ret = ".".join(ret) + "." if len(ret) else "" return ret def _longest_common_prefix_str(names: List[str]) -> str: m1, m2 = min(names), max(names) lcp = [a for a, b in zip(m1, m2) if a == b] lcp = "".join(lcp) return lcp def _group_str(names: List[str]) -> str: """ Turn "common1", "common2", "common3" into "common{1,2,3}" """ lcp = _longest_common_prefix_str(names) rest = [x[len(lcp) :] for x in names] rest = "{" + ",".join(rest) + "}" ret = lcp + rest # add some simplification for BN specifically ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*") ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*") return ret <fim_middle>
targets: null
block_type: STATEMENT
scenario: complete_current_header_empty_completion
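The checkpoint-loading example above aligns model parameter names to checkpoint names by treating a checkpoint key as a match when it is a complete dot-separated suffix of the model key, preferring the longest such match. A small self-contained sketch of that heuristic, with made-up key names:

def match(model_key: str, ckpt_key: str) -> bool:
    # Same rule as in the snippet: equal, or a full '.'-separated suffix.
    return model_key == ckpt_key or model_key.endswith("." + ckpt_key)

model_keys = ["backbone.bottom_up.res2.conv1.weight", "backbone.bottom_up.stem.conv1.weight"]
ckpt_keys = ["res2.conv1.weight", "conv1.weight"]

for mk in model_keys:
    # Among all matching checkpoint keys, keep the longest one.
    candidates = [ck for ck in ckpt_keys if match(mk, ck)]
    best = max(candidates, key=len) if candidates else None
    print(mk, "->", best)
# The res2 key pairs with res2.conv1.weight; the stem key falls back to conv1.weight.

This is the behavior described in the docstring: backbone[0].body.res2.conv1.weight is paired with res2.conv1.weight rather than the shorter conv1.weight.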
<filename>UniRef/detectron2/structures/masks.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import itertools import numpy as np from typing import Any, Iterator, List, Union import pycocotools.mask as mask_util import torch from torch import device from detectron2.layers.roi_align import ROIAlign from detectron2.utils.memory import retry_if_cuda_oom from .boxes import Boxes def polygon_area(x, y): # Using the shoelace formula # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray: """ Args: polygons (list[ndarray]): each array has shape (Nx2,) height, width (int) Returns: ndarray: a bool mask of shape (height, width) """ if len(polygons) == 0: # COCOAPI does not support empty polygons return np.zeros((height, width)).astype(np.bool) rles = mask_util.frPyObjects(polygons, height, width) rle = mask_util.merge(rles) return m<fim_suffix>ask_util.decode(rle).astype(np.bool) def rasterize_polygons_within_box( polygons: List[np.ndarray], box: np.ndarray, mask_size: int ) -> torch.Tensor: """ Rasterize the polygons into a mask image and crop the mask content in the given box. The cropped mask is resized to (mask_size, mask_size). This function is used when generating training targets for mask head in Mask R-CNN. Given original ground-truth masks for an image, new ground-truth mask training targets in the size of `mask_size x mask_size` must be provided for each predicted box. This function will be called to produce such targets. Args: polygons (list[ndarray[float]]): a list of polygons, which represents an instance. box: 4-element numpy array mask_size (int): Returns: Tensor: BoolTensor of shape (mask_size, mask_size) """ # 1. Shift the polygons w.r.t the boxes w, h = box[2] - box[0], box[3] - box[1] polygons = copy.deepcopy(polygons) for p in polygons: p[0::2] = p[0::2] - box[0] p[1::2] = p[1::2] - box[1] # 2. Rescale the polygons to the new box size # max() to avoid division by small number ratio_h = mask_size / max(h, 0.1) ratio_w = mask_size / max(w, 0.1) if ratio_h == ratio_w: for p in polygons: p *= ratio_h else: for p in polygons: p[0::2] *= ratio_w p[1::2] *= ratio_h # 3. Rasterize the polygons with coco api mask = polygons_to_bitmask(polygons, mask_size, mask_size) mask = torch.from_numpy(mask) return mask class BitMasks: """ This class stores the segmentation masks for all objects in one image, in the form of bitmaps. Attributes: tensor: bool Tensor of N,H,W, representing N instances in the image. """ def __init__(self, tensor: Union[torch.Tensor, np.ndarray]): """ Args: tensor: bool Tensor of N,H,W, representing N instances in the image. """ device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device) assert tensor.dim() == 3, tensor.size() self.image_size = tensor.shape[1:] self.tensor = tensor @torch.jit.unused def to(self, *args: Any, **kwargs: Any) -> "BitMasks": return BitMasks(self.tensor.to(*args, **kwargs)) @property def device(self) -> torch.device: return self.tensor.device @torch.jit.unused def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks": """ Returns: BitMasks: Create a new :class:`BitMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask. 2. 
`new_masks = masks[2:10]`: return a slice of masks. 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ if isinstance(item, int): return BitMasks(self.tensor[item].unsqueeze(0)) m = self.tensor[item] assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format( item, m.shape ) return BitMasks(m) @torch.jit.unused def __iter__(self) -> torch.Tensor: yield from self.tensor @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s def __len__(self) -> int: return self.tensor.shape[0] def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or non-empty (True). """ return self.tensor.flatten(1).any(dim=1) @staticmethod def from_polygon_masks( polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int ) -> "BitMasks": """ Args: polygon_masks (list[list[ndarray]] or PolygonMasks) height, width (int) """ if isinstance(polygon_masks, PolygonMasks): polygon_masks = polygon_masks.polygons masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks] if len(masks): return BitMasks(torch.stack([torch.from_numpy(x) for x in masks])) else: return BitMasks(torch.empty(0, height, width, dtype=torch.bool)) @staticmethod def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks": """ Args: roi_masks: height, width (int): """ return roi_masks.to_bitmasks(height, width) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each bitmask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. It has less reconstruction error compared to rasterization with polygons. However we observe no difference in accuracy, but BitMasks requires more memory to store all the masks. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. """ assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = self.tensor.device batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None] rois = torch.cat([batch_inds, boxes], dim=1) # Nx5 bit_masks = self.tensor.to(dtype=torch.float32) rois = rois.to(device=device) output = ( ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True) .forward(bit_masks[:, None, :, :], rois) .squeeze(1) ) output = output >= 0.5 return output def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around bitmasks. If a mask is empty, it's bounding box will be all zero. 
""" boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32) x_any = torch.any(self.tensor, dim=1) y_any = torch.any(self.tensor, dim=2) for idx in range(self.tensor.shape[0]): x = torch.where(x_any[idx, :])[0] y = torch.where(y_any[idx, :])[0] if len(x) > 0 and len(y) > 0: boxes[idx, :] = torch.as_tensor( [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32 ) return Boxes(boxes) @staticmethod def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks": """ Concatenates a list of BitMasks into a single BitMasks Arguments: bitmasks_list (list[BitMasks]) Returns: BitMasks: the concatenated BitMasks """ assert isinstance(bitmasks_list, (list, tuple)) assert len(bitmasks_list) > 0 assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list) cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0)) return cat_bitmasks class PolygonMasks: """ This class stores the segmentation masks for all objects in one image, in the form of polygons. Attributes: polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon. """ def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]): """ Arguments: polygons (list[list[np.ndarray]]): The first level of the list correspond to individual instances, the second level to all the polygons that compose the instance, and the third level to the polygon coordinates. The third level array should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). """ if not isinstance(polygons, list): raise ValueError( "Cannot create PolygonMasks: Expect a list of list of polygons per image. " "Got '{}' instead.".format(type(polygons)) ) def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray: # Use float64 for higher precision, because why not? # Always put polygons on CPU (self.to is a no-op) since they # are supposed to be small tensors. # May need to change this assumption if GPU placement becomes useful if isinstance(t, torch.Tensor): t = t.cpu().numpy() return np.asarray(t).astype("float64") def process_polygons( polygons_per_instance: List[Union[torch.Tensor, np.ndarray]] ) -> List[np.ndarray]: if not isinstance(polygons_per_instance, list): raise ValueError( "Cannot create polygons: Expect a list of polygons per instance. " "Got '{}' instead.".format(type(polygons_per_instance)) ) # transform each polygon to a numpy array polygons_per_instance = [_make_array(p) for p in polygons_per_instance] for polygon in polygons_per_instance: if len(polygon) % 2 != 0 or len(polygon) < 6: raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.") return polygons_per_instance self.polygons: List[List[np.ndarray]] = [ process_polygons(polygons_per_instance) for polygons_per_instance in polygons ] def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks": return self @property def device(self) -> torch.device: return torch.device("cpu") def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around polygon masks. 
""" boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32) for idx, polygons_per_instance in enumerate(self.polygons): minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32) maxxy = torch.zeros(2, dtype=torch.float32) for polygon in polygons_per_instance: coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32) minxy = torch.min(minxy, torch.min(coords, dim=0).values) maxxy = torch.max(maxxy, torch.max(coords, dim=0).values) boxes[idx, :2] = minxy boxes[idx, 2:] = maxxy return Boxes(boxes) def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or not (True). """ keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons] return torch.from_numpy(np.asarray(keep, dtype=np.bool)) def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks": """ Support indexing over the instances and return a `PolygonMasks` object. `item` can be: 1. An integer. It will return an object with only one instance. 2. A slice. It will return an object with the selected instances. 3. A list[int]. It will return an object with the selected instances, correpsonding to the indices in the list. 4. A vector mask of type BoolTensor, whose length is num_instances. It will return an object with the instances whose mask is nonzero. """ if isinstance(item, int): selected_polygons = [self.polygons[item]] elif isinstance(item, slice): selected_polygons = self.polygons[item] elif isinstance(item, list): selected_polygons = [self.polygons[i] for i in item] elif isinstance(item, torch.Tensor): # Polygons is a list, so we have to move the indices back to CPU. if item.dtype == torch.bool: assert item.dim() == 1, item.shape item = item.nonzero().squeeze(1).cpu().numpy().tolist() elif item.dtype in [torch.int32, torch.int64]: item = item.cpu().numpy().tolist() else: raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype)) selected_polygons = [self.polygons[i] for i in item] return PolygonMasks(selected_polygons) def __iter__(self) -> Iterator[List[np.ndarray]]: """ Yields: list[ndarray]: the polygons for one instance. Each Tensor is a float64 vector representing a polygon. """ return iter(self.polygons) def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.polygons)) return s def __len__(self) -> int: return len(self.polygons) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each mask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. 
""" assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = boxes.device # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise # (several small tensors for representing a single instance mask) boxes = boxes.to(torch.device("cpu")) results = [ rasterize_polygons_within_box(poly, box.numpy(), mask_size) for poly, box in zip(self.polygons, boxes) ] """ poly: list[list[float]], the polygons for one instance box: a tensor of shape (4,) """ if len(results) == 0: return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device) return torch.stack(results, dim=0).to(device=device) def area(self): """ Computes area of the mask. Only works with Polygons, using the shoelace formula: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates Returns: Tensor: a vector, area for each instance """ area = [] for polygons_per_instance in self.polygons: area_per_instance = 0 for p in polygons_per_instance: area_per_instance += polygon_area(p[0::2], p[1::2]) area.append(area_per_instance) return torch.tensor(area) @staticmethod def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks": """ Concatenates a list of PolygonMasks into a single PolygonMasks Arguments: polymasks_list (list[PolygonMasks]) Returns: PolygonMasks: the concatenated PolygonMasks """ assert isinstance(polymasks_list, (list, tuple)) assert len(polymasks_list) > 0 assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list) cat_polymasks = type(polymasks_list[0])( list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list)) ) return cat_polymasks class ROIMasks: """ Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given, full-image bitmask can be obtained by "pasting" the mask on the region defined by the corresponding ROI box. """ def __init__(self, tensor: torch.Tensor): """ Args: tensor: (N, M, M) mask tensor that defines the mask within each ROI. """ if tensor.dim() != 3: raise ValueError("ROIMasks must take a masks of 3 dimension.") self.tensor = tensor def to(self, device: torch.device) -> "ROIMasks": return ROIMasks(self.tensor.to(device)) @property def device(self) -> device: return self.tensor.device def __len__(self): return self.tensor.shape[0] def __getitem__(self, item) -> "ROIMasks": """ Returns: ROIMasks: Create a new :class:`ROIMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[2:10]`: return a slice of masks. 2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ t = self.tensor[item] if t.dim() != 3: raise ValueError( f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!" ) return ROIMasks(t) @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s @torch.jit.unused def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5): """ Args: see documentation of :func:`paste_masks_in_image`. 
""" from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape if torch.jit.is_tracing(): if isinstance(height, torch.Tensor): paste_func = _paste_masks_tensor_shape else: paste_func = paste_masks_in_image else: paste_func = retry_if_cuda_oom(paste_masks_in_image) bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold) return BitMasks(bitmasks) <fim_middle>
targets: null
block_type: STATEMENT
scenario: complete_current_header_empty_completion
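The masks.py example above computes polygon areas with the shoelace formula over flat [x0, y0, x1, y1, ...] coordinate arrays. A self-contained check on a unit square (the coordinates are invented for illustration):

import numpy as np

def polygon_area(x: np.ndarray, y: np.ndarray) -> float:
    # Shoelace formula: 0.5 * |sum(x_i * y_{i-1} - y_i * x_{i-1})|, same as in the snippet.
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

poly = np.array([0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0])  # unit square as [x0, y0, x1, y1, ...]
print(polygon_area(poly[0::2], poly[1::2]))  # -> 1.0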
<filename>UniRef/detectron2/structures/masks.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import itertools import numpy as np from typing import Any, Iterator, List, Union import pycocotools.mask as mask_util import torch from torch import device from detectron2.layers.roi_align import ROIAlign from detectron2.utils.memory import retry_if_cuda_oom from .boxes import Boxes def polygon_area(x, y): # Using the shoelace formula # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray: """ Args: polygons (list[ndarray]): each array has shape (Nx2,) height, width (int) Returns: ndarray: a bool mask of shape (height, width) """ if len(polygons) == 0: # COCOAPI does not support empty polygons return np.zeros((height, width)).astype(np.bool) rles = mask_util.frPyObjects(polygons, height, width) rle = mask_util.merge(rles) return mask_util.decode(rle).astype(np.bool) def rasterize_polygons_within_box( polygons: List[np.ndarray], box: np.ndarray, mask_size: int ) -> torch.Tensor: """ Rasterize the polygons into a mask image and crop the mask content in the given box. The cropped mask is resized to (mask_size, mask_size). This function is used when generating training targets for mask head in Mask R-CNN. Given original ground-truth masks for an image, new ground-truth mask training targets in the size of `mask_size x mask_size` must be provided for each predicted box. This function will be called to produce such targets. Args: polygons (list[ndarray[float]]): a list of polygons, which represents an instance. box: 4-element numpy array mask_size (int): Returns: Tensor: BoolTensor of shape (mask_size, mask_size) """ # 1. Shift the polygons w.r.t the boxes w, h = box[2] - box[0], box[3] - box[1] polygons = copy.deepcopy(polygons) for p in polygons: p[0::2] = p[0::2] - box[0] p[1::2] = p[1::2] - box[1] # 2. Rescale the polygons to the new box size # max() to avoid division by small number ratio_h = mask_size / max(h, 0.1) ratio_w = mask_size / max(w, 0.1) if ratio_h == ratio_w: for p in polygons: p *= ratio_h else: for p in polygons: p[0::2] *= ratio_w p[1::2] *= ratio_h # 3. Rasterize the polygons with coco api mask = polygons_to_bitmask(polygons, mask_size, mask_size) mask = torch.from_numpy(mask) return mask class BitMasks: """ This class stores the segmentation masks for all objects in one image, in the form of bitmaps. Attributes: tensor: bool Tensor of N,H,W, representing N instances in the image. """ def __init__(self, tensor: Union[torch.Tensor, np.ndarray]): """ Args: tensor: bool Tensor of N,H,W, representing N instances in the image. """ device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device) assert tensor.dim() == 3, tensor.size() self.image_size = tensor.shape[1:] self.tensor = tensor @torch.jit.unused def to(self, *args: Any, **kwargs: Any) -> "BitMasks": return BitMasks(self.tensor.to(*args, **kwargs)) @property def device(self) -> torch.device: return self.tensor.device @torch.jit.unused def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks": """ Returns: BitMasks: Create a new :class:`BitMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask. 2. 
`new_masks = masks[2:10]`: return a slice of masks. 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ if isinstance(item, int): return BitMasks(self.tensor[item].unsqueeze(0)) m = self.tensor[item] assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format( item, m.shape ) return BitMasks(m) @torch.jit.unused def __iter__(self) -> torch.Tensor: yield from self.tensor @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s def __len__(self) -> int: return self.tensor.shape[0] def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or non-empty (True). """ return self.tensor.flatten(1).any(dim=1) @staticmethod def from_polygon_masks( polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int ) -> "BitMasks": """ Args: polygon_masks (list[list[ndarray]] or PolygonMasks) height, width (int) """ if isinstance(polygon_masks, PolygonMasks): polygon_masks = polygon_masks.polygons masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks] if len(masks): return BitMasks(torch.stack([torch.from_numpy(x) for x in masks])) else: return BitMasks(torch.empty(0, height, width, dtype=torch.bool)) @staticmethod def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks": """ Args: roi_masks: height, width (int): """ return roi_masks.to_bitmasks(height, width) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each bitmask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. It has less reconstruction error compared to rasterization with polygons. However we observe no difference in accuracy, but BitMasks requires more memory to store all the masks. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. """ assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = self.tensor.device batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None] rois = torch.cat([batch_inds, boxes], dim=1) # Nx5 bit_masks = self.tensor.to(dtype=torch.float32) rois = rois.to(device=device) output = ( ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True) .forward(bit_masks[:, None, :, :], rois) .squeeze(1) ) output = output >= 0.5 return output def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around bitmasks. If a mask is empty, it's bounding box will be all zero. 
""" boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32) x_any = torch.any(self.tensor, dim=1) y_any = torch.any(self.tensor, dim=2) for idx in range(self.tensor.shape[0]): x = torch.where(x_any[idx, :])[0] y = torch.where(y_any[idx, :])[0] if len(x) > 0 and len(y) > 0: boxes[idx, :] = torch.as_tensor( [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32 ) return Boxes(boxes) @staticmethod def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks": """ Concatenates a list of BitMasks into a single BitMasks Arguments: bitmasks_list (list[BitMasks]) Returns: BitMasks: the concatenated BitMasks """ assert isinstance(bitmasks_list, (list, tuple)) assert len(bitmasks_list) > 0 assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list) cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0)) return cat_bitmasks class PolygonMasks: """ This class stores the segmentation masks for all objects in one image, in the form of polygons. Attributes: polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon. """ def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]): """ Arguments: polygons (list[list[np.ndarray]]): The first level of the list correspond to individual instances, the second level to all the polygons that compose the instance, and the third level to the polygon coordinates. The third level array should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). """ if not isinstance(polygons, list): raise ValueError( "Cannot create PolygonMasks: Expect a list of list of polygons per image. " "Got '{}' instead.".format(type(polygons)) ) def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray: # Use float64 for higher precision, because why not? # Always put polygons on CPU (self.to is a no-op) since they # are supposed to be small tensors. # May need to change this assumption if GPU placement becomes useful if isinstance(t, torch.Tensor): t = t.cpu().numpy() return np.asarray(t).astype("float64") def process_polygons( polygons_per_instance: List[Union[torch.Tensor, np.ndarray]] ) -> List[np.ndarray]: if not isinstance(polygons_per_instance, list): raise ValueError( "Cannot create polygons: Expect a list of polygons per instance. " "Got '{}' instead.".format(type(polygons_per_instance)) ) # transform each polygon to a numpy array polygons_per_instance = [_make_array(p) for p in polygons_per_instance] for polygon in polygons_per_instance: if len(polygon) % 2 != 0 or len(polygon) < 6: raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.") return polygons_per_instance self.polygons: List[List[np.ndarray]] = [ process_polygons(polygons_per_instance) for polygons_per_instance in polygons ] def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks": return self @property def device(self) -> torch.device: return torch.device("cpu") def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around polygon masks. 
""" boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32) for idx, polygons_per_instance in enumerate(self.polygons): minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32) maxxy = torch.zeros(2, dtype=torch.float32) for polygon in polygons_per_instance: coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32) minxy = torch.min(minxy, torch.min(coords, dim=0).values) maxxy = torch.max(maxxy, torch.max(coords, dim=0).values) boxes[idx, :2] = minxy boxes[idx, 2:] =<fim_suffix> maxxy return Boxes(boxes) def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or not (True). """ keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons] return torch.from_numpy(np.asarray(keep, dtype=np.bool)) def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks": """ Support indexing over the instances and return a `PolygonMasks` object. `item` can be: 1. An integer. It will return an object with only one instance. 2. A slice. It will return an object with the selected instances. 3. A list[int]. It will return an object with the selected instances, correpsonding to the indices in the list. 4. A vector mask of type BoolTensor, whose length is num_instances. It will return an object with the instances whose mask is nonzero. """ if isinstance(item, int): selected_polygons = [self.polygons[item]] elif isinstance(item, slice): selected_polygons = self.polygons[item] elif isinstance(item, list): selected_polygons = [self.polygons[i] for i in item] elif isinstance(item, torch.Tensor): # Polygons is a list, so we have to move the indices back to CPU. if item.dtype == torch.bool: assert item.dim() == 1, item.shape item = item.nonzero().squeeze(1).cpu().numpy().tolist() elif item.dtype in [torch.int32, torch.int64]: item = item.cpu().numpy().tolist() else: raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype)) selected_polygons = [self.polygons[i] for i in item] return PolygonMasks(selected_polygons) def __iter__(self) -> Iterator[List[np.ndarray]]: """ Yields: list[ndarray]: the polygons for one instance. Each Tensor is a float64 vector representing a polygon. """ return iter(self.polygons) def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.polygons)) return s def __len__(self) -> int: return len(self.polygons) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each mask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. 
""" assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = boxes.device # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise # (several small tensors for representing a single instance mask) boxes = boxes.to(torch.device("cpu")) results = [ rasterize_polygons_within_box(poly, box.numpy(), mask_size) for poly, box in zip(self.polygons, boxes) ] """ poly: list[list[float]], the polygons for one instance box: a tensor of shape (4,) """ if len(results) == 0: return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device) return torch.stack(results, dim=0).to(device=device) def area(self): """ Computes area of the mask. Only works with Polygons, using the shoelace formula: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates Returns: Tensor: a vector, area for each instance """ area = [] for polygons_per_instance in self.polygons: area_per_instance = 0 for p in polygons_per_instance: area_per_instance += polygon_area(p[0::2], p[1::2]) area.append(area_per_instance) return torch.tensor(area) @staticmethod def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks": """ Concatenates a list of PolygonMasks into a single PolygonMasks Arguments: polymasks_list (list[PolygonMasks]) Returns: PolygonMasks: the concatenated PolygonMasks """ assert isinstance(polymasks_list, (list, tuple)) assert len(polymasks_list) > 0 assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list) cat_polymasks = type(polymasks_list[0])( list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list)) ) return cat_polymasks class ROIMasks: """ Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given, full-image bitmask can be obtained by "pasting" the mask on the region defined by the corresponding ROI box. """ def __init__(self, tensor: torch.Tensor): """ Args: tensor: (N, M, M) mask tensor that defines the mask within each ROI. """ if tensor.dim() != 3: raise ValueError("ROIMasks must take a masks of 3 dimension.") self.tensor = tensor def to(self, device: torch.device) -> "ROIMasks": return ROIMasks(self.tensor.to(device)) @property def device(self) -> device: return self.tensor.device def __len__(self): return self.tensor.shape[0] def __getitem__(self, item) -> "ROIMasks": """ Returns: ROIMasks: Create a new :class:`ROIMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[2:10]`: return a slice of masks. 2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ t = self.tensor[item] if t.dim() != 3: raise ValueError( f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!" ) return ROIMasks(t) @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s @torch.jit.unused def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5): """ Args: see documentation of :func:`paste_masks_in_image`. 
""" from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape if torch.jit.is_tracing(): if isinstance(height, torch.Tensor): paste_func = _paste_masks_tensor_shape else: paste_func = paste_masks_in_image else: paste_func = retry_if_cuda_oom(paste_masks_in_image) bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold) return BitMasks(bitmasks) <fim_middle>
targets: null
block_type: STATEMENT
scenario: complete_current_header_empty_completion
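The second masks.py example stops inside PolygonMasks.get_bounding_boxes, which reduces each instance's polygon coordinates to a tight [x0, y0, x1, y1] box via per-axis minima and maxima. A minimal numpy equivalent for one flat polygon (coordinates invented):

import numpy as np

poly = np.array([2.0, 3.0, 8.0, 3.5, 6.0, 9.0])  # [x0, y0, x1, y1, x2, y2]
coords = poly.reshape(-1, 2)                      # one (x, y) pair per row
x0, y0 = coords.min(axis=0)
x1, y1 = coords.max(axis=0)
print([float(v) for v in (x0, y0, x1, y1)])       # -> [2.0, 3.0, 8.0, 9.0]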
<filename>UniRef/detectron2/layers/losses.py<fim_prefix>import math import torch def diou_loss( boxes1: torch.Tensor, boxes2: torch.Tensor, reduction: str = "none", eps: float = 1e-7, ) -> torch.Tensor: """ Distance Intersection over Union Loss (Zhaohui Zheng et. al) https://arxiv.org/abs/1911.08287 Args: boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). reduction: 'none' | 'mean' | 'sum' 'none': No reduction will be applied to the output. 'mean': The output will be averaged. 'sum': The output will be summed. eps (float): small number to prevent division by zero """ x1, y1, x2, y2 = boxes1.unbind(dim=-1) x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) # TODO: use torch._assert_async() when pytorch 1.8 support is dropped assert (x2 >= x1).all(), "bad box: x1 larger than x2" assert (y2 >= y1).all(), "bad box: y1 larger than y2" # Intersection keypoints xkis1 = torch.max(x1, x1g) ykis1 = torch.max(y1, y1g) xkis2 = torch.min(x2, x2g) ykis2 = torch.min(y2, y2g) intsct = torch.zeros_like(x1) mask = (ykis2 > ykis1) & (xkis2 > xkis1) intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps iou = intsct / union # smallest enclosing box xc1 = torch.min(x1, x1g) yc1 = torch.min(y1, y1g) xc2 = torch.max(x2, x2g) yc2 = torch.max(y2, y2g) diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps # centers of boxes x_p = (x2 + x1) / 2 y_p = (y2 + y1) / 2 x_<fim_suffix>g = (x1g + x2g) / 2 y_g = (y1g + y2g) / 2 distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2) # Eqn. (7) loss = 1 - iou + (distance / diag_len) if reduction == "mean": loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() elif reduction == "sum": loss = loss.sum() return loss def ciou_loss( boxes1: torch.Tensor, boxes2: torch.Tensor, reduction: str = "none", eps: float = 1e-7, ) -> torch.Tensor: """ Complete Intersection over Union Loss (Zhaohui Zheng et. al) https://arxiv.org/abs/1911.08287 Args: boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). reduction: 'none' | 'mean' | 'sum' 'none': No reduction will be applied to the output. 'mean': The output will be averaged. 'sum': The output will be summed. eps (float): small number to prevent division by zero """ x1, y1, x2, y2 = boxes1.unbind(dim=-1) x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) # TODO: use torch._assert_async() when pytorch 1.8 support is dropped assert (x2 >= x1).all(), "bad box: x1 larger than x2" assert (y2 >= y1).all(), "bad box: y1 larger than y2" # Intersection keypoints xkis1 = torch.max(x1, x1g) ykis1 = torch.max(y1, y1g) xkis2 = torch.min(x2, x2g) ykis2 = torch.min(y2, y2g) intsct = torch.zeros_like(x1) mask = (ykis2 > ykis1) & (xkis2 > xkis1) intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps iou = intsct / union # smallest enclosing box xc1 = torch.min(x1, x1g) yc1 = torch.min(y1, y1g) xc2 = torch.max(x2, x2g) yc2 = torch.max(y2, y2g) diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps # centers of boxes x_p = (x2 + x1) / 2 y_p = (y2 + y1) / 2 x_g = (x1g + x2g) / 2 y_g = (y1g + y2g) / 2 distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2) # width and height of boxes w_pred = x2 - x1 h_pred = y2 - y1 w_gt = x2g - x1g h_gt = y2g - y1g v = (4 / (math.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2) with torch.no_grad(): alpha = v / (1 - iou + v + eps) # Eqn. 
(10) loss = 1 - iou + (distance / diag_len) + alpha * v if reduction == "mean": loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() elif reduction == "sum": loss = loss.sum() return loss <fim_middle>
targets: null
block_type: STATEMENT
scenario: complete_current_header_empty_completion
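The losses.py example implements the DIoU and CIoU losses; writing out what that code computes (the distance / diag_len and alpha * v terms in the snippet), the two losses are:

\mathcal{L}_{\mathrm{DIoU}} = 1 - \mathrm{IoU} + \frac{\rho^2(\mathbf{b}, \mathbf{b}^{gt})}{c^2}

\mathcal{L}_{\mathrm{CIoU}} = \mathcal{L}_{\mathrm{DIoU}} + \alpha v,
\qquad v = \frac{4}{\pi^2}\left(\arctan\frac{w^{gt}}{h^{gt}} - \arctan\frac{w}{h}\right)^2,
\qquad \alpha = \frac{v}{(1 - \mathrm{IoU}) + v}

Here \rho is the distance between the two box centers, c is the diagonal length of the smallest box enclosing both, and w, h are box widths and heights, matching the intermediate variables in the code.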
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy import numpy as np import torch from detectron2.structures import Boxes, Instances from .base_tracker import BaseTracker from scipy.optimize import linear_sum_assignment from ..config.config import CfgNode as CfgNode_ from typing import Dict from detectron2.config import configurable class BaseHungarianTracker(BaseTracker): """ A base class for all Hungarian trackers """ @configurable def __init__( self, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period @classmethod def from_config(cls, cfg: CfgNode_) -> Dict: raise NotImplementedError("Calling HungarianTracker::from_config") def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray: raise NotImplementedError("Calling HungarianTracker::build_matrix") def update(self, instances: Instances) -> Instances: if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: self._untracked_prev_idx = set(range(len(self._prev_instances))) cost_matrix = self.build_cost_matrix(instances, self._prev_instances) matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix) instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx) instances = self._process_unmatched_idx(instances, matched_idx) instances = self._process_unmatched_prev_idx(instances, matched_prev_idx) self._prev_instances = copy.deepcopy(instances) return instances def _initialize_extra_fields(self, instances: Instances) -> Instances: """ If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instances.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_period"): instances.set("ID_period", [None] * len(instances)) if not instances.has("lost_frame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is None: instances.ID = list(range(len(instances))) self._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _process_matched_idx( self, instances: Instances, matched_idx: np.ndarray, matched_prev_idx: np.ndarray ) -> Instances: assert matched_idx.size == matched_prev_idx.size for i in range(matched_idx.size): instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]] instances.ID_period[matched_idx[i]] = \ self._prev_instances.ID_period[matched_prev_idx[i]] + 1 instances.lost_frame_count[matched_idx[i]] = 0 return instances def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances: untracked_idx = set(range(len(instances))).difference(set(matched_idx)) for idx in untracked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _process_unmatched_prev_idx( self, instances: Instances, matched_prev_idx: np.ndarray ) -> Instances: untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx)) for idx in untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._p<fim_suffix>rev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pred_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy import numpy as np import torch from detectron2.structures import Boxes, Instances from .base_tracker import BaseTracker from scipy.optimize import linear_sum_assignment from ..config.config import CfgNode as CfgNode_ from typing import Dict from detectron2.config import configurable class BaseHungarianTracker(BaseTracker): """ A base class for all Hungarian trackers """ @configurable def __init__( self, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period @classmethod def from_config(cls, cfg: CfgNode_) -> Dict: raise NotImplementedError("Calling HungarianTracker::from_config") def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray: raise NotImplementedError("Calling HungarianTracker::build_matrix") def update(self, instances: Instances) -> Instances: if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: self._untracked_prev_idx = set(range(len(self._prev_instances))) cost_matrix = self.build_cost_matrix(instances, self._prev_instances) matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix) instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx) instances = self._process_unmatched_idx(instances, matched_idx) instances = self._process_unmatched_prev_idx(instances, matched_prev_idx) self._prev_instances = copy.deepcopy(instances) return instances def _initialize_extra_fields(self, instances: Instances) -> Instances: """ If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instances.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_period"): instances.set("ID_period", [None] * len(instances)) if not instance<fim_suffix>s.has("lost_frame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is None: instances.ID = list(range(len(instances))) self._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _process_matched_idx( self, instances: Instances, matched_idx: np.ndarray, matched_prev_idx: np.ndarray ) -> Instances: assert matched_idx.size == matched_prev_idx.size for i in range(matched_idx.size): instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]] instances.ID_period[matched_idx[i]] = \ self._prev_instances.ID_period[matched_prev_idx[i]] + 1 instances.lost_frame_count[matched_idx[i]] = 0 return instances def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances: untracked_idx = set(range(len(instances))).difference(set(matched_idx)) for idx in untracked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _process_unmatched_prev_idx( self, instances: Instances, matched_prev_idx: np.ndarray ) -> Instances: untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx)) for idx in untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._prev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pred_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
null
IF
complete_current_header_empty_completion
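The two rows above exercise BaseHungarianTracker, whose update step builds a cost matrix between current and previous detections and hands it to scipy's Hungarian solver. The sketch below is a minimal, dependency-free illustration of that matching step; the 1 - IoU cost and the toy boxes are assumptions made for the example, not the tracker's actual cost matrix, which subclasses supply via build_cost_matrix.

# Minimal sketch of Hungarian matching between two frames' boxes (assumed 1 - IoU cost).
import numpy as np
from scipy.optimize import linear_sum_assignment

def pairwise_iou(boxes1: np.ndarray, boxes2: np.ndarray) -> np.ndarray:
    """IoU for two arrays of boxes in (x1, y1, x2, y2) format."""
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    lt = np.maximum(boxes1[:, None, :2], boxes2[None, :, :2])   # top-left of intersection
    rb = np.minimum(boxes1[:, None, 2:], boxes2[None, :, 2:])   # bottom-right of intersection
    wh = np.clip(rb - lt, 0, None)
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area1[:, None] + area2[None, :] - inter + 1e-9)

curr = np.array([[10, 10, 50, 50], [60, 60, 90, 90]], dtype=float)   # toy current boxes
prev = np.array([[12, 11, 52, 49], [200, 200, 240, 240]], dtype=float)  # toy previous boxes

cost = 1.0 - pairwise_iou(curr, prev)            # low cost = high overlap
matched_idx, matched_prev_idx = linear_sum_assignment(cost)
for i, j in zip(matched_idx, matched_prev_idx):
    # the real tracker then post-processes matched / unmatched indices to carry IDs forward
    print(f"current box {i} paired with previous box {j} (cost={cost[i, j]:.3f})")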
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>import math import numpy as np import cv2 def db_eval_iou(annotation, segmentation, void_pixels=None): """ Compute region similarity as the Jaccard Index. Arguments: annotation (ndarray): binary annotation map. segmentation (ndarray): binary segmentation map. void_pixels (ndarray): optional mask with void pixels Return: jaccard (float): region similarity """ assert annotation.shape == segmentation.shape, \ f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.' annotation = annotation.astype(np.bool) segmentation = segmentation.astype(np.bool) if void_pixels is not None: assert annotation.shape == void_pixels.shape, \ f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.' void_pixels = void_pixels.astype(np.bool) else: void_pixels = np.zeros_like(segmentation) # Intersection between all sets inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1)) union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1)) j = inters / union if j.ndim == 0: j = 1 if np.isclose(union, 0) else j else: j[np.isclose(union, 0)] = 1 return j def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008): assert annotation.shape == segmentation.shape if void_pixels is not None: assert annotation.shape == void_pixels.shape if annotation.ndim == 3: n_frames = annotation.shape[0] f_res = np.zeros(n_frames) for frame_id in range(n_frames): void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ] f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th) elif annotation.ndim == 2: f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th) else: raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions') return f_res def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008): """ Compute mean,recall and decay from per-frame evaluation. Calculates precision/recall for boundaries between foreground_mask and gt_mask using morphological operators to speed it up. Arguments: foreground_mask (ndarray): binary segmentation image. gt_mask (ndarray): binary annotated image. 
void_pixels (ndarray): optional mask with void pixels Returns: F (float): boundaries F-measure """ assert np.atleast_3d(foreground_mask).shape[2] == 1 if void_pixels is not None: void_pixels = void_pixels.astype(np.bool) else: void_pixels = np.zeros_like(foreground_mask).astype(np.bool) bound_pix = bound_th if bound_th >= 1 else \ np.ceil(bound_th * np.linalg.norm(foreground_mask.shape)) # Get the pixel boundaries of both masks fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels)) gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels)) from skimage.morphology import disk # fg_dil = binary_dilation(fg_boundary, disk(bound_pix)) fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8)) # gt_dil = binary_dilation(gt_boundary, disk(bound_pix)) gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8)) # Get the intersection gt_match = gt_boundary * fg_dil fg_match = fg_boundary * gt_dil # Area of the intersection n_fg = np.sum(fg_boundary) n_gt = np.sum(gt_boundary) # % Compute precision and recall if n_fg == 0 and n_gt > 0: precision = 1 recall = 0 elif n_fg > 0 and n_gt == 0: precision = 0 recall = 1 elif n_fg == 0 and n_gt == 0: precision = 1 recall = 1 else: precision = np.sum(fg_match) / float(n_fg) recall = np.sum(gt_match) / float(n_gt) # Compute F measure if pre<fim_suffix>cision + recall == 0: F = 0 else: F = 2 * precision * recall / (precision + recall) return F def _seg2bmap(seg, width=None, height=None): """ From a segmentation, compute a binary boundary map with 1 pixel wide boundaries. The boundary pixels are offset by 1/2 pixel towards the origin from the actual segment boundary. Arguments: seg : Segments labeled from 1..k. width : Width of desired bmap <= seg.shape[1] height : Height of desired bmap <= seg.shape[0] Returns: bmap (ndarray): Binary boundary map. David Martin <[email protected]> January 2003 """ seg = seg.astype(np.bool) seg[seg > 0] = 1 assert np.atleast_3d(seg).shape[2] == 1 width = seg.shape[1] if width is None else width height = seg.shape[0] if height is None else height h, w = seg.shape[:2] ar1 = float(width) / float(height) ar2 = float(w) / float(h) assert not ( width > w | height > h | abs(ar1 - ar2) > 0.01 ), "Can" "t convert %dx%d seg to %dx%d bmap." 
% (w, h, width, height) e = np.zeros_like(seg) s = np.zeros_like(seg) se = np.zeros_like(seg) e[:, :-1] = seg[:, 1:] s[:-1, :] = seg[1:, :] se[:-1, :-1] = seg[1:, 1:] b = seg ^ e | seg ^ s | seg ^ se b[-1, :] = seg[-1, :] ^ e[-1, :] b[:, -1] = seg[:, -1] ^ s[:, -1] b[-1, -1] = 0 if w == width and h == height: bmap = b else: bmap = np.zeros((height, width)) for x in range(w): for y in range(h): if b[y, x]: j = 1 + math.floor((y - 1) + height / h) i = 1 + math.floor((x - 1) + width / h) bmap[j, i] = 1 return bmap if __name__ == '__main__': from davis2017.davis import DAVIS from davis2017.results import Results dataset = DAVIS(root='input_dir/ref', subset='val', sequences='aerobatics') results = Results(root_dir='examples/osvos') # Test timing F measure for seq in dataset.get_sequences(): all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True) all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1] all_res_masks = results.read_masks(seq, all_masks_id) f_metrics_res = np.zeros(all_gt_masks.shape[:2]) for ii in range(all_gt_masks.shape[0]): f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...]) # Run using to profile code: python -m cProfile -o f_measure.prof metrics.py # snakeviz f_measure.prof <fim_middle>
null
IF
complete_current_header_empty_completion
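The row above evaluates DAVIS-style region and boundary quality. As a small companion, the sketch below re-derives the two scalar measures that code computes: the Jaccard index of db_eval_iou and the precision/recall combination behind f_measure. The binary masks and the precision/recall numbers are made up for illustration; the real f_measure additionally extracts 1-pixel boundary maps and dilates them before counting matches.

# Jaccard index and F score on toy inputs (illustrative values only).
import numpy as np

def jaccard(annotation: np.ndarray, segmentation: np.ndarray) -> float:
    annotation = annotation.astype(bool)
    segmentation = segmentation.astype(bool)
    union = np.sum(annotation | segmentation)
    if np.isclose(union, 0):
        return 1.0                      # both masks empty -> perfect agreement by convention
    return float(np.sum(annotation & segmentation)) / float(union)

def f_score(precision: float, recall: float) -> float:
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)

gt = np.zeros((8, 8), dtype=bool); gt[2:6, 2:6] = True      # toy ground-truth mask
pred = np.zeros((8, 8), dtype=bool); pred[3:7, 3:7] = True  # toy predicted mask

print("J =", jaccard(gt, pred))                        # region similarity
print("F =", f_score(precision=0.75, recall=0.6))      # boundary-style F on assumed P/R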
<filename>UniRef/detectron2/config/config.py<fim_prefix># -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. import functools import inspect import logging from fvcore.common.config import CfgNode as _CfgNode from detectron2.utils.file_io import PathManager class CfgNode(_CfgNode): """ The same as `fvcore.common.config.CfgNode`, but different in: 1. Use unsafe yaml loading by default. Note that this may lead to arbitrary code execution: you must not load a config file from untrusted sources before manually inspecting the content of the file. 2. Support config versioning. When attempting to merge an old config, it will convert the old config automatically. .. automethod:: clone .. automethod:: freeze .. automethod:: defrost .. automethod:: is_frozen .. automethod:: load_yaml_with_base .. automethod:: merge_from_list .. automethod:: merge_from_other_cfg """ @classmethod def _open_cfg(cls, filename): return PathManager.open(filename, "r") # Note that the default value of allow_unsafe is changed to True def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None: """ Load content from the given config file and merge it into self. Args: cfg_filename: config filename allow_unsafe: allow unsafe yaml syntax """ assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!" loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe) loaded_cfg = type(self)(loaded_cfg) # defaults.py needs to import CfgNode from .defaults import _C latest_ver = _C.VERSION assert ( latest_ver == self.VERSION ), "CfgNode.merge_from_file is only allowed on a config object of latest version!" logger = logging.getLogger(__name__) loaded_ver = loaded_cfg.get("VERSION", None) if loaded_ver is None: from .compat import guess_version loaded_ver = guess_version(loaded_cfg, cfg_filename) assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format( loaded_ver, self.VERSION ) if loaded_ver == self.VERSION: self.merge_from_other_cfg(loaded_cfg) else: # compat.py needs to import CfgNode from .compat import upgrade_config, downgrade_config logger.warning( "Loading an old v{} config file '{}' by automatically upgrading to v{}. " "See docs/CHANGELOG.md for instructions to update your files.".format( loaded_ver, cfg_filename, self.VERSION ) ) # To convert, first obtain a full config at an old version old_self = downgrade_config(self, to_version=loaded_ver) old_self.merge_from_other_cfg(loaded_cfg) new_config = upgrade_config(old_self) self.clear() self.update(new_config) def dump(self, *args, **kwargs): """ Returns: str: a yaml string representation of the config """ # to make it show up in docs return super().dump(*args, **kwargs) global_cfg = CfgNode() def get_cfg() -> CfgNode: """ Get a copy of the default config. Returns: a detectron2 CfgNode instance. """ from .defaults import _C return _C.clone() def set_global_cfg(cfg: CfgNode) -> None: """ Let the global config point to the given cfg. Assume that the given "cfg" has the key "KEY", after calling `set_global_cfg(cfg)`, the key can be accessed by: :: from detectron2.config import global_cfg print(global_cfg.KEY) By using a hacky global config, you can access these configs anywhere, without having to pass the config object or the values deep into the code. This is a hacky feature introduced for quick prototyping / research exploration. 
""" global global_cfg global_cfg.clear() global_cfg.update(cfg) def configurable(init_func=None, *, from_config=None): """ Decorate a function or a class's __init__ method so that it can be called with a :class:`CfgNode` object using a :func:`from_config` function that translates :class:`CfgNode` to arguments. Examples: :: # Usage 1: Decorator on __init__: class A: @configurable def __init__(self, a, b=2, c=3): pass @classmethod def from_config(cls, cfg): # 'cfg' must be the first argument # Returns kwargs to be passed to __init__ return {"a": cfg.A, "b": cfg.B} a1 = A(a=1, b=2) # regular construction a2 = A(cfg) # construct with a cfg a3 = A(cfg, b=3, c=4) # construct with extra overwrite # Usage 2: Decorator on any function. Needs an extra from_config argument: @configurable(from_config=lambda cfg: {"a: cfg.A, "b": cfg.B}) def a_func(a, b=2, c=3): pass a1 = a_func(a=1, b=2) # regular call a2 = a_func(cfg) # call with a cfg a3 = a_func(cfg, b=3, c=4) # call with extra overwrite Args: init_func (callable): a class's ``__init__`` method in usage 1. The class must have a ``from_config`` classmethod which takes `cfg` as the first argument. from_config (callable): the from_config function in usage 2. It must take `cfg` as its first argument. """ if init_func is not None: assert ( inspect.isfunction(init_func) and from_config is None and init_func.__name__ == "__init__" ), "Incorrect use of @configurable. Check API documentation for examples." @functools.wraps(init_func) def wrapped(self, *args, **kwargs): try: from_config_func = type(self).from_config except AttributeError as e: raise AttributeError( "Class with @configurable must have a 'from_config' classmethod." ) from e if not inspect.ismethod(from_config_func): raise TypeError("Class with @configurable must have a 'from_config' classmethod.") if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config_func, *args, **kwargs) init_func(self, **explicit_args) else: init_func(self, *args, **kwargs) return wrapped else: if from_config is None: return configurable # @configurable() is made equivalent to @configurable assert inspect.isfunction( from_config ), "from_config argument of configurable must be a function!" def wrapper(orig_func): @functools.wraps(orig_func) def wrapped(*args, **kwargs): if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config, *args, **kwargs) return orig_func(**explicit_args) else: return orig_func(*args, **kwargs) wrapped.from_config = from_config return wrapped return wrapper def _get_args_from_config(from_config_func, *args, **kwargs): """ Use `from_config` to obtain explicit arguments. 
Returns: dict: arguments to be used for cls.__init__ """ signature = inspect.signature(from_config_func) if list(signature.parameters.keys())[0] != "cfg": if inspect.isfunction(from_config_func): name = from_config_func.__name__ else: name = f"{from_config_func.__self__}.from_config" raise TypeError(f"{name} must take 'cfg' as the first argument!") support_var_arg = any( param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD] for param in signature.parameters.values() ) if support_var_arg: # forw<fim_suffix>ard all arguments to from_config, if from_config accepts them ret = from_config_func(*args, **kwargs) else: # forward supported arguments to from_config supported_arg_names = set(signature.parameters.keys()) extra_kwargs = {} for name in list(kwargs.keys()): if name not in supported_arg_names: extra_kwargs[name] = kwargs.pop(name) ret = from_config_func(*args, **kwargs) # forward the other arguments to __init__ ret.update(extra_kwargs) return ret def _called_with_cfg(*args, **kwargs): """ Returns: bool: whether the arguments contain CfgNode and should be considered forwarded to from_config. """ from omegaconf import DictConfig if len(args) and isinstance(args[0], (_CfgNode, DictConfig)): return True if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)): return True # `from_config`'s first argument is forced to be "cfg". # So the above check covers all cases. return False <fim_middle>
null
IF
complete_current_header_empty_completion
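The row above carries detectron2's @configurable decorator and its from_config contract. The snippet below is a usage sketch of that pattern, assuming detectron2 is installed; ToyHead and the DUMMY_DIM key are invented purely for the example and are not part of the library.

# Usage sketch: the same __init__ accepts explicit kwargs or a CfgNode via from_config.
from detectron2.config import CfgNode, configurable

class ToyHead:
    """Invented class, only to demonstrate the decorator."""

    @configurable
    def __init__(self, dim: int = 16, name: str = "toy"):
        self.dim = dim
        self.name = name

    @classmethod
    def from_config(cls, cfg):
        # 'cfg' must be the first argument; return the kwargs __init__ expects
        return {"dim": cfg.DUMMY_DIM}

cfg = CfgNode()
cfg.DUMMY_DIM = 32               # illustrative key, not a real detectron2 config option

h1 = ToyHead(dim=8)              # regular construction
h2 = ToyHead(cfg)                # construction driven by a CfgNode
h3 = ToyHead(cfg, name="other")  # cfg plus explicit overrides
print(h1.dim, h2.dim, h3.dim, h3.name)   # 8 32 32 other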
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy from typing import List import numpy as np import torch from detectron2.config import configurable from detectron2.structures import Boxes, Instances from detectron2.structures.boxes import pairwise_iou from ..config.config import CfgNode as CfgNode_ from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY @TRACKER_HEADS_REGISTRY.register() class BBoxIOUTracker(BaseTracker): """ A bounding box tracker to assign ID based on IoU between current and previous instances """ @configurable def __init__( self, *, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, track_iou_threshold: float = 0.5, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video track_iou_threshold: iou threshold, below this number a bbox pair is removed from tracking """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period self._track_iou_threshold = track_iou_threshold @classmethod def from_config(cls, cfg: CfgNode_): """ Old style initialization using CfgNode Args: cfg: D2 CfgNode, config file Return: dictionary storing arguments for __init__ method """ assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) return { "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", "video_height": video_height, "video_width": video_width, "max_num_instances": max_num_instances, "max_lost_frame_count": max_lost_frame_count, "min_box_rel_dim": min_box_rel_dim, "min_instance_period": min_instance_period, "track_iou_threshold": track_iou_threshold } def update(self, instances: Instances) -> Instances: """ See BaseTracker description """ if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: # calculate IoU of all bbox pairs iou_all = pairwise_iou( boxes1=instances.pred_boxes, boxes2=self._prev_instances.pred_boxes, ) # sort IoU in descending order bbox_pairs = self._create_prediction_pairs(instances, iou_all) # assign previous ID to current bbox if IoU > track_iou_threshold self._reset_fields() for bbox_pair in bbox_pairs: idx = bbox_pair["idx"] prev_id = bbox_pair["prev_id"] if idx in self._matched_idx \ or 
prev_id in self._matched_ID \ or bbox_pair["IoU"] < self._track_iou_threshold: continue instances.ID[idx] = prev_id instances.ID_period[idx] = bbox_pair["prev_period"] + 1 instances.lost_frame_count[idx] = 0 self._matched_idx.add(idx) self._matched_ID.add(prev_id) self._untracked_prev_idx.remove(bbox_pair["prev_idx"]) instances = self._assign_new_id(instances) instances = self._merge_untracked_instances(instances) self._prev_instances = copy.deepcopy(instances) return instances def _create_prediction_pairs( self, instances: Instances, iou_all: np.ndarray ) -> List: """ For all instances in previous and current frames, create pairs. For each pair, store index of the instance in current frame predcitions, index in previous predictions, ID in previous predictions, IoU of the bboxes in this pair, period in previous predictions. Args: instances: D2 Instances, for predictions of the current frame iou_all: IoU for all bboxes pairs Return: A list of IoU for all pairs """ bbox_pairs = [] for i in range(len(instances)): for j in range(len(self._prev_instances)): bbox_pairs.append( { "idx": i, "prev_idx": j, "prev_id": self._prev_instances.ID[j], "IoU": iou_all[i, j], "prev_period": self._prev_instances.ID_period[j], } ) return bbox_pairs def _initialize_extra_fields(self, instances: Instances) -> Instances: """ If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instances.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_per<fim_suffix>iod"): instances.set("ID_period", [None] * len(instances)) if not instances.has("lost_frame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is None: instances.ID = list(range(len(instances))) self._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _reset_fields(self): """ Before each uodate call, reset fields first """ self._matched_idx = set() self._matched_ID = set() self._untracked_prev_idx = set(range(len(self._prev_instances))) def _assign_new_id(self, instances: Instances) -> Instances: """ For each untracked instance, assign a new id Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with new ID assigned """ untracked_idx = set(range(len(instances))).difference(self._matched_idx) for idx in untracked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _merge_untracked_instances(self, instances: Instances) -> Instances: """ For untracked previous instances, under certain condition, still keep them in tracking and merge with the current instances. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances merging current instances and instances from previous frame decided to keep tracking """ untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) for idx in self._untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._prev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pred_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
null
IF
complete_current_header_empty_completion
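The row above (and its later duplicates) centers on BBoxIOUTracker, which walks candidate (current, previous) box pairs in descending IoU order and lets a current instance inherit a previous ID the first time both sides are still unmatched and the IoU clears the threshold. The sketch below is a dependency-free rendering of that greedy step; the IoU matrix, the IDs and the 0.5 threshold are toy assumptions.

# Greedy ID inheritance over an IoU matrix (rows: current instances, cols: previous ones).
import numpy as np

def greedy_iou_match(iou_all: np.ndarray, prev_ids: list, iou_thresh: float = 0.5) -> dict:
    matched_idx, matched_ids, assignment = set(), set(), {}
    # enumerate all pairs, highest IoU first (mirroring the "sort IoU in descending order"
    # step noted in the tracker code above)
    order = np.dstack(np.unravel_index(np.argsort(-iou_all, axis=None), iou_all.shape))[0]
    for i, j in order:
        if i in matched_idx or prev_ids[j] in matched_ids:
            continue
        if iou_all[i, j] < iou_thresh:
            continue
        assignment[int(i)] = prev_ids[j]      # current instance i keeps the old ID
        matched_idx.add(int(i))
        matched_ids.add(prev_ids[j])
    return assignment

iou_all = np.array([[0.82, 0.10],
                    [0.05, 0.03]])
print(greedy_iou_match(iou_all, prev_ids=[7, 9]))   # -> {0: 7}; instance 1 would get a new ID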
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import dataclasses import logging from collections import abc from typing import Any from detectron2.utils.registry import _convert_target_to_string, locate __all__ = ["dump_dataclass", "instantiate"] def dump_dataclass(obj: Any): """ Dump a dataclass recursively into a dict that can be later instantiated. Args: obj: a dataclass object Returns: dict """ assert dataclasses.is_dataclass(obj) and not isinstance( obj, type ), "dump_dataclass() requires an instance of a dataclass." ret = {"_target_": _convert_target_to_string(type(obj))} for f in dataclasses.fields(obj): v = getattr(obj, f.name) if dataclasses.is_dataclass(v): v = dump_dataclass(v) if isinstance(v, (list, tuple)): v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v] ret[f.name] = v return ret def instantiate(cfg): """ Recursively instantiate objects defined in dictionaries by "_target_" and arguments. Args: cfg: a dict-like object with "_target_" that defines the caller, and other keys that define the arguments Returns: object instantiated by cfg """ from omegaconf import ListConfig if isinstance(cfg, ListConfig): lst = [instantiate(x) for x in cfg] return ListConfig(lst, flags={"allow_objects": True}) if isinstance(cfg, list): # Specialize for list, because many classes take # list[objects] as arguments, such as ResNet, DatasetMapper return [instantiate(x) for x in cfg] if isinstance(cfg, abc.Mapping) and "_target_" in cfg: # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all, # but faster: https://github.com/facebookresearch/hydra/issues/1200 cfg = {k: instantiate(v) for k, v in cfg.items()} cls = cfg.pop("_target_") cls = instantiate(cls) if isinstance(<fim_suffix>cls, str): cls_name = cls cls = locate(cls_name) assert cls is not None, cls_name else: try: cls_name = cls.__module__ + "." + cls.__qualname__ except Exception: # target could be anything, so the above could fail cls_name = str(cls) assert callable(cls), f"_target_ {cls} does not define a callable object" try: return cls(**cfg) except TypeError: logger = logging.getLogger(__name__) logger.error(f"Error when instantiating {cls_name}!") raise return cfg # return as-is if don't know what to do <fim_middle>
null
IF
complete_current_header_empty_completion
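The row above is detectron2's instantiate(), which recursively builds objects from dicts that name a callable under "_target_". The snippet below is a usage sketch assuming detectron2 (and its omegaconf dependency) is installed; builtins.dict and fractions.Fraction are stand-in callables chosen only so the example runs without extra packages.

# Nested "_target_" dicts are instantiated bottom-up, then passed to the outer callable.
from detectron2.config import instantiate

cfg = {
    "_target_": "builtins.dict",          # any callable importable by dotted path
    "name": "toy",
    "ratio": {                            # nested config: built first, then passed up
        "_target_": "fractions.Fraction",
        "numerator": 1,
        "denominator": 3,
    },
}

obj = instantiate(cfg)
print(obj)   # {'name': 'toy', 'ratio': Fraction(1, 3)}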
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy from typing import List import numpy as np import torch from detectron2.config import configurable from detectron2.structures import Boxes, Instances from detectron2.structures.boxes import pairwise_iou from ..config.config import CfgNode as CfgNode_ from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY @TRACKER_HEADS_REGISTRY.register() class BBoxIOUTracker(BaseTracker): """ A bounding box tracker to assign ID based on IoU between current and previous instances """ @configurable def __init__( self, *, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, track_iou_threshold: float = 0.5, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video track_iou_threshold: iou threshold, below this number a bbox pair is removed from tracking """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period self._track_iou_threshold = track_iou_threshold @classmethod def from_config(cls, cfg: CfgNode_): """ Old style initialization using CfgNode Args: cfg: D2 CfgNode, config file Return: dictionary storing arguments for __init__ method """ assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) return { "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", "video_height": video_height, "video_width": video_width, "max_num_instances": max_num_instances, "max_lost_frame_count": max_lost_frame_count, "min_box_rel_dim": min_box_rel_dim, "min_instance_period": min_instance_period, "track_iou_threshold": track_iou_threshold } def update(self, instances: Instances) -> Instances: """ See BaseTracker description """ if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: # calculate IoU of all bbox pairs iou_all = pairwise_iou( boxes1=instances.pred_boxes, boxes2=self._prev_instances.pred_boxes, ) # sort IoU in descending order bbox_pairs = self._create_prediction_pairs(instances, iou_all) # assign previous ID to current bbox if IoU > track_iou_threshold self._reset_fields() for bbox_pair in bbox_pairs: idx = bbox_pair["idx"] prev_id = bbox_pair["prev_id"] if idx in self._matched_idx \ or 
prev_id in self._matched_ID \ or bbox_pair["IoU"] < self._track_iou_threshold: continue instances.ID[idx] = prev_id instances.ID_period[idx] = bbox_pair["prev_period"] + 1 instances.lost_frame_count[idx] = 0 self._matched_idx.add(idx) self._matched_ID.add(prev_id) self._untracked_prev_idx.remove(bbox_pair["prev_idx"]) instances = self._assign_new_id(instances) instances = self._merge_untracked_instances(instances) self._prev_instances = copy.deepcopy(instances) return instances def _create_prediction_pairs( self, instances: Instances, iou_all: np.ndarray ) -> List: """ For all instances in previous and current frames, create pairs. For each pair, store index of the instance in current frame predcitions, index in previous predictions, ID in previous predictions, IoU of the bboxes in this pair, period in previous predictions. Args: instances: D2 Instances, for predictions of the current frame iou_all: IoU for all bboxes pairs Return: A list of IoU for all pairs """ bbox_pairs = [] for i in range(len(instances)): for j in range(len(self._prev_instances)): bbox_pairs.append( { "idx": i, "prev_idx": j, "prev_id": self._prev_instances.ID[j], "IoU": iou_all[i, j], "prev_period": self._prev_instances.ID_period[j], } ) return bbox_pairs def _initialize_extra_fields(self, instances: Instances) -> Instances: """ If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instanc<fim_suffix>es.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_period"): instances.set("ID_period", [None] * len(instances)) if not instances.has("lost_frame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is None: instances.ID = list(range(len(instances))) self._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _reset_fields(self): """ Before each uodate call, reset fields first """ self._matched_idx = set() self._matched_ID = set() self._untracked_prev_idx = set(range(len(self._prev_instances))) def _assign_new_id(self, instances: Instances) -> Instances: """ For each untracked instance, assign a new id Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with new ID assigned """ untracked_idx = set(range(len(instances))).difference(self._matched_idx) for idx in untracked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _merge_untracked_instances(self, instances: Instances) -> Instances: """ For untracked previous instances, under certain condition, still keep them in tracking and merge with the current instances. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances merging current instances and instances from previous frame decided to keep tracking """ untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) for idx in self._untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._prev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pred_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
null
IF
complete_current_header_empty_completion
<filename>UniRef/detectron2/structures/instances.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import itertools from typing import Any, Dict, List, Tuple, Union import torch class Instances: """ This class represents a list of instances in an image. It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields". All fields must have the same ``__len__`` which is the number of instances. All other (non-field) attributes of this class are considered private: they must start with '_' and are not modifiable by a user. Some basic usage: 1. Set/get/check a field: .. code-block:: python instances.gt_boxes = Boxes(...) print(instances.pred_masks) # a tensor of shape (N, H, W) print('gt_masks' in instances) 2. ``len(instances)`` returns the number of instances 3. Indexing: ``instances[indices]`` will apply the indexing on all the fields and returns a new :class:`Instances`. Typically, ``indices`` is a integer vector of indices, or a binary mask of length ``num_instances`` .. code-block:: python category_3_detections = instances[instances.pred_classes == 3] confident_detections = instances[instances.scores > 0.9] """ def __init__(self, image_size: Tuple[int, int], **kwargs: Any): """ Args: image_size (height, width): the spatial size of the image. kwargs: fields to add to this `Instances`. """ self._image_size = image_size self._fields: Dict[str, Any] = {} for k, v in kwargs.items(): self.set(k, v) @property def image_size(self) -> Tuple[int, int]: """ Returns: tuple: height, width """ return self._image_size def __setattr__(self, name: str, val: Any) -> None: if name.startswith("_"): super().__setattr__(name, val) else: self.set(name, val) def __getattr__(self, name: str) -> Any: if name == "_fields" or nam<fim_suffix>e not in self._fields: raise AttributeError("Cannot find field '{}' in the given Instances!".format(name)) return self._fields[name] def set(self, name: str, value: Any) -> None: """ Set the field named `name` to `value`. The length of `value` must be the number of instances, and must agree with other existing fields in this object. """ data_len = len(value) if len(self._fields): assert ( len(self) == data_len ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self)) self._fields[name] = value def has(self, name: str) -> bool: """ Returns: bool: whether the field called `name` exists. """ return name in self._fields def remove(self, name: str) -> None: """ Remove the field called `name`. """ del self._fields[name] def get(self, name: str) -> Any: """ Returns the field called `name`. """ return self._fields[name] def get_fields(self) -> Dict[str, Any]: """ Returns: dict: a dict which maps names (str) to data of the fields Modifying the returned dict will modify this instance. """ return self._fields # Tensor-like methods def to(self, *args: Any, **kwargs: Any) -> "Instances": """ Returns: Instances: all fields are called with a `to(device)`, if the field has this method. """ ret = Instances(self._image_size) for k, v in self._fields.items(): if hasattr(v, "to"): v = v.to(*args, **kwargs) ret.set(k, v) return ret def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances": """ Args: item: an index-like object and will be used to index all the fields. Returns: If `item` is a string, return the data in the corresponding field. Otherwise, returns an `Instances` where all fields are indexed by `item`. 
""" if type(item) == int: if item >= len(self) or item < -len(self): raise IndexError("Instances index out of range!") else: item = slice(item, None, len(self)) ret = Instances(self._image_size) for k, v in self._fields.items(): ret.set(k, v[item]) return ret def __len__(self) -> int: for v in self._fields.values(): # use __len__ because len() has to be int and is not friendly to tracing return v.__len__() raise NotImplementedError("Empty Instances does not support __len__!") def __iter__(self): raise NotImplementedError("`Instances` object is not iterable!") @staticmethod def cat(instance_lists: List["Instances"]) -> "Instances": """ Args: instance_lists (list[Instances]) Returns: Instances """ assert all(isinstance(i, Instances) for i in instance_lists) assert len(instance_lists) > 0 if len(instance_lists) == 1: return instance_lists[0] image_size = instance_lists[0].image_size if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing for i in instance_lists[1:]: assert i.image_size == image_size ret = Instances(image_size) for k in instance_lists[0]._fields.keys(): values = [i.get(k) for i in instance_lists] v0 = values[0] if isinstance(v0, torch.Tensor): values = torch.cat(values, dim=0) elif isinstance(v0, list): values = list(itertools.chain(*values)) elif hasattr(type(v0), "cat"): values = type(v0).cat(values) else: raise ValueError("Unsupported type {} for concatenation".format(type(v0))) ret.set(k, values) return ret def __str__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={}, ".format(len(self)) s += "image_height={}, ".format(self._image_size[0]) s += "image_width={}, ".format(self._image_size[1]) s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items()))) return s __repr__ = __str__ <fim_middle>
null
IF
complete_current_header_empty_completion
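The row above is the Instances container that all of the trackers in these examples manipulate: every field shares one length, attribute access is routed through set/get, and indexing with a boolean tensor filters all fields at once. The snippet below is a short usage sketch assuming detectron2 and torch are installed; the field names and values are made up.

# Instances usage: per-field storage with shared length, boolean indexing, and cat().
import torch
from detectron2.structures import Instances

inst = Instances(image_size=(480, 640))
inst.scores = torch.tensor([0.9, 0.4, 0.75])
inst.pred_classes = torch.tensor([1, 2, 1])

print(len(inst))                        # 3 instances
confident = inst[inst.scores > 0.5]     # boolean indexing applies to every field
print(confident.pred_classes)           # tensor([1, 1])

merged = Instances.cat([inst, confident])
print(len(merged), list(merged.get_fields().keys()))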
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy from typing import List import numpy as np import torch from detectron2.config import configurable from detectron2.structures import Boxes, Instances from detectron2.structures.boxes import pairwise_iou from ..config.config import CfgNode as CfgNode_ from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY @TRACKER_HEADS_REGISTRY.register() class BBoxIOUTracker(BaseTracker): """ A bounding box tracker to assign ID based on IoU between current and previous instances """ @configurable def __init__( self, *, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, track_iou_threshold: float = 0.5, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video track_iou_threshold: iou threshold, below this number a bbox pair is removed from tracking """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period self._track_iou_threshold = track_iou_threshold @classmethod def from_config(cls, cfg: CfgNode_): """ Old style initialization using CfgNode Args: cfg: D2 CfgNode, config file Return: dictionary storing arguments for __init__ method """ assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) return { "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", "video_height": video_height, "video_width": video_width, "max_num_instances": max_num_instances, "max_lost_frame_count": max_lost_frame_count, "min_box_rel_dim": min_box_rel_dim, "min_instance_period": min_instance_period, "track_iou_threshold": track_iou_threshold } def update(self, instances: Instances) -> Instances: """ See BaseTracker description """ if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: # calculate IoU of all bbox pairs iou_all = pairwise_iou( boxes1=instances.pred_boxes, boxes2=self._prev_instances.pred_boxes, ) # sort IoU in descending order bbox_pairs = self._create_prediction_pairs(instances, iou_all) # assign previous ID to current bbox if IoU > track_iou_threshold self._reset_fields() for bbox_pair in bbox_pairs: idx = bbox_pair["idx"] prev_id = bbox_pair["prev_id"] if idx in self._matched_idx \ or 
prev_id in self._matched_ID \ or bbox_pair["IoU"] < self._track_iou_threshold: continue instances.ID[idx] = prev_id instances.ID_period[idx] = bbox_pair["prev_period"] + 1 instances.lost_frame_count[idx] = 0 self._matched_idx.add(idx) self._matched_ID.add(prev_id) self._untracked_prev_idx.remove(bbox_pair["prev_idx"]) instances = self._assign_new_id(instances) instances = self._merge_untracked_instances(instances) self._prev_instances = copy.deepcopy(instances) return instances def _create_prediction_pairs( self, instances: Instances, iou_all: np.ndarray ) -> List: """ For all instances in previous and current frames, create pairs. For each pair, store index of the instance in current frame predcitions, index in previous predictions, ID in previous predictions, IoU of the bboxes in this pair, period in previous predictions. Args: instances: D2 Instances, for predictions of the current frame iou_all: IoU for all bboxes pairs Return: A list of IoU for all pairs """ bbox_pairs = [] for i in range(len(instances)): for j in range(len(self._prev_instances)): bbox_pairs.append( { "idx": i, "prev_idx": j, "prev_id": self._prev_instances.ID[j], "IoU": iou_all[i, j], "prev_period": self._prev_instances.ID_period[j], } ) return bbox_pairs def _initialize_extra_fields(self, instances: Instances) -> Instances: """ If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instances.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_period"): instances.set("ID_period", [None] * len(instances)) if not instances.has("lost_fr<fim_suffix>ame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is None: instances.ID = list(range(len(instances))) self._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _reset_fields(self): """ Before each uodate call, reset fields first """ self._matched_idx = set() self._matched_ID = set() self._untracked_prev_idx = set(range(len(self._prev_instances))) def _assign_new_id(self, instances: Instances) -> Instances: """ For each untracked instance, assign a new id Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with new ID assigned """ untracked_idx = set(range(len(instances))).difference(self._matched_idx) for idx in untracked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _merge_untracked_instances(self, instances: Instances) -> Instances: """ For untracked previous instances, under certain condition, still keep them in tracking and merge with the current instances. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances merging current instances and instances from previous frame decided to keep tracking """ untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) for idx in self._untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._prev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pred_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
null
IF
complete_current_header_empty_completion
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy from typing import List import numpy as np import torch from detectron2.config import configurable from detectron2.structures import Boxes, Instances from detectron2.structures.boxes import pairwise_iou from ..config.config import CfgNode as CfgNode_ from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY @TRACKER_HEADS_REGISTRY.register() class BBoxIOUTracker(BaseTracker): """ A bounding box tracker to assign ID based on IoU between current and previous instances """ @configurable def __init__( self, *, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, track_iou_threshold: float = 0.5, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video track_iou_threshold: iou threshold, below this number a bbox pair is removed from tracking """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period self._track_iou_threshold = track_iou_threshold @classmethod def from_config(cls, cfg: CfgNode_): """ Old style initialization using CfgNode Args: cfg: D2 CfgNode, config file Return: dictionary storing arguments for __init__ method """ assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) return { "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", "video_height": video_height, "video_width": video_width, "max_num_instances": max_num_instances, "max_lost_frame_count": max_lost_frame_count, "min_box_rel_dim": min_box_rel_dim, "min_instance_period": min_instance_period, "track_iou_threshold": track_iou_threshold } def update(self, instances: Instances) -> Instances: """ See BaseTracker description """ if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: # calculate IoU of all bbox pairs iou_all = pairwise_iou( boxes1=instances.pred_boxes, boxes2=self._prev_instances.pred_boxes, ) # sort IoU in descending order bbox_pairs = self._create_prediction_pairs(instances, iou_all) # assign previous ID to current bbox if IoU > track_iou_threshold self._reset_fields() for bbox_pair in bbox_pairs: idx = bbox_pair["idx"] prev_id = bbox_pair["prev_id"] if idx in self._matched_idx \ or 
prev_id in self._matched_ID \ or bbox_pair["IoU"] < self._track_iou_threshold: continue instances.ID[idx] = prev_id instances.ID_period[idx] = bbox_pair["prev_period"] + 1 instances.lost_frame_count[idx] = 0 self._matched_idx.add(idx) self._matched_ID.add(prev_id) self._untracked_prev_idx.remove(bbox_pair["prev_idx"]) instances = self._assign_new_id(instances) instances = self._merge_untracked_instances(instances) self._prev_instances = copy.deepcopy(instances) return instances def _create_prediction_pairs( self, instances: Instances, iou_all: np.ndarray ) -> List: """ For all instances in previous and current frames, create pairs. For each pair, store index of the instance in current frame predcitions, index in previous predictions, ID in previous predictions, IoU of the bboxes in this pair, period in previous predictions. Args: instances: D2 Instances, for predictions of the current frame iou_all: IoU for all bboxes pairs Return: A list of IoU for all pairs """ bbox_pairs = [] for i in range(len(instances)): for j in range(len(self._prev_instances)): bbox_pairs.append( { "idx": i, "prev_idx": j, "prev_id": self._prev_instances.ID[j], "IoU": iou_all[i, j], "prev_period": self._prev_instances.ID_period[j], } ) return bbox_pairs def _initialize_extra_fields(self, instances: Instances) -> Instances: """ If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instances.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_period"): instances.set("ID_period", [None] * len(instances)) if not instances.has("lost_frame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is<fim_suffix> None: instances.ID = list(range(len(instances))) self._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _reset_fields(self): """ Before each uodate call, reset fields first """ self._matched_idx = set() self._matched_ID = set() self._untracked_prev_idx = set(range(len(self._prev_instances))) def _assign_new_id(self, instances: Instances) -> Instances: """ For each untracked instance, assign a new id Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with new ID assigned """ untracked_idx = set(range(len(instances))).difference(self._matched_idx) for idx in untracked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _merge_untracked_instances(self, instances: Instances) -> Instances: """ For untracked previous instances, under certain condition, still keep them in tracking and merge with the current instances. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances merging current instances and instances from previous frame decided to keep tracking """ untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) for idx in self._untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._prev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pred_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
null
IF
complete_current_header_empty_completion
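The row above embeds detectron2's BBoxIOUTracker, whose update method pairs current and previous boxes by IoU, skips pairs whose IoU falls below track_iou_threshold, and carries each matched previous ID forward while marking both indices as used. The following is a minimal, self-contained sketch of that greedy matching step, not the detectron2 code itself; the helper name greedy_iou_match and the default threshold value are illustrative assumptions.

import numpy as np

def greedy_iou_match(iou_all: np.ndarray, iou_threshold: float = 0.5):
    """Greedily pair current rows with previous columns by descending IoU.

    iou_all[i, j] is the IoU between current box i and previous box j.
    Returns (current_idx, prev_idx) pairs; each index is used at most once.
    """
    pairs = []
    matched_cur, matched_prev = set(), set()
    # Visit all (i, j) candidates from highest to lowest IoU, mirroring the
    # "sort pairs by IoU, skip already-matched indices" logic in the tracker.
    order = np.argsort(iou_all, axis=None)[::-1]
    for flat in order:
        i, j = np.unravel_index(flat, iou_all.shape)
        if iou_all[i, j] < iou_threshold:
            break  # all remaining candidates are below the threshold
        if i in matched_cur or j in matched_prev:
            continue
        pairs.append((int(i), int(j)))
        matched_cur.add(int(i))
        matched_prev.add(int(j))
    return pairs

# Example: two current boxes vs. two previous boxes.
iou = np.array([[0.8, 0.1],
                [0.2, 0.6]])
print(greedy_iou_match(iou))  # [(0, 0), (1, 1)]

Indices left unmatched here correspond to the tracker's _assign_new_id (new current boxes) and _merge_untracked_instances (lost previous boxes) paths.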
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy import numpy as np import torch from detectron2.structures import Boxes, Instances from .base_tracker import BaseTracker from scipy.optimize import linear_sum_assignment from ..config.config import CfgNode as CfgNode_ from typing import Dict from detectron2.config import configurable class BaseHungarianTracker(BaseTracker): """ A base class for all Hungarian trackers """ @configurable def __init__( self, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period @classmethod def from_config(cls, cfg: CfgNode_) -> Dict: raise NotImplementedError("Calling HungarianTracker::from_config") def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray: raise NotImplementedError("Calling HungarianTracker::build_matrix") def update(self, instances: Instances) -> Instances: if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: self._untracked_prev_idx = set(range(len(self._prev_instances))) cost_matrix = self.build_cost_matrix(instances, self._prev_instances) matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix) instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx) instances = self._process_unmatched_idx(instances, matched_idx) instances = self._process_unmatched_prev_idx(instances, matched_prev_idx) self._prev_instances = copy.deepcopy(instances) return instances def _initialize_extra_fields(self, instances: Instances) -> Instances: """ If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instances.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_period"): instances.set("ID_period", [None] * len(instances)) if not instances.has("lost_frame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is None: instances.ID = list(range(len(instances))) self._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _process_matched_idx( self, instances: Instances, matched_idx: np.ndarray, matched_prev_idx: np.ndarray ) -> Instances: assert matched_idx.size == matched_prev_idx.size for i in range(matched_idx.size): instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]] instances.ID_period[matched_idx[i]] = \ self._prev_instances.ID_period[matched_prev_idx[i]] + 1 instances.lost_frame_count[matched_idx[i]] = 0 return instances def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances: untracked_idx = set(range(len(instances))).difference(set(matched_idx)) for idx in untracked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _process_unmatched_prev_idx( self, instances: Instances, matched_prev_idx: np.ndarray ) -> Instances: untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx)) for idx in untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._prev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pr<fim_suffix>ed_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
null
IF
complete_current_header_empty_completion
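The second row embeds BaseHungarianTracker, which delegates cost-matrix construction to subclasses via build_cost_matrix and solves the assignment with scipy.optimize.linear_sum_assignment. Below is a minimal sketch of that assignment call; the cost = 1 - IoU choice and the hungarian_match helper name are assumptions for illustration only, since the real cost matrix is defined by the subclasses.

import numpy as np
from scipy.optimize import linear_sum_assignment

def hungarian_match(iou_all: np.ndarray):
    """Match current boxes (rows) to previous boxes (columns) by minimum total cost.

    Assumes cost = 1 - IoU so that higher overlap means lower assignment cost.
    """
    cost = 1.0 - iou_all
    matched_idx, matched_prev_idx = linear_sum_assignment(cost)
    return matched_idx, matched_prev_idx

# Example: two current boxes vs. three previous boxes.
iou = np.array([[0.8, 0.1, 0.0],
                [0.2, 0.6, 0.3]])
cur, prev = hungarian_match(iou)
print(list(zip(cur.tolist(), prev.tolist())))  # [(0, 0), (1, 1)]

Unlike the greedy IoU pass in the first row, the Hungarian assignment minimizes total cost jointly, so a single high-IoU pair can be passed over when doing so frees two moderately overlapping pairs elsewhere; the matched and unmatched indices then feed _process_matched_idx, _process_unmatched_idx, and _process_unmatched_prev_idx respectively.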