Dataset columns and value statistics:

path            string, lengths 15 to 77
type            string, 1 distinct value
project         string, 1 distinct value
commit_hash     string, lengths 40 to 40
commit_message  string, lengths 15 to 198
ground_truth    string, lengths 26 to 155
main_code       string, lengths 176 to 2.5k
context         string, lengths 91 to 9.37k
coeditor.encoding/truncate_section
Modified
temp-1
0a557eb3eef82b76ad61c98704f8bfdf960a3ca1
Improve encoder performance.
<0>:<add> assert_eq(direction.value, TruncateAt.Right.value)
# module: coeditor.encoding def truncate_section( sec: TokenSeq, direction: TruncateAt, limit: int, add_bos: bool = True, ) -> TokenSeq: if len(sec) <= limit: return sec + if direction.value == TruncateAt.Left.value: - if direction == TruncateAt.Left: sec = sec[-limit:] if add_bos and sec: sec[0] = BOS_id else: - assert_eq(direction, TruncateAt.Right) <0> sec = sec[:limit] if add_bos and sec: sec[-1] = EOS_id return sec
===========unchanged ref 0=========== at: coeditor.common TokenSeq = list[Token] at: coeditor.encoding BOS_id = get_tk_id("<s>") EOS_id = get_tk_id("</s>") TruncateAt() at: coeditor.encoding.break_into_chunks chunks = list[TokenSeq]() L = len(tks) header = header_f(chunk_id) progress = chunk_size - len(header) - this_overlap ===========changed ref 0=========== # module: coeditor.encoding + def encode_diffs(diffs: Sequence[str]) -> TokenSeq: - def encode_diffs(diffs: list[str]) -> TokenSeq: """ A helper function to encode the diff lines (with '+', '-', or ' ' prefixes) into a token sequence with the special <add> and <del> tokens. """ + prefixes = list[TokenSeq]() + code_lines = list[str]() - tokens = TokenSeq() for i, diff in enumerate(diffs): if diff.startswith("+"): + prefixes.append([Add_id]) - tokens.append(Add_id) elif diff.startswith("-"): + prefixes.append([Del_id]) - tokens.append(Del_id) else: assert diff.startswith(" ") + prefixes.append([]) + code_lines.append(diff[1:]) + code_tks = _BaseTokenizer.encode("\n".join(code_lines), add_special_tokens=False) + code_lines = split_list(code_tks, Newline_id) + for i, line in enumerate(code_lines): + if prefixes[i]: + code_lines[i] = prefixes[i] + line + return join_list(code_lines, Newline_id) - tokens.extend(_BaseTokenizer.encode(diff[1:], add_special_tokens=False)) - if i < len(diffs) - 1: - tokens.append(Newline_id) - return tokens ===========changed ref 1=========== # module: coeditor.encoding def change_to_tokens(change: Change[str]) -> TokenSeq: + match change: + case Modified(before=before, after=after, unchanged=unchanged): + if unchanged or before == after: + return encode_basic(before) + else: + diffs = change_to_line_diffs(change) - diffs = change_to_line_diffs(change) + return encode_diffs(diffs) - return encode_diffs(diffs) + case Added() | Deleted(): + lines = split_list(encode_basic(change.earlier()), Newline_id) + tk = Add_id if isinstance(change, Added) else Del_id + return join_list([tk] + line for line in lines) + case _: + raise AssertionError(f"Not a change type: {change}") ===========changed ref 2=========== # module: coeditor.encoding def get_extra_id(i: int) -> int: assert 0 <= i < N_Extra_Ids + return _min_extra_id + (N_Extra_Ids - 1 - i) - return _Tokenizer.additional_special_tokens_ids[N_Extra_Ids - 1 - i]
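To make the truncation behaviour above easy to try out, here is a minimal, self-contained sketch of `truncate_section` following the post-commit version (comparing enum `.value`s rather than enum members). The `BOS_id`/`EOS_id` constants are placeholder ids, not the project's real tokenizer ids.

```python
from enum import Enum

# Placeholder special-token ids; the real project derives these from its tokenizer.
BOS_id = 1
EOS_id = 2

TokenSeq = list[int]


class TruncateAt(Enum):
    Left = 0
    Right = 1


def truncate_section(
    sec: TokenSeq, direction: TruncateAt, limit: int, add_bos: bool = True
) -> TokenSeq:
    """Keep at most `limit` tokens, marking the truncated side with BOS/EOS."""
    if len(sec) <= limit:
        return sec
    if direction.value == TruncateAt.Left.value:  # value comparison, as in the fix above
        sec = sec[-limit:]
        if add_bos and sec:
            sec[0] = BOS_id  # signal that earlier tokens were dropped
    else:
        assert direction.value == TruncateAt.Right.value
        sec = sec[:limit]
        if add_bos and sec:
            sec[-1] = EOS_id  # signal that later tokens were dropped
    return sec


assert truncate_section([10, 11, 12, 13], TruncateAt.Left, 2) == [BOS_id, 13]
assert truncate_section([10, 11, 12, 13], TruncateAt.Right, 2) == [10, EOS_id]
```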
coeditor.history/Modified.map
Modified
temp-1
0a557eb3eef82b76ad61c98704f8bfdf960a3ca1
Improve encoder performance.
<0>:<add> return Modified(f(self.before), f(self.after))
# module: coeditor.history @dataclass(frozen=True) class Modified(_ChangeBase[E1]): def map(self, f: Callable[[E1], T2]) -> "Modified[T2]": + if self.unchanged: + return Modified.from_unchanged(f(self.before)) + else: - return Modified(f(self.before), f(self.after)) <0>
===========unchanged ref 0=========== at: coeditor.common T2 = TypeVar("T2") at: coeditor.history E1 = TypeVar("E1", covariant=True) at: coeditor.history.Modified before: E1 after: E1 unchanged: bool = False at: typing Callable = _CallableType(collections.abc.Callable, 2) ===========changed ref 0=========== # module: coeditor.encoding def get_extra_id(i: int) -> int: assert 0 <= i < N_Extra_Ids + return _min_extra_id + (N_Extra_Ids - 1 - i) - return _Tokenizer.additional_special_tokens_ids[N_Extra_Ids - 1 - i] ===========changed ref 1=========== # module: coeditor.encoding def truncate_section( sec: TokenSeq, direction: TruncateAt, limit: int, add_bos: bool = True, ) -> TokenSeq: if len(sec) <= limit: return sec + if direction.value == TruncateAt.Left.value: - if direction == TruncateAt.Left: sec = sec[-limit:] if add_bos and sec: sec[0] = BOS_id else: + assert_eq(direction.value, TruncateAt.Right.value) - assert_eq(direction, TruncateAt.Right) sec = sec[:limit] if add_bos and sec: sec[-1] = EOS_id return sec ===========changed ref 2=========== # module: coeditor.encoding def change_to_tokens(change: Change[str]) -> TokenSeq: + match change: + case Modified(before=before, after=after, unchanged=unchanged): + if unchanged or before == after: + return encode_basic(before) + else: + diffs = change_to_line_diffs(change) - diffs = change_to_line_diffs(change) + return encode_diffs(diffs) - return encode_diffs(diffs) + case Added() | Deleted(): + lines = split_list(encode_basic(change.earlier()), Newline_id) + tk = Add_id if isinstance(change, Added) else Del_id + return join_list([tk] + line for line in lines) + case _: + raise AssertionError(f"Not a change type: {change}") ===========changed ref 3=========== # module: coeditor.encoding + def encode_diffs(diffs: Sequence[str]) -> TokenSeq: - def encode_diffs(diffs: list[str]) -> TokenSeq: """ A helper function to encode the diff lines (with '+', '-', or ' ' prefixes) into a token sequence with the special <add> and <del> tokens. """ + prefixes = list[TokenSeq]() + code_lines = list[str]() - tokens = TokenSeq() for i, diff in enumerate(diffs): if diff.startswith("+"): + prefixes.append([Add_id]) - tokens.append(Add_id) elif diff.startswith("-"): + prefixes.append([Del_id]) - tokens.append(Del_id) else: assert diff.startswith(" ") + prefixes.append([]) + code_lines.append(diff[1:]) + code_tks = _BaseTokenizer.encode("\n".join(code_lines), add_special_tokens=False) + code_lines = split_list(code_tks, Newline_id) + for i, line in enumerate(code_lines): + if prefixes[i]: + code_lines[i] = prefixes[i] + line + return join_list(code_lines, Newline_id) - tokens.extend(_BaseTokenizer.encode(diff[1:], add_special_tokens=False)) - if i < len(diffs) - 1: - tokens.append(Newline_id) - return tokens
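Below is a stand-alone sketch of the `Modified` change type and the fixed `map` method from the row above. The `from_unchanged` constructor is modelled after the classmethod the fix calls; its exact signature in the project may differ.

```python
from dataclasses import dataclass
from typing import Callable, Generic, TypeVar

E1 = TypeVar("E1")
T2 = TypeVar("T2")


@dataclass(frozen=True)
class Modified(Generic[E1]):
    before: E1
    after: E1
    unchanged: bool = False

    @classmethod
    def from_unchanged(cls, value: T2) -> "Modified[T2]":
        # Represent "no change" by storing the same value on both sides.
        return Modified(value, value, unchanged=True)

    def map(self, f: Callable[[E1], T2]) -> "Modified[T2]":
        if self.unchanged:
            # Preserve the `unchanged` flag instead of losing it when mapping.
            return Modified.from_unchanged(f(self.before))
        else:
            return Modified(f(self.before), f(self.after))


assert Modified("a", "b").map(str.upper) == Modified("A", "B")
assert Modified.from_unchanged("x").map(str.upper).unchanged
```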
coeditor.common/split_list
Modified
temp-1
abb5f1bd488da1a987cc025440e395f8eeac3e1e
Fix tokenize_datasets to avoid duplicating lists.
<0>:<add> result.append(lst[ptr:])
# module: coeditor.common def split_list( lst: list[T1], sep: T1, ) -> list[list[T1]]: """ Split a list into segments by a separator, always ends with an empty list. """ result = list[list[T1]]() + ptr = 0 + for i, item in enumerate(lst): - buff = list[T1]() - for item in lst: if item == sep: + result.append(lst[ptr:i]) - result.append(buff) - buff = list[T1]() - else: - buff.append(item) - result.append(buff) + ptr = i + 1 <0> return result
===========unchanged ref 0=========== at: coeditor.common T1 = TypeVar("T1")
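The fixed `split_list` above replaces per-element copying with slicing. A runnable copy with a small usage check (note that the trailing segment is empty only when the list ends with the separator):

```python
from typing import TypeVar

T1 = TypeVar("T1")


def split_list(lst: list[T1], sep: T1) -> list[list[T1]]:
    """Split `lst` on `sep` using slices instead of element-by-element appends."""
    result: list[list[T1]] = []
    ptr = 0
    for i, item in enumerate(lst):
        if item == sep:
            result.append(lst[ptr:i])
            ptr = i + 1
    result.append(lst[ptr:])  # trailing segment (empty if lst ends with sep)
    return result


assert split_list([1, 0, 2, 3, 0], 0) == [[1], [2, 3], []]
assert split_list([], 0) == [[]]
```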
coeditor.ctx_change_encoder/TkC3Problem.meta_data_lines
Modified
temp-1
abb5f1bd488da1a987cc025440e395f8eeac3e1e
Fix tokenize_datasets to avoid duplicating lists.
<0>:<add> f"commit: {self.commit}",
# module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): def meta_data_lines(self) -> list[str]: return [ f"path: {self.path}", f"n_references: {len(self.references)}", f"total_reference_tks: {sum(len(ref) for ref in self.references)}", + f"project: {self.project}", - f"src_info: {self.src_info}", <0> ]
===========unchanged ref 0=========== at: coeditor.common TokenSeq = list[Token] at: coeditor.ctx_change_encoder.TkC3Problem input_tks: TokenSeq output_tks: TokenSeq path: ProjectPath change_type: Change[None] named_references: Sequence[tuple[str, TokenSeq]] project: str commit: CommitInfo | None at: coeditor.encoding.TokenizedEdit input_tks: TokenSeq output_tks: TokenSeq main_tks: TokenSeq path: ProjectPath change_type: Change[None] BAD_DELETE = encode_single_line("((bad delete))") all_ctxs(self) -> dict[str, TokenSeq] meta_data_lines(self) -> list[str] show(self, pred_tks: TokenSeq | None=None, skip_ctx: bool=False, skip_meta: bool=False) -> str ===========changed ref 0=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): "Tokenized contextual code change prediction problem." input_tks: TokenSeq output_tks: TokenSeq path: ProjectPath change_type: Change[None] # most relevant to least relevant named_references: Sequence[tuple[str, TokenSeq]] + project: str + commit: CommitInfo | None - src_info: dict[str, Any] ===========changed ref 1=========== # module: coeditor.ctx_change_encoder PyFullName = NewType("PyFullName", str) - ===========changed ref 2=========== # module: coeditor.ctx_change_encoder + class SrcInfo(TypedDict): + project: str + commit: CommitInfo | None + ===========changed ref 3=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class C3Problem: "Contextual code change prediction problem." span: ChangedCodeSpan # most relevant to least relevant relevant_changes: Sequence[ChangedCodeSpan] # most relevant to least relevant relevant_unchanged: Sequence[ChangedCodeSpan] # some optional information about how the problem was generated + src_info: SrcInfo - src_info: dict[str, Any] ===========changed ref 4=========== # module: coeditor.common def split_list( lst: list[T1], sep: T1, ) -> list[list[T1]]: """ Split a list into segments by a separator, always ends with an empty list. """ result = list[list[T1]]() + ptr = 0 + for i, item in enumerate(lst): - buff = list[T1]() - for item in lst: if item == sep: + result.append(lst[ptr:i]) - result.append(buff) - buff = list[T1]() - else: - buff.append(item) - result.append(buff) + ptr = i + 1 + result.append(lst[ptr:]) return result
coeditor.ctx_change_encoder/C3ProblemTokenizer.__post_init__
Modified
temp-1
abb5f1bd488da1a987cc025440e395f8eeac3e1e
Fix tokenize_datasets to avoid duplicating lists.
<0>:<add> self._change_cache = LRUCache[Change[str], TokenSeq](maxsize=500)
# module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def __post_init__(self): - self._change_cache = LRUCache[Change[str], TokenSeq](maxsize=5000) <0>
===========unchanged ref 0=========== at: coeditor.ctx_change_encoder.C3ProblemTokenizer VERSION = "1.0" max_ref_tks: int = 512 max_query_tks: int = 512 max_output_tks: int = 256 max_scope_tks: int = 128 max_lines_to_edit: int = 20 skip_unchanged_problems: bool = True ===========changed ref 0=========== # module: coeditor.ctx_change_encoder - _ObjId = NewType("_ObjId", int) - ===========changed ref 1=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): def meta_data_lines(self) -> list[str]: return [ f"path: {self.path}", f"n_references: {len(self.references)}", f"total_reference_tks: {sum(len(ref) for ref in self.references)}", + f"project: {self.project}", + f"commit: {self.commit}", - f"src_info: {self.src_info}", ] ===========changed ref 2=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): "Tokenized contextual code change prediction problem." input_tks: TokenSeq output_tks: TokenSeq path: ProjectPath change_type: Change[None] # most relevant to least relevant named_references: Sequence[tuple[str, TokenSeq]] + project: str + commit: CommitInfo | None - src_info: dict[str, Any] ===========changed ref 3=========== # module: coeditor.ctx_change_encoder PyFullName = NewType("PyFullName", str) - ===========changed ref 4=========== # module: coeditor.ctx_change_encoder + class SrcInfo(TypedDict): + project: str + commit: CommitInfo | None + ===========changed ref 5=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class C3Problem: "Contextual code change prediction problem." span: ChangedCodeSpan # most relevant to least relevant relevant_changes: Sequence[ChangedCodeSpan] # most relevant to least relevant relevant_unchanged: Sequence[ChangedCodeSpan] # some optional information about how the problem was generated + src_info: SrcInfo - src_info: dict[str, Any] ===========changed ref 6=========== # module: coeditor.common def split_list( lst: list[T1], sep: T1, ) -> list[list[T1]]: """ Split a list into segments by a separator, always ends with an empty list. """ result = list[list[T1]]() + ptr = 0 + for i, item in enumerate(lst): - buff = list[T1]() - for item in lst: if item == sep: + result.append(lst[ptr:i]) - result.append(buff) - buff = list[T1]() - else: - buff.append(item) - result.append(buff) + ptr = i + 1 + result.append(lst[ptr:]) return result
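The `_change_cache` above is a `cachetools.LRUCache`; the commit shrinks it from 5000 to 500 entries. A hedged sketch of the memoisation pattern it enables, with `encode_change` as a stand-in for the real tokenisation step:

```python
from cachetools import LRUCache

TokenSeq = list[int]


class ChangeEncoder:
    def __init__(self) -> None:
        # Bounded cache: once 500 entries exist, the least recently used one is evicted.
        self._change_cache = LRUCache(maxsize=500)

    def encode_change(self, change: str) -> TokenSeq:
        cached = self._change_cache.get(change)
        if cached is None:
            cached = [ord(c) for c in change]  # stand-in for the real tokenizer call
            self._change_cache[change] = cached
        return cached


enc = ChangeEncoder()
assert enc.encode_change("abc") is enc.encode_change("abc")  # second call is a cache hit
```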
coeditor.ctx_change_encoder/C3ProblemTokenizer.tokenize_problem
Modified
temp-1
abb5f1bd488da1a987cc025440e395f8eeac3e1e
Fix tokenize_datasets to avoid duplicating lists.
<0>:<add> commit=problem.src_info["commit"],
# module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> Sequence[TkC3Problem]: <s> lambda i: self._encode_headers(span.headers, i + 1), chunk_size=self.max_ref_tks, overlap=self.ref_chunk_overlap, ) above_chunks = [ (f"above chunk {i}", chunk) for i, chunk in enumerate(above_chunks) ] below_chunks = [ (f"below chunk {i}", chunk) for i, chunk in enumerate(below_chunks) ] all_refs = above_chunks + below_chunks + named_references return TkC3Problem( scope_tks + chunk_input, chunk_output, path=span.headers[-1].path, change_type=span.change.map(lambda _: None), named_references=all_refs, + project=problem.src_info["project"], - src_info=problem.src_info, <0> ) problems = list[TkC3Problem]() for l in range(len(tk_delta.deltas) + 1): finished = l == len(tk_delta.deltas) input_growth = len(origin_lines[l]) + 2 if l < len(origin_lines) else 1 if ( finished or chunk_lines >= self.max_lines_to_edit or len(chunk_input) + input_growth > input_limit ): if has_change(chunk_output): problems.append(get_problem(chunk_input, chunk_output)) if len(problems) >= self.max_chunks_per_elem: break if finished: break chunk_main_input = join_list(origin_lines[chunk_start_l:l], Newline_id) chunk_main_delta = tk_delta.for_input</s>
===========above chunk 0=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> Sequence[TkC3Problem]: # offset: -1 <s> chunk_start_l = 0 scope_tks = self._encode_headers(span.headers, 0) chunk_input = TokenSeq() input_limit = self.max_query_tks - len(scope_tks) chunk_lines = 0 chunk_output = TokenSeq() prev_change_tks = TokenSeq() def get_problem(chunk_input, chunk_output): # try move some prev_change_tks into the input above_tks = prev_change_tks below_tks = join_list(origin_lines[l:], Newline_id) chunk_input, above_tks, below_tks = self._inline_some_context( chunk_input, above_tks, below_tks, input_limit ) # limit the input size if it's too long (can happen for later chunks) chunk_input = truncate_section(chunk_input, TruncateAt.Right, input_limit) chunk_output = truncate_output_tks(chunk_input, chunk_output) chunk_output = truncate_section( chunk_output, TruncateAt.Right, self.max_output_tks, add_bos=False ) above_chunks = break_into_chunks( above_tks, lambda i: self._encode_headers(span.headers, -1 - i), chunk_size=self.max_ref_tks, overlap=self.ref_chunk_overlap, right_to_left=True, ) if finished: below_chunks = [] else: below_chunks = break_into_chunks( below_tks, lambda i: self._encode_headers(span.headers, i + 1), chunk_size=self.max_ref_t</s> ===========above chunk 1=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> Sequence[TkC3Problem]: # offset: -2 span = problem.span named_references = list[tuple[str, TokenSeq]]() # compute the references that are relevant to this span relevant_chunks = self._group_encode_changed_refs(problem.relevant_changes) for i, chunk in enumerate(relevant_chunks): named_references.append((f"changed ref {i}", chunk)) relevant_chunks = self._group_encode_unchanged_refs(problem.relevant_unchanged) for i, chunk in enumerate(relevant_chunks): named_references.append((f"unchanged ref {i}", chunk)) diffs = change_to_line_diffs(span.change) original, delta = line_diffs_to_original_delta(diffs) origin_lines = split_list(encode_basic(original), Newline_id) tk_delta = delta.to_tk_delta() chunk_id = 0 chunk_start_l = 0 scope_tks = self._encode_headers(span.headers, 0) </s> ===========below chunk 0=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> Sequence[TkC3Problem]: # offset: 1 <s>lines[chunk_start_l:l], Newline_id) chunk_main_delta = tk_delta.for_input_range((chunk_start_l, l)) chunk_main_change = chunk_main_delta.to_change_tks(chunk_main_input) prev_change_tks.extend(chunk_main_change) prev_change_tks.append(Newline_id) chunk_id += 1 chunk_input = TokenSeq() chunk_lines = 0 chunk_output = TokenSeq() chunk_start_l = l chunk_input.append(get_extra_id(chunk_lines)) if l < len(origin_lines): chunk_input.extend(origin_lines[l]) chunk_input.append(Newline_id) line_change = join_list(tk_delta.deltas[l], Newline_id) chunk_output.append(get_extra_id(chunk_lines)) chunk_output.extend(line_change) if line_change and line_change[-1] != Del_id: chunk_output.append(Newline_id) chunk_lines += 1 return problems ===========unchanged ref 0=========== at: cachetools.lru LRUCache(maxsize: int, getsizeof: Optional[Callable[[_VT], int]]=...) 
at: coeditor.common TokenSeq = list[Token] split_list(lst: list[T1], sep: T1) -> list[list[T1]] join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1] at: coeditor.ctx_change_encoder C3Problem(span: ChangedCodeSpan, relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], src_info: SrcInfo) TkC3Problem(input_tks: TokenSeq, output_tks: TokenSeq, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TokenSeq]], project: str, commit: CommitInfo | None) at: coeditor.ctx_change_encoder.C3Problem span: ChangedCodeSpan relevant_changes: Sequence[ChangedCodeSpan] relevant_unchanged: Sequence[ChangedCodeSpan] src_info: SrcInfo at: coeditor.ctx_change_encoder.C3ProblemTokenizer max_ref_tks: int = 512 max_query_tks: int = 512 max_output_tks: int = 256 max_lines_to_edit: int = 20 ref_chunk_overlap: int = 32 max_chunks_per_elem: int = 4 _encode_headers(scope_changes: Sequence[ChangedHeader], offset: int) -> TokenSeq _inline_some_context(input: TokenSeq, above_ctx: TokenSeq, below_ctx: TokenSeq, size_limit: int) -> tuple[TokenSeq, TokenSeq, TokenSeq] _group_encode_unchanged_refs(elems: Sequence[ChangedCodeSpan]) -> Sequence[TokenSeq] _group_encode_changed_refs(changes: Sequence[ChangedCodeSpan]) -> Sequence[TokenSeq]
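The chunking loop in `tokenize_problem` prefixes each editable line with an `<extra_id_k>` marker in the query and pairs the same marker with that line's change tokens in the target. The following is a simplified, string-level illustration of that marker scheme; the real implementation works on token ids and additionally handles `TkDelta`, truncation, and reference chunks.

```python
def build_query(lines: list[str], line_changes: dict[int, list[str]]) -> tuple[str, str]:
    """Hypothetical string-level version of the <extra_id> pairing scheme."""
    input_parts: list[str] = []
    output_parts: list[str] = []
    for k, line in enumerate(lines):
        marker = f"<extra_id_{k}>"
        input_parts.append(marker + line)  # mark each editable line in the query
        # The target repeats the marker, followed by that line's (possibly empty) edit.
        output_parts.append(marker + "".join(line_changes.get(k, [])))
    return "\n".join(input_parts), "".join(output_parts)


inp, out = build_query(["x = 1", "y = 2"], {1: ["<add>y = 3", "<del>"]})
assert inp.splitlines() == ["<extra_id_0>x = 1", "<extra_id_1>y = 2"]
assert out == "<extra_id_0><extra_id_1><add>y = 3<del>"
```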
coeditor.ctx_change_encoder/C3ProblemTokenizer.tokenize_datasets
Modified
temp-1
abb5f1bd488da1a987cc025440e395f8eeac3e1e
Fix tokenize_datasets to avoid duplicating lists.
<0>:<add> datasets[key2split[key]].extend(edits)
# module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def tokenize_datasets( self, split2problems: Mapping[str, Sequence[C3Problem]], max_workers: int | None = None, + ) -> dict[str, list[TkC3Problem]]: - ) -> dict[str, Sequence[TkC3Problem]]: + def get_key(p: C3Problem): + return (p.src_info["project"], not_none(p.src_info["commit"]).hash) + + # we want problems from the same commit to be sent in groups + key2split = { + get_key(p): name for name, split in split2problems.items() for p in split + } - datasets = dict[str, Sequence[TkC3Problem]]() all_problems = join_list(split2problems.values()) + key2problems = groupby(all_problems, lambda p: get_key(p)) + all_probs = list(key2problems.values()) all_edits = pmap( + self._tokenize_problems, - self.tokenize_problem, + all_probs, - all_problems, desc=f"Tokenizing dataset", + tqdm_args={"unit": "commit"}, max_workers=max_workers, ) - start = 0 - for name, split in split2problems.items(): - split_edits = join_list(all_edits[start : start + len(split)]) - datasets[name] = split_edits - start += len(split) + datasets = {name: list[TkC3Problem]() for name in split2problems} + for edits, key in zip(all_edits, key2problems.keys()): <0> return datasets
===========unchanged ref 0=========== at: coeditor._utils not_none(x: Optional[T1]) -> T1 at: coeditor.common join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1] at: coeditor.ctx_change_encoder C3Problem(span: ChangedCodeSpan, relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], src_info: SrcInfo) TkC3Problem(input_tks: TokenSeq, output_tks: TokenSeq, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TokenSeq]], project: str, commit: CommitInfo | None) at: coeditor.ctx_change_encoder.C3Problem src_info: SrcInfo at: coeditor.ctx_change_encoder.C3ProblemTokenizer tokenize_problem(problem: C3Problem) -> Sequence[TkC3Problem] tokenize_problem(self, problem: C3Problem) -> Sequence[TkC3Problem] at: coeditor.ctx_change_encoder.C3ProblemTokenizer.tokenize_problem chunk_output = TokenSeq() problems = list[TkC3Problem]() line_change = join_list(tk_delta.deltas[l], Newline_id) at: coeditor.encoding Del_id = get_tk_id(Del) Newline_id = get_tk_id("\n") at: typing Mapping = _alias(collections.abc.Mapping, 2) Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping items() -> AbstractSet[Tuple[_KT, _VT_co]] ===========changed ref 0=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): "Tokenized contextual code change prediction problem." input_tks: TokenSeq output_tks: TokenSeq path: ProjectPath change_type: Change[None] # most relevant to least relevant named_references: Sequence[tuple[str, TokenSeq]] + project: str + commit: CommitInfo | None - src_info: dict[str, Any] ===========changed ref 1=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class C3Problem: "Contextual code change prediction problem." 
span: ChangedCodeSpan # most relevant to least relevant relevant_changes: Sequence[ChangedCodeSpan] # most relevant to least relevant relevant_unchanged: Sequence[ChangedCodeSpan] # some optional information about how the problem was generated + src_info: SrcInfo - src_info: dict[str, Any] ===========changed ref 2=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> Sequence[TkC3Problem]: span = problem.span named_references = list[tuple[str, TokenSeq]]() # compute the references that are relevant to this span relevant_chunks = self._group_encode_changed_refs(problem.relevant_changes) for i, chunk in enumerate(relevant_chunks): named_references.append((f"changed ref {i}", chunk)) relevant_chunks = self._group_encode_unchanged_refs(problem.relevant_unchanged) for i, chunk in enumerate(relevant_chunks): named_references.append((f"unchanged ref {i}", chunk)) diffs = change_to_line_diffs(span.change) original, delta = line_diffs_to_original_delta(diffs) origin_lines = split_list(encode_basic(original), Newline_id) tk_delta = delta.to_tk_delta() chunk_id = 0 chunk_start_l = 0 scope_tks = self._encode_headers(span.headers, 0) chunk_input = TokenSeq() input_limit = self.max_query_tks - len(scope_tks) chunk_lines = 0 chunk_output = TokenSeq() prev_change_tks = TokenSeq() def get_problem(chunk_input, chunk_output): # try move some prev_change_tks into the input above_tks = prev_change_tks below_tks = join_list(origin_lines[l:], Newline_id) chunk_input, above_tks, below_tks = self._inline_some_context( chunk_input, above_tks, below_tks, input_limit ) # limit the input size if it's too long (can happen for later chunks) chunk_input = truncate_section(chunk_input,</s> ===========changed ref 3=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> Sequence[TkC3Problem]: # offset: 1 <s> limit the input size if it's too long (can happen for later chunks) chunk_input = truncate_section(chunk_input, TruncateAt.Right, input_limit) chunk_output = truncate_output_tks(chunk_input, chunk_output) chunk_output = truncate_section( chunk_output, TruncateAt.Right, self.max_output_tks, add_bos=False ) above_chunks = break_into_chunks( above_tks, lambda i: self._encode_headers(span.headers, -1 - i), chunk_size=self.max_ref_tks, overlap=self.ref_chunk_overlap, right_to_left=True, ) if finished: below_chunks = [] else: below_chunks = break_into_chunks( below_tks, lambda i: self._encode_headers(span.headers, i + 1), chunk_size=self.max_ref_tks, overlap=self.ref_chunk_overlap, ) above_chunks = [ (f"above chunk {i}", chunk) for i, chunk in enumerate(above_chunks) ] below_chunks = [ (f"below chunk {i}", chunk) for i, chunk in enumerate(below_chunks) ] all_refs = above_chunks + below_chunks + named_references return TkC3Problem( scope_tks + chunk_input, chunk_output, path=span.headers[-1].path, change_type=span.change.map(lambda _: None), named_references=all_refs, + project=</s>
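The fixed `tokenize_datasets` above keys every problem by `(project, commit hash)`, tokenizes one commit's problems at a time, and routes the resulting edits back to the split the commit came from. A self-contained sketch of that routing, with a plain dict-based `groupby` helper standing in for the project's own:

```python
from collections import defaultdict
from typing import Callable, Hashable, Iterable, Mapping, Sequence, TypeVar

T = TypeVar("T")


def groupby(items: Iterable[T], key: Callable[[T], Hashable]) -> dict[Hashable, list[T]]:
    groups: dict[Hashable, list[T]] = defaultdict(list)
    for item in items:
        groups[key(item)].append(item)
    return dict(groups)


def tokenize_by_commit(
    split2problems: Mapping[str, Sequence[dict]],
    tokenize_group: Callable[[list[dict]], list[str]],
) -> dict[str, list[str]]:
    def get_key(p: dict) -> tuple[str, str]:
        return p["project"], p["commit"]

    # Remember which split each (project, commit) group belongs to,
    key2split = {get_key(p): name for name, split in split2problems.items() for p in split}
    # tokenize the problems one commit at a time,
    key2problems = groupby((p for split in split2problems.values() for p in split), get_key)
    datasets: dict[str, list[str]] = {name: [] for name in split2problems}
    for key, group in key2problems.items():
        # and send each group's edits back to the split it came from.
        datasets[key2split[key]].extend(tokenize_group(group))
    return datasets


probs = {"train": [{"project": "p", "commit": "c1"}], "valid": [{"project": "p", "commit": "c2"}]}
result = tokenize_by_commit(probs, lambda group: [f"edit@{g['commit']}" for g in group])
assert result == {"train": ["edit@c1"], "valid": ["edit@c2"]}
```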
coeditor.encoding/StrDelta.apply_to_input
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless tokenization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> if delta := self._deltas.get(len(lines)):
# module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): lines = input.split("\n") - assert len(lines) <= len(self.deltas) new_lines = list[str]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] <0> for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines)
===========unchanged ref 0=========== at: coeditor.encoding.StrDelta _deltas: Mapping[int, tuple[str, ...]] at: typing.Mapping get(key: _KT) -> Optional[_VT_co] get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: """Stores the line deltas for each line. A line delta is a list of added lines (starting with a '+') followed by optionally a `-` line (for deleting the current line).""" + _deltas: Mapping[int, tuple[str, ...]] - deltas: Sequence[tuple[str, ...]]
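`StrDelta` now stores only the changed lines in a sparse mapping from line index to actions. Below is a self-contained copy of the application logic shown above, plus a small example; note that an entry keyed one past the last line appends new lines at the end.

```python
from dataclasses import dataclass
from typing import Mapping


@dataclass(frozen=True)
class StrDelta:
    """Sparse per-line edits: '+text' inserts a line before the keyed line, '-' deletes it."""

    _deltas: Mapping[int, tuple[str, ...]]

    def apply_to_input(self, input: str) -> str:
        lines = input.split("\n")
        new_lines: list[str] = []
        for i, line in enumerate(lines):
            deleted = False
            if delta := self._deltas.get(i):
                for action in delta:
                    if action[0] == "+":
                        new_lines.append(action[1:])
                    elif action[0] == "-":
                        deleted = True
            if not deleted:
                new_lines.append(line)
        # An entry keyed at len(lines) holds lines appended after the input.
        if delta := self._deltas.get(len(lines)):
            for action in delta:
                if action[0] == "+":
                    new_lines.append(action[1:])
        return "\n".join(new_lines)


delta = StrDelta({1: ("+b2", "-"), 3: ("+d",)})
assert delta.apply_to_input("a\nb\nc") == "a\nb2\nc\nd"
```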
coeditor.encoding/StrDelta.__repr__
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless toknization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a)
# module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self.deltas) if a) <0> return f"StrDelta(\n{line_diffs}\n)"
===========unchanged ref 0=========== at: coeditor.encoding.StrDelta.__repr__ line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: """Stores the line deltas for each line. A line delta is a list of added lines (starting with a '+') followed by optionally a `-` line (for deleting the current line).""" + _deltas: Mapping[int, tuple[str, ...]] - deltas: Sequence[tuple[str, ...]] ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): lines = input.split("\n") - assert len(lines) <= len(self.deltas) new_lines = list[str]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] + if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines)
coeditor.encoding/StrDelta.for_input_range
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless toknization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b}
# module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range - new_delta = self.deltas[line_range[0] : line_range[1]] <0> return StrDelta(new_delta)
===========unchanged ref 0=========== at: coeditor.encoding StrDelta(_deltas: Mapping[int, tuple[str, ...]]) at: coeditor.encoding.StrDelta _deltas: Mapping[int, tuple[str, ...]] at: typing.Mapping items() -> AbstractSet[Tuple[_KT, _VT_co]] ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: """Stores the line deltas for each line. A line delta is a list of added lines (starting with a '+') followed by optionally a `-` line (for deleting the current line).""" + _deltas: Mapping[int, tuple[str, ...]] - deltas: Sequence[tuple[str, ...]] ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self.deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): lines = input.split("\n") - assert len(lines) <= len(self.deltas) new_lines = list[str]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] + if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines)
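`for_input_range` narrows the sparse delta to a window of lines and re-keys it so the window starts at line 0. A tiny stand-alone equivalent operating on a plain dict:

```python
def for_input_range(deltas: dict[int, tuple[str, ...]], a: int, b: int) -> dict[int, tuple[str, ...]]:
    # Keep only keys in [a, b) and shift them so the window starts at 0.
    return {k - a: v for k, v in deltas.items() if a <= k < b}


assert for_input_range({0: ("+x",), 5: ("-",), 9: ("+y",)}, 4, 9) == {1: ("-",)}
```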
coeditor.encoding/StrDelta.__bool__
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless toknization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> return bool(self._deltas)
# module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __bool__(self) -> bool: - return any(bool(a) for a in self.deltas) <0>
===========unchanged ref 0=========== at: coeditor.encoding.StrDelta _deltas: Mapping[int, tuple[str, ...]] ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return StrDelta(new_delta) ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self.deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: """Stores the line deltas for each line. A line delta is a list of added lines (starting with a '+') followed by optionally a `-` line (for deleting the current line).""" + _deltas: Mapping[int, tuple[str, ...]] - deltas: Sequence[tuple[str, ...]] ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): lines = input.split("\n") - assert len(lines) <= len(self.deltas) new_lines = list[str]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] + if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines)
coeditor.encoding/StrDelta.to_tk_delta
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless toknization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> deltas[k] = tuple(line_tk_delta)
# module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def to_tk_delta(self) -> "TkDelta": + deltas = dict[int, tuple[TokenSeq, ...]]() - deltas = [] + for k, line_delta in self._deltas.items(): - for line_delta in self.deltas: line_tk_delta = list[TokenSeq]() for action in line_delta: if action[0] == "+": line_tk_delta.append([Add_id] + encode_basic(action[1:])) elif action[0] == "-": line_tk_delta.append([Del_id]) else: raise ValueError(f"Invalid action: {action}") - deltas.append(tuple(line_tk_delta)) <0> return TkDelta(deltas)
===========unchanged ref 0=========== at: coeditor.common TokenSeq = list[Token] at: coeditor.encoding Add_id = get_tk_id(Add) Del_id = get_tk_id(Del) encode_basic(text: str, add_special_tokens=False) -> TokenSeq TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]]) at: coeditor.encoding.StrDelta _deltas: Mapping[int, tuple[str, ...]] at: typing.Mapping items() -> AbstractSet[Tuple[_KT, _VT_co]] ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return StrDelta(new_delta) ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self.deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: """Stores the line deltas for each line. A line delta is a list of added lines (starting with a '+') followed by optionally a `-` line (for deleting the current line).""" + _deltas: Mapping[int, tuple[str, ...]] - deltas: Sequence[tuple[str, ...]] ===========changed ref 4=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): lines = input.split("\n") - assert len(lines) <= len(self.deltas) new_lines = list[str]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] + if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines)
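`to_tk_delta` translates each string action into token actions: a '+text' line becomes `[Add_id]` followed by the encoded text, and '-' becomes `[Del_id]`. A sketch with placeholder token ids and a per-character stand-in for `encode_basic`:

```python
Add_id, Del_id = -1, -2  # placeholder special-token ids


def to_tk_delta(deltas: dict[int, tuple[str, ...]]) -> dict[int, tuple[list[int], ...]]:
    out: dict[int, tuple[list[int], ...]] = {}
    for k, line_delta in deltas.items():
        line_tk_delta: list[list[int]] = []
        for action in line_delta:
            if action[0] == "+":
                line_tk_delta.append([Add_id] + [ord(c) for c in action[1:]])  # stand-in encoder
            elif action[0] == "-":
                line_tk_delta.append([Del_id])
            else:
                raise ValueError(f"Invalid action: {action}")
        out[k] = tuple(line_tk_delta)
    return out


assert to_tk_delta({2: ("+ab", "-")}) == {2: ([Add_id, 97, 98], [Del_id])}
```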
coeditor.encoding/TkDelta.apply_to_input
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless toknization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> if delta := self._deltas.get(len(lines)):
# module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def apply_to_input(self, input: TokenSeq): lines = split_list(input, Newline_id) - assert len(lines) <= len(self.deltas) new_lines = list[TokenSeq]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == Add_id: new_lines.append(action[1:]) elif action[0] == Del_id: deleted = True if not deleted: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] <0> for action in delta: if action[0] == Add_id: new_lines.append(action[1:]) return join_list(new_lines, Newline_id)
===========unchanged ref 0=========== at: coeditor.common TokenSeq = list[Token] split_list(lst: list[T1], sep: T1) -> list[list[T1]] join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1] at: coeditor.encoding Add_id = get_tk_id(Add) Del_id = get_tk_id(Del) Newline_id = get_tk_id("\n") at: coeditor.encoding.TkDelta _deltas: Mapping[int, tuple[TokenSeq, ...]] at: coeditor.encoding.TkDelta.apply_to_input lines = split_list(input, Newline_id) new_lines = list[TokenSeq]() at: typing.Mapping get(key: _KT) -> Optional[_VT_co] get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: """The Tokenized version of :class:`StrDelta`.""" + _deltas: Mapping[int, tuple[TokenSeq, ...]] - deltas: Sequence[tuple[TokenSeq, ...]] ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." + return sum(len(a) for a in self._deltas.values()) + ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return StrDelta(new_delta) ===========changed ref 4=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self.deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 5=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: """Stores the line deltas for each line. 
A line delta is a list of added lines (starting with a '+') followed by optionally a `-` line (for deleting the current line).""" + _deltas: Mapping[int, tuple[str, ...]] - deltas: Sequence[tuple[str, ...]] ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def to_tk_delta(self) -> "TkDelta": + deltas = dict[int, tuple[TokenSeq, ...]]() - deltas = [] + for k, line_delta in self._deltas.items(): - for line_delta in self.deltas: line_tk_delta = list[TokenSeq]() for action in line_delta: if action[0] == "+": line_tk_delta.append([Add_id] + encode_basic(action[1:])) elif action[0] == "-": line_tk_delta.append([Del_id]) else: raise ValueError(f"Invalid action: {action}") + deltas[k] = tuple(line_tk_delta) - deltas.append(tuple(line_tk_delta)) return TkDelta(deltas) ===========changed ref 7=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): lines = input.split("\n") - assert len(lines) <= len(self.deltas) new_lines = list[str]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] + if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines) ===========changed ref 8=========== # module: coeditor.encoding def line_diffs_to_original_delta(diffs: list[str]) -> tuple[str, StrDelta]: input_lines: list[str] = [] line_delta: list[str] = [] deltas = dict[int, tuple[str, ...]]() for diff_line in diffs: assert diff_line assert not diff_line.endswith("\n"), f"bad diff line: {repr(diff_line)}" if diff_line[0] == "+": line_delta.append(diff_line) elif diff_line[0] == "-": line_delta.append("-") deltas[len(input_lines)] = tuple(line_delta) input_lines.append(diff_line[1:]) line_delta = [] else: assert diff_line[0] == " ", f"unexpected diff_line: {repr(diff_line)}" if line_delta: deltas[len(input_lines)] = tuple(line_delta) line_delta = [] input_lines.append(diff_line[1:]) if line_delta: deltas[len(input_lines)] = tuple(line_delta) - last_change_l = len(input_lines) - else: - last_change_l = len(input_lines) - 1 - str_delta = StrDelta([deltas.get(i, ()) for i in range(last_change_l + 1)]) - + str_delta = StrDelta(deltas) input = "\n".join(input_lines) - delta_size = len(str_delta.deltas) - if not (delta_size - 1 <= count_lines(input) <= delta_size): - print_err("input:", repr(input)) - print_err("deltas:", str_delta.deltas) - print_err("diffs:", diffs) - raise AssertionError("Invalid delta output.") return input, str_delta
coeditor.encoding/TkDelta.to_change_tks
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless toknization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> if delta := self._deltas.get(len(lines)):
# module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def to_change_tks(self, input: TokenSeq) -> TokenSeq: + lines = split_list(input, Newline_id) - lines = split_list(input, Newline_id) if input else [] - if len(lines) > len(self.deltas): - print_err(f"{self.deltas=}") - print_err(f"{input=}") - raise ValueError( - f"Input is longer than delta: {len(lines)=} > {len(self.deltas)=}" - ) + new_lines = list[TokenSeq]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == Add_id: new_lines.append(action) elif action[0] == Del_id: deleted = True if deleted: new_lines.append([Del_id] + line) else: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] <0> for action in delta: if action[0] == Add_id: new_lines.append(action) return join_list(new_lines, Newline_id)
===========unchanged ref 0=========== at: coeditor.common join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1] at: coeditor.encoding Add_id = get_tk_id(Add) Del_id = get_tk_id(Del) Newline_id = get_tk_id("\n") decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str at: coeditor.encoding.TkDelta _deltas: Mapping[int, tuple[TokenSeq, ...]] at: coeditor.encoding.TkDelta.to_change_tks lines = split_list(input, Newline_id) new_lines = list[TokenSeq]() at: typing.Mapping get(key: _KT) -> Optional[_VT_co] get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] items() -> AbstractSet[Tuple[_KT, _VT_co]] ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def get_line_change(self, line: int) -> tuple[TokenSeq, ...]: + return self._deltas.get(line, ()) + ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: """The Tokenized version of :class:`StrDelta`.""" + _deltas: Mapping[int, tuple[TokenSeq, ...]] - deltas: Sequence[tuple[TokenSeq, ...]] ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." + return sum(len(a) for a in self._deltas.values()) + ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 4=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return StrDelta(new_delta) ===========changed ref 5=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self.deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: """Stores the line deltas for each line. 
A line delta is a list of added lines (starting with a '+') followed by optionally a `-` line (for deleting the current line).""" + _deltas: Mapping[int, tuple[str, ...]] - deltas: Sequence[tuple[str, ...]] ===========changed ref 7=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def apply_to_input(self, input: TokenSeq): lines = split_list(input, Newline_id) - assert len(lines) <= len(self.deltas) new_lines = list[TokenSeq]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == Add_id: new_lines.append(action[1:]) elif action[0] == Del_id: deleted = True if not deleted: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] + if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == Add_id: new_lines.append(action[1:]) return join_list(new_lines, Newline_id) ===========changed ref 8=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def to_tk_delta(self) -> "TkDelta": + deltas = dict[int, tuple[TokenSeq, ...]]() - deltas = [] + for k, line_delta in self._deltas.items(): - for line_delta in self.deltas: line_tk_delta = list[TokenSeq]() for action in line_delta: if action[0] == "+": line_tk_delta.append([Add_id] + encode_basic(action[1:])) elif action[0] == "-": line_tk_delta.append([Del_id]) else: raise ValueError(f"Invalid action: {action}") + deltas[k] = tuple(line_tk_delta) - deltas.append(tuple(line_tk_delta)) return TkDelta(deltas) ===========changed ref 9=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): lines = input.split("\n") - assert len(lines) <= len(self.deltas) new_lines = list[str]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] + if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines)
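`to_change_tks` merges the original token stream with its delta into one "change" sequence: deleted lines keep their content behind a `<del>` marker, and added lines appear behind `<add>`. A string-level sketch of the same interleaving, with textual `<add>`/`<del>` markers standing in for the `Add_id`/`Del_id` token ids:

```python
def to_change_lines(lines: list[str], deltas: dict[int, tuple[str, ...]]) -> list[str]:
    new_lines: list[str] = []
    for i, line in enumerate(lines):
        deleted = False
        if delta := deltas.get(i):
            for action in delta:
                if action.startswith("<add>"):
                    new_lines.append(action)  # inserted line, already carries its <add> marker
                elif action == "<del>":
                    deleted = True
        # A deleted line stays visible in the change view, but behind <del>.
        new_lines.append("<del>" + line if deleted else line)
    if delta := deltas.get(len(lines)):  # trailing insertions
        new_lines.extend(a for a in delta if a.startswith("<add>"))
    return new_lines


assert to_change_lines(["x = 1", "y = 2"], {1: ("<add>y = 3", "<del>")}) == [
    "x = 1",
    "<add>y = 3",
    "<del>y = 2",
]
```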
coeditor.encoding/TkDelta.__repr__
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless toknization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> for k, a in self._deltas.items()
# module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __repr__(self): line_diffs = "\n".join( + f" {k}: {tuple(map(decode_tokens, a))}" - f" {l}: {tuple(map(decode_tokens, a))}" - for l, a in enumerate(self.deltas) <0> if a ) return f"TkDelta(\n{line_diffs}\n)"
===========unchanged ref 0=========== at: coeditor.encoding StrDelta(_deltas: Mapping[int, tuple[str, ...]]) TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]]) at: coeditor.encoding.TkDelta _deltas: Mapping[int, tuple[TokenSeq, ...]] at: coeditor.encoding.TkDelta.for_input_range new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: """The Tokenized version of :class:`StrDelta`.""" + _deltas: Mapping[int, tuple[TokenSeq, ...]] - deltas: Sequence[tuple[TokenSeq, ...]] ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: """Stores the line deltas for each line. A line delta is a list of added lines (starting with a '+') followed by optionally a `-` line (for deleting the current line).""" + _deltas: Mapping[int, tuple[str, ...]] - deltas: Sequence[tuple[str, ...]] ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def get_line_change(self, line: int) -> tuple[TokenSeq, ...]: + return self._deltas.get(line, ()) + ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." + return sum(len(a) for a in self._deltas.values()) + ===========changed ref 4=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 5=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return StrDelta(new_delta) ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self.deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 7=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def apply_to_input(self, input: TokenSeq): lines = split_list(input, Newline_id) - assert len(lines) <= len(self.deltas) new_lines = list[TokenSeq]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == Add_id: new_lines.append(action[1:]) elif action[0] == Del_id: deleted = True if not deleted: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] + if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == Add_id: new_lines.append(action[1:]) return join_list(new_lines, Newline_id) ===========changed ref 8=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def to_tk_delta(self) -> "TkDelta": + deltas = dict[int, tuple[TokenSeq, ...]]() - deltas = [] + for k, line_delta in self._deltas.items(): - for line_delta in self.deltas: line_tk_delta = list[TokenSeq]() for action in line_delta: if action[0] == "+": line_tk_delta.append([Add_id] + encode_basic(action[1:])) elif action[0] == "-": line_tk_delta.append([Del_id]) 
else: raise ValueError(f"Invalid action: {action}") + deltas[k] = tuple(line_tk_delta) - deltas.append(tuple(line_tk_delta)) return TkDelta(deltas) ===========changed ref 9=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def to_change_tks(self, input: TokenSeq) -> TokenSeq: + lines = split_list(input, Newline_id) - lines = split_list(input, Newline_id) if input else [] - if len(lines) > len(self.deltas): - print_err(f"{self.deltas=}") - print_err(f"{input=}") - raise ValueError( - f"Input is longer than delta: {len(lines)=} > {len(self.deltas)=}" - ) + new_lines = list[TokenSeq]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == Add_id: new_lines.append(action) elif action[0] == Del_id: deleted = True if deleted: new_lines.append([Del_id] + line) else: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] + if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == Add_id: new_lines.append(action) return join_list(new_lines, Newline_id) ===========changed ref 10=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): lines = input.split("\n") - assert len(lines) <= len(self.deltas) new_lines = list[str]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] + if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines)
coeditor.encoding/TkDelta.for_input_range
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless toknization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b}
# module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range - new_delta = self.deltas[line_range[0] : line_range[1]] <0> return TkDelta(new_delta)
===========unchanged ref 0=========== at: coeditor.encoding Add_id = get_tk_id(Add) decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def get_line_change(self, line: int) -> tuple[TokenSeq, ...]: + return self._deltas.get(line, ()) + ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: """The Tokenized version of :class:`StrDelta`.""" + _deltas: Mapping[int, tuple[TokenSeq, ...]] - deltas: Sequence[tuple[TokenSeq, ...]] ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __repr__(self): line_diffs = "\n".join( + f" {k}: {tuple(map(decode_tokens, a))}" - f" {l}: {tuple(map(decode_tokens, a))}" + for k, a in self._deltas.items() - for l, a in enumerate(self.deltas) if a ) return f"TkDelta(\n{line_diffs}\n)" ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." + return sum(len(a) for a in self._deltas.values()) + ===========changed ref 4=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 5=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return StrDelta(new_delta) ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self.deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 7=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: """Stores the line deltas for each line. 
A line delta is a list of added lines (starting with a '+') followed by optionally a `-` line (for deleting the current line).""" + _deltas: Mapping[int, tuple[str, ...]] - deltas: Sequence[tuple[str, ...]] ===========changed ref 8=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def apply_to_input(self, input: TokenSeq): lines = split_list(input, Newline_id) - assert len(lines) <= len(self.deltas) new_lines = list[TokenSeq]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == Add_id: new_lines.append(action[1:]) elif action[0] == Del_id: deleted = True if not deleted: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] + if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == Add_id: new_lines.append(action[1:]) return join_list(new_lines, Newline_id) ===========changed ref 9=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def to_tk_delta(self) -> "TkDelta": + deltas = dict[int, tuple[TokenSeq, ...]]() - deltas = [] + for k, line_delta in self._deltas.items(): - for line_delta in self.deltas: line_tk_delta = list[TokenSeq]() for action in line_delta: if action[0] == "+": line_tk_delta.append([Add_id] + encode_basic(action[1:])) elif action[0] == "-": line_tk_delta.append([Del_id]) else: raise ValueError(f"Invalid action: {action}") + deltas[k] = tuple(line_tk_delta) - deltas.append(tuple(line_tk_delta)) return TkDelta(deltas) ===========changed ref 10=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def to_change_tks(self, input: TokenSeq) -> TokenSeq: + lines = split_list(input, Newline_id) - lines = split_list(input, Newline_id) if input else [] - if len(lines) > len(self.deltas): - print_err(f"{self.deltas=}") - print_err(f"{input=}") - raise ValueError( - f"Input is longer than delta: {len(lines)=} > {len(self.deltas)=}" - ) + new_lines = list[TokenSeq]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == Add_id: new_lines.append(action) elif action[0] == Del_id: deleted = True if deleted: new_lines.append([Del_id] + line) else: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] + if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == Add_id: new_lines.append(action) return join_list(new_lines, Newline_id)
coeditor.encoding/TkDelta.__bool__
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless toknization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> return bool(self._deltas)
# module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __bool__(self) -> bool: - return any(bool(a) for a in self.deltas) <0>
===========unchanged ref 0=========== at: coeditor.encoding.TkDelta.to_str_delta line_str_delta = list[str]() ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def get_line_change(self, line: int) -> tuple[TokenSeq, ...]: + return self._deltas.get(line, ()) + ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return TkDelta(new_delta) ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: """The Tokenized version of :class:`StrDelta`.""" + _deltas: Mapping[int, tuple[TokenSeq, ...]] - deltas: Sequence[tuple[TokenSeq, ...]] ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __repr__(self): line_diffs = "\n".join( + f" {k}: {tuple(map(decode_tokens, a))}" - f" {l}: {tuple(map(decode_tokens, a))}" + for k, a in self._deltas.items() - for l, a in enumerate(self.deltas) if a ) return f"TkDelta(\n{line_diffs}\n)" ===========changed ref 4=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." + return sum(len(a) for a in self._deltas.values()) + ===========changed ref 5=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return StrDelta(new_delta) ===========changed ref 7=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self.deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 8=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: """Stores the line deltas for each line. 
A line delta is a list of added lines (starting with a '+') followed by optionally a `-` line (for deleting the current line).""" + _deltas: Mapping[int, tuple[str, ...]] - deltas: Sequence[tuple[str, ...]] ===========changed ref 9=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def apply_to_input(self, input: TokenSeq): lines = split_list(input, Newline_id) - assert len(lines) <= len(self.deltas) new_lines = list[TokenSeq]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == Add_id: new_lines.append(action[1:]) elif action[0] == Del_id: deleted = True if not deleted: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] + if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == Add_id: new_lines.append(action[1:]) return join_list(new_lines, Newline_id) ===========changed ref 10=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def to_tk_delta(self) -> "TkDelta": + deltas = dict[int, tuple[TokenSeq, ...]]() - deltas = [] + for k, line_delta in self._deltas.items(): - for line_delta in self.deltas: line_tk_delta = list[TokenSeq]() for action in line_delta: if action[0] == "+": line_tk_delta.append([Add_id] + encode_basic(action[1:])) elif action[0] == "-": line_tk_delta.append([Del_id]) else: raise ValueError(f"Invalid action: {action}") + deltas[k] = tuple(line_tk_delta) - deltas.append(tuple(line_tk_delta)) return TkDelta(deltas) ===========changed ref 11=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def to_change_tks(self, input: TokenSeq) -> TokenSeq: + lines = split_list(input, Newline_id) - lines = split_list(input, Newline_id) if input else [] - if len(lines) > len(self.deltas): - print_err(f"{self.deltas=}") - print_err(f"{input=}") - raise ValueError( - f"Input is longer than delta: {len(lines)=} > {len(self.deltas)=}" - ) + new_lines = list[TokenSeq]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == Add_id: new_lines.append(action) elif action[0] == Del_id: deleted = True if deleted: new_lines.append([Del_id] + line) else: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] + if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == Add_id: new_lines.append(action) return join_list(new_lines, Newline_id)
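The record above (together with its changed refs) shows `StrDelta`/`TkDelta` switching from a dense per-line sequence to a sparse `Mapping` keyed by line number, so only changed lines are stored. As a rough, standalone illustration of that representation, the `apply_delta` helper below is hypothetical and far simpler than the project's real classes, but it applies a sparse delta to a list of lines in the same spirit:

# Minimal sketch of a sparse, mapping-based line delta (hypothetical helper,
# not coeditor's actual StrDelta/TkDelta implementation).
from typing import Mapping, Sequence

def apply_delta(lines: Sequence[str], deltas: Mapping[int, tuple[str, ...]]) -> list[str]:
    out: list[str] = []
    for i, line in enumerate(lines):
        deleted = False
        for action in deltas.get(i, ()):
            if action.startswith("+"):
                out.append(action[1:])  # an added line goes before original line i
            elif action == "-":
                deleted = True  # the current line is dropped
        if not deleted:
            out.append(line)
    for action in deltas.get(len(lines), ()):  # additions after the last line
        if action.startswith("+"):
            out.append(action[1:])
    return out

print(apply_delta(["a", "b"], {1: ("+x", "-"), 2: ("+y",)}))  # ['a', 'x', 'y']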
coeditor.encoding/TkDelta.to_str_delta
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless tokenization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> deltas[k] = tuple(line_str_delta)
# module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def to_str_delta(self) -> StrDelta: + deltas = dict[int, tuple[str, ...]]() - deltas = [] + for k, line_delta in self._deltas.items(): - for line_delta in self.deltas: + line_str_delta = list[str]() - line_str_delta = [] for action in line_delta: if action[0] == Add_id: line_str_delta.append(f"+{decode_tokens(action[1:])}") elif action[0] == Del_id: line_str_delta.append("-") else: raise ValueError(f"Invalid action: {decode_tokens(action)}") - deltas.append(tuple(line_str_delta)) <0> return StrDelta(deltas)
===========unchanged ref 0=========== at: coeditor.common TokenSeq = list[Token] split_list(lst: list[T1], sep: T1) -> list[list[T1]] at: coeditor.encoding Newline_id = get_tk_id("\n") StrDelta(_deltas: Mapping[int, tuple[str, ...]]) TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]]) at: coeditor.encoding.StrDelta num_changes(self) -> int at: coeditor.encoding.TkDelta.to_str_delta line_str_delta = list[str]() at: typing cast(typ: Type[_T], val: Any) -> _T cast(typ: str, val: Any) -> Any cast(typ: object, val: Any) -> Any ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: """The Tokenized version of :class:`StrDelta`.""" + _deltas: Mapping[int, tuple[TokenSeq, ...]] - deltas: Sequence[tuple[TokenSeq, ...]] ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." + return sum(len(a) for a in self._deltas.values()) + ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: """Stores the line deltas for each line. A line delta is a list of added lines (starting with a '+') followed by optionally a `-` line (for deleting the current line).""" + _deltas: Mapping[int, tuple[str, ...]] - deltas: Sequence[tuple[str, ...]] ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 4=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def get_line_change(self, line: int) -> tuple[TokenSeq, ...]: + return self._deltas.get(line, ()) + ===========changed ref 5=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return TkDelta(new_delta) ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __repr__(self): line_diffs = "\n".join( + f" {k}: {tuple(map(decode_tokens, a))}" - f" {l}: {tuple(map(decode_tokens, a))}" + for k, a in self._deltas.items() - for l, a in enumerate(self.deltas) if a ) return f"TkDelta(\n{line_diffs}\n)" ===========changed ref 7=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 8=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return StrDelta(new_delta) ===========changed ref 9=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self.deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 10=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def apply_to_input(self, 
input: TokenSeq): lines = split_list(input, Newline_id) - assert len(lines) <= len(self.deltas) new_lines = list[TokenSeq]() + for i, line in enumerate(lines): - for line, delta in zip(lines, self.deltas): deleted = False + if delta := self._deltas.get(i): - if delta: for action in delta: if action[0] == Add_id: new_lines.append(action[1:]) elif action[0] == Del_id: deleted = True if not deleted: new_lines.append(line) - if len(self.deltas) == len(lines) + 1: - delta = self.deltas[-1] + if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == Add_id: new_lines.append(action[1:]) return join_list(new_lines, Newline_id) ===========changed ref 11=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def to_tk_delta(self) -> "TkDelta": + deltas = dict[int, tuple[TokenSeq, ...]]() - deltas = [] + for k, line_delta in self._deltas.items(): - for line_delta in self.deltas: line_tk_delta = list[TokenSeq]() for action in line_delta: if action[0] == "+": line_tk_delta.append([Add_id] + encode_basic(action[1:])) elif action[0] == "-": line_tk_delta.append([Del_id]) else: raise ValueError(f"Invalid action: {action}") + deltas[k] = tuple(line_tk_delta) - deltas.append(tuple(line_tk_delta)) return TkDelta(deltas)
coeditor.encoding/truncate_section
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless tokenization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> sec = sec[:limit]
# module: coeditor.encoding def truncate_section( sec: TokenSeq, direction: TruncateAt, limit: int, add_bos: bool = True, + inplace: bool = False, ) -> TokenSeq: if len(sec) <= limit: return sec if direction.value == TruncateAt.Left.value: + if inplace: + del sec[:-limit] + else: + sec = sec[-limit:] - sec = sec[-limit:] if add_bos and sec: sec[0] = BOS_id else: assert_eq(direction.value, TruncateAt.Right.value) + if inplace: + del sec[limit:] + else: - sec = sec[:limit] <0> if add_bos and sec: sec[-1] = EOS_id return sec
===========unchanged ref 0=========== at: coeditor.encoding BOS_id = get_tk_id("<s>") EOS_id = get_tk_id("</s>") at: coeditor.encoding.break_into_chunks chunks = list[TokenSeq]() L = len(tks) header = header_f(chunk_id) progress = chunk_size - len(header) - this_overlap start = max(0, end - progress) start = i - this_overlap ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." + return StrDelta.num_changes(cast(StrDelta, self)) + ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def get_line_change(self, line: int) -> tuple[TokenSeq, ...]: + return self._deltas.get(line, ()) + ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return TkDelta(new_delta) ===========changed ref 4=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: """The Tokenized version of :class:`StrDelta`.""" + _deltas: Mapping[int, tuple[TokenSeq, ...]] - deltas: Sequence[tuple[TokenSeq, ...]] ===========changed ref 5=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __repr__(self): line_diffs = "\n".join( + f" {k}: {tuple(map(decode_tokens, a))}" - f" {l}: {tuple(map(decode_tokens, a))}" + for k, a in self._deltas.items() - for l, a in enumerate(self.deltas) if a ) return f"TkDelta(\n{line_diffs}\n)" ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." 
+ return sum(len(a) for a in self._deltas.values()) + ===========changed ref 7=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 8=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def to_str_delta(self) -> StrDelta: + deltas = dict[int, tuple[str, ...]]() - deltas = [] + for k, line_delta in self._deltas.items(): - for line_delta in self.deltas: + line_str_delta = list[str]() - line_str_delta = [] for action in line_delta: if action[0] == Add_id: line_str_delta.append(f"+{decode_tokens(action[1:])}") elif action[0] == Del_id: line_str_delta.append("-") else: raise ValueError(f"Invalid action: {decode_tokens(action)}") + deltas[k] = tuple(line_str_delta) - deltas.append(tuple(line_str_delta)) return StrDelta(deltas) ===========changed ref 9=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return StrDelta(new_delta) ===========changed ref 10=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self.deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 11=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: """Stores the line deltas for each line. A line delta is a list of added lines (starting with a '+') followed by optionally a `-` line (for deleting the current line).""" + _deltas: Mapping[int, tuple[str, ...]] - deltas: Sequence[tuple[str, ...]] ===========changed ref 12=========== # module: coeditor.encoding + def change_tks_to_original_delta(change: TokenSeq) -> tuple[TokenSeq, TkDelta]: + diffs = split_list(change, Newline_id) + input_lines: list[TokenSeq] = [] + line_delta: list[TokenSeq] = [] + deltas = dict[int, tuple[TokenSeq, ...]]() + + for diff_line in diffs: + if diff_line and diff_line[0] == Add_id: + line_delta.append(diff_line) + elif diff_line and diff_line[0] == Del_id: + line_delta.append([Del_id]) + deltas[len(input_lines)] = tuple(line_delta) + del diff_line[:1] + input_lines.append(diff_line) + line_delta = [] + else: + if line_delta: + deltas[len(input_lines)] = tuple(line_delta) + line_delta = [] + input_lines.append(diff_line) + if line_delta: + deltas[len(input_lines)] = tuple(line_delta) + + str_delta = TkDelta(deltas) + input = join_list(input_lines, Newline_id) + return input, str_delta +
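The `truncate_section` record above adds an `inplace` option that trims the list with `del` instead of slicing, avoiding a copy when the caller owns the sequence. Below is a small self-contained sketch of the same left/right truncation idea; the `BOS`/`EOS` ids and the `truncate` helper are made up for illustration and are not the project's tokenizer constants.

# Toy sketch of left/right truncation with an in-place option.
BOS, EOS = -1, -2

def truncate(seq: list[int], limit: int, left: bool, inplace: bool = False) -> list[int]:
    if len(seq) <= limit:
        return seq
    if left:
        if inplace:
            del seq[:-limit]  # mutate in place, no new list allocated
        else:
            seq = seq[-limit:]
        if seq:
            seq[0] = BOS  # mark that the left side was cut off
    else:
        if inplace:
            del seq[limit:]
        else:
            seq = seq[:limit]
        if seq:
            seq[-1] = EOS  # mark that the right side was cut off
    return seq

xs = list(range(10))
print(truncate(xs, 4, left=True, inplace=True), xs)  # [-1, 7, 8, 9] [-1, 7, 8, 9]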
coeditor.encoding/truncate_sections
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless tokenization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> truncate_section(tks, truncate_dir, section_lens[i], add_bos, inplace=inplace)
# module: coeditor.encoding def truncate_sections( total_limit: int, *sections: tuple[TokenSeq, TruncateAt], add_bos: bool, + inplace: bool = False, ) -> tuple[TokenSeq, ...]: """Truncate a list of token sequences to fit within a total length limit. Earlier sections have priority over later sections. """ # first, reserve equal space to each section section_lens = [total_limit // len(sections) for _ in sections] remaining = total_limit for i, (tks, _) in enumerate(sections): l = min(len(tks), section_lens[i]) remaining -= l section_lens[i] = l assert remaining >= 0 # for the unused space, assign to ealier sections when possible for i, (tks, _) in enumerate(sections): if remaining <= 0: break inc = min(remaining, len(tks) - section_lens[i]) section_lens[i] += inc remaining -= inc return tuple( - truncate_section(tks, truncate_dir, section_lens[i], add_bos) <0> for i, (tks, truncate_dir) in enumerate(sections) )
===========unchanged ref 0=========== at: coeditor._utils assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None at: coeditor.common TokenSeq = list[Token] at: coeditor.encoding BOS_id = get_tk_id("<s>") EOS_id = get_tk_id("</s>") TruncateAt() at: coeditor.encoding.TruncateAt Left = 0 Right = 1 at: enum.Enum name: str value: Any _name_: str _value_: Any _member_names_: List[str] # undocumented _member_map_: Dict[str, Enum] # undocumented _value2member_map_: Dict[int, Enum] # undocumented _ignore_: Union[str, List[str]] _order_: str __order__: str ===========changed ref 0=========== # module: coeditor.encoding def truncate_section( sec: TokenSeq, direction: TruncateAt, limit: int, add_bos: bool = True, + inplace: bool = False, ) -> TokenSeq: if len(sec) <= limit: return sec if direction.value == TruncateAt.Left.value: + if inplace: + del sec[:-limit] + else: + sec = sec[-limit:] - sec = sec[-limit:] if add_bos and sec: sec[0] = BOS_id else: assert_eq(direction.value, TruncateAt.Right.value) + if inplace: + del sec[limit:] + else: + sec = sec[:limit] - sec = sec[:limit] if add_bos and sec: sec[-1] = EOS_id return sec ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." + return StrDelta.num_changes(cast(StrDelta, self)) + ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def get_line_change(self, line: int) -> tuple[TokenSeq, ...]: + return self._deltas.get(line, ()) + ===========changed ref 4=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return TkDelta(new_delta) ===========changed ref 5=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: """The Tokenized version of :class:`StrDelta`.""" + _deltas: Mapping[int, tuple[TokenSeq, ...]] - deltas: Sequence[tuple[TokenSeq, ...]] ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __repr__(self): line_diffs = "\n".join( + f" {k}: {tuple(map(decode_tokens, a))}" - f" {l}: {tuple(map(decode_tokens, a))}" + for k, a in self._deltas.items() - for l, a in enumerate(self.deltas) if a ) return f"TkDelta(\n{line_diffs}\n)" ===========changed ref 7=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." 
+ return sum(len(a) for a in self._deltas.values()) + ===========changed ref 8=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 9=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def to_str_delta(self) -> StrDelta: + deltas = dict[int, tuple[str, ...]]() - deltas = [] + for k, line_delta in self._deltas.items(): - for line_delta in self.deltas: + line_str_delta = list[str]() - line_str_delta = [] for action in line_delta: if action[0] == Add_id: line_str_delta.append(f"+{decode_tokens(action[1:])}") elif action[0] == Del_id: line_str_delta.append("-") else: raise ValueError(f"Invalid action: {decode_tokens(action)}") + deltas[k] = tuple(line_str_delta) - deltas.append(tuple(line_str_delta)) return StrDelta(deltas) ===========changed ref 10=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return StrDelta(new_delta) ===========changed ref 11=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self.deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 12=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: """Stores the line deltas for each line. A line delta is a list of added lines (starting with a '+') followed by optionally a `-` line (for deleting the current line).""" + _deltas: Mapping[int, tuple[str, ...]] - deltas: Sequence[tuple[str, ...]]
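The `truncate_sections` record above budgets a shared token limit across several sections in two passes: every section first gets an equal share, then any unused slack flows to earlier (higher-priority) sections. The `allocate` function below is an illustrative, stripped-down sketch of just that allocation step, not part of the project:

# Two-pass length budgeting: equal shares first, then slack to earlier sections.
def allocate(total: int, lengths: list[int]) -> list[int]:
    share = total // len(lengths)
    budget = [min(n, share) for n in lengths]
    remaining = total - sum(budget)
    for i, n in enumerate(lengths):
        if remaining <= 0:
            break
        inc = min(remaining, n - budget[i])
        budget[i] += inc
        remaining -= inc
    return budget

print(allocate(100, [80, 10, 60]))  # [57, 10, 33]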
tests.coeditor.test_code_change/TestChangedSpan.check_changed_spans
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless tokenization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> assert_eq(line_change, n, extra_message=lambda: f"{i=}, {span.change=}")
# module: tests.coeditor.test_code_change class TestChangedSpan: @staticmethod def check_changed_spans( changed_spans: Sequence[ChangedSpan], *expects: tuple[type, int] ): print(f"{changed_spans=}\nchanges={[cs.change for cs in changed_spans]}") assert_eq( len(changed_spans), len(expects), ) for i, (change_type, n) in enumerate(expects): span = changed_spans[i] assert_eq(type(span.change), change_type) nl_change = span.change.map(count_lines) line_change = nl_change.later() - nl_change.earlier() - assert_eq(line_change, n) <0>
===========unchanged ref 0=========== at: tests.coeditor.test_code_change.TestChangedSpan code1 = dedent( """\ import os x = 1 y = x + 1 def f1(): global x x *= 5 return x if __name__ == "__main__": print(f1() + x) """ ) scope1 = ChangeScope.from_tree(ProjectPath("code1", ""), code_to_module(code1)) ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def get_line_change(self, line: int) -> tuple[TokenSeq, ...]: + return self._deltas.get(line, ()) + ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." + return StrDelta.num_changes(cast(StrDelta, self)) + ===========changed ref 4=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." + return sum(len(a) for a in self._deltas.values()) + ===========changed ref 5=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: """The Tokenized version of :class:`StrDelta`.""" + _deltas: Mapping[int, tuple[TokenSeq, ...]] - deltas: Sequence[tuple[TokenSeq, ...]] ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: """Stores the line deltas for each line. 
A line delta is a list of added lines (starting with a '+') followed by optionally a `-` line (for deleting the current line).""" + _deltas: Mapping[int, tuple[str, ...]] - deltas: Sequence[tuple[str, ...]] ===========changed ref 7=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return StrDelta(new_delta) ===========changed ref 8=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return TkDelta(new_delta) ===========changed ref 9=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self.deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 10=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __repr__(self): line_diffs = "\n".join( + f" {k}: {tuple(map(decode_tokens, a))}" - f" {l}: {tuple(map(decode_tokens, a))}" + for k, a in self._deltas.items() - for l, a in enumerate(self.deltas) if a ) return f"TkDelta(\n{line_diffs}\n)" ===========changed ref 11=========== # module: coeditor.encoding def truncate_section( sec: TokenSeq, direction: TruncateAt, limit: int, add_bos: bool = True, + inplace: bool = False, ) -> TokenSeq: if len(sec) <= limit: return sec if direction.value == TruncateAt.Left.value: + if inplace: + del sec[:-limit] + else: + sec = sec[-limit:] - sec = sec[-limit:] if add_bos and sec: sec[0] = BOS_id else: assert_eq(direction.value, TruncateAt.Right.value) + if inplace: + del sec[limit:] + else: + sec = sec[:limit] - sec = sec[:limit] if add_bos and sec: sec[-1] = EOS_id return sec ===========changed ref 12=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def to_tk_delta(self) -> "TkDelta": + deltas = dict[int, tuple[TokenSeq, ...]]() - deltas = [] + for k, line_delta in self._deltas.items(): - for line_delta in self.deltas: line_tk_delta = list[TokenSeq]() for action in line_delta: if action[0] == "+": line_tk_delta.append([Add_id] + encode_basic(action[1:])) elif action[0] == "-": line_tk_delta.append([Del_id]) else: raise ValueError(f"Invalid action: {action}") + deltas[k] = tuple(line_tk_delta) - deltas.append(tuple(line_tk_delta)) return TkDelta(deltas) ===========changed ref 13=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def to_str_delta(self) -> StrDelta: + deltas = dict[int, tuple[str, ...]]() - deltas = [] + for k, line_delta in self._deltas.items(): - for line_delta in self.deltas: + line_str_delta = list[str]() - line_str_delta = [] for action in line_delta: if action[0] == Add_id: line_str_delta.append(f"+{decode_tokens(action[1:])}") elif action[0] == Del_id: line_str_delta.append("-") else: raise ValueError(f"Invalid action: {decode_tokens(action)}") + deltas[k] = tuple(line_str_delta) - deltas.append(tuple(line_str_delta)) return StrDelta(deltas)
coeditor.ctx_change_encoder/ChangedHeader.__repr__
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless tokenization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> f"change={self.change_tks})"
# module: coeditor.ctx_change_encoder @dataclass(frozen=True) class ChangedHeader: def __repr__(self) -> str: return ( f"ChangedHeader(path={self.path}, range={self.line_range}, " - f"change={self.change})" <0> )
===========unchanged ref 0=========== at: coeditor.ctx_change_encoder.ChangedHeader change_tks: TkArray at: spot.static_analysis ProjectPath(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any) ===========changed ref 0=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class ChangedHeader: """Represents the changes made to a header. This format does not store parent syntax nodes and is more suitable for serialization. """ + change_tks: TkArray - change: Change[str] # below are pre-edit attributes type: str line_range: LineRange path: ProjectPath ===========changed ref 1=========== + # module: coeditor.tk_array + + ===========changed ref 2=========== + # module: coeditor.tk_array + class TkArray(ABC): + @abstractmethod + def tolist(self) -> TokenSeq: + ... + ===========changed ref 3=========== + # module: coeditor.tk_array + class TkArray(ABC): + @abstractmethod + def __len__(self) -> int: + ... + ===========changed ref 4=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _TruncatedTkArray(TkArray): + def __len__(self) -> int: + return self.length + ===========changed ref 5=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _JoinedTkArray(TkArray): + def __len__(self) -> int: + return self.length + ===========changed ref 6=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _NumpyTkArray(TkArray): + data: np.ndarray + ===========changed ref 7=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _NumpyTkArray(TkArray): + def __len__(self) -> int: + return len(self.data) + ===========changed ref 8=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _NumpyTkArray(TkArray): + def tolist(self) -> TokenSeq: + return self.data.tolist() + ===========changed ref 9=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def get_line_change(self, line: int) -> tuple[TokenSeq, ...]: + return self._deltas.get(line, ()) + ===========changed ref 10=========== + # module: coeditor.tk_array + class TkArray(ABC): + @staticmethod + def new(tks: Sequence[int]) -> "TkArray": + return _NumpyTkArray(np.array(tks, dtype=np.int32)) + ===========changed ref 11=========== + # module: coeditor.tk_array + class TkArray(ABC): + def __repr__(self) -> str: + return f"TkArray(length={len(self)}, text={repr(self._peek())})" + ===========changed ref 12=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 13=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 14=========== + # module: coeditor.tk_array + class TkArray(ABC): + @staticmethod + def join(segs: Iterable["TkArray"], sep: int | None) -> "TkArray": + return _JoinedTkArray(tuple(segs), sep, sum(len(seg) for seg in segs)) + ===========changed ref 15=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _TruncatedTkArray(TkArray): + def tolist(self) -> TokenSeq: + return truncate_section( + self.original.tolist(), self.direction, self.length, inplace=True + ) + ===========changed ref 16=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." 
+ return StrDelta.num_changes(cast(StrDelta, self)) + ===========changed ref 17=========== + # module: coeditor.tk_array + class TkArray(ABC): + def truncate(self, dir: TruncateAt, new_len: int) -> "TkArray": + if new_len >= len(self): + return self + return _TruncatedTkArray(self, dir, new_len) + ===========changed ref 18=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." + return sum(len(a) for a in self._deltas.values()) + ===========changed ref 19=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _TruncatedTkArray(TkArray): + "A chain-like data structure for concatenated `TkArray`s." + original: TkArray + direction: TruncateAt + length: int + ===========changed ref 20=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _JoinedTkArray(TkArray): + "A chain-like data structure for concatenated `TkArray`s." + + segs: tuple[TkArray, ...] + sep: int | None + length: int + ===========changed ref 21=========== + # module: coeditor.tk_array + class TkArray(ABC): + def _peek(self) -> str: + tks = self.tolist() + text = decode_tokens(tks) + if len(text) > 100: + text = text[:100] + "..." + return text + ===========changed ref 22=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: """The Tokenized version of :class:`StrDelta`.""" + _deltas: Mapping[int, tuple[TokenSeq, ...]] - deltas: Sequence[tuple[TokenSeq, ...]] ===========changed ref 23=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _JoinedTkArray(TkArray): + def tolist(self) -> TokenSeq: + result = TokenSeq() + for i, seg in enumerate(self.segs): + if self.sep is not None and i > 0: + result.append(self.sep) + result.extend(seg.tolist()) + return result + ===========changed ref 24=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: """Stores the line deltas for each line. A line delta is a list of added lines (starting with a '+') followed by optionally a `-` line (for deleting the current line).""" + _deltas: Mapping[int, tuple[str, ...]] - deltas: Sequence[tuple[str, ...]] ===========changed ref 25=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" + a, b = line_range + new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} - new_delta = self.deltas[line_range[0] : line_range[1]] return StrDelta(new_delta)
coeditor.ctx_change_encoder/TkC3Problem.references
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless tokenization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> return [ref for _, ref in self.named_references]
# module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): @property + def references(self) -> Sequence[TkArray]: - def references(self) -> Sequence[TokenSeq]: - return [ref for name, ref in self.named_references] <0>
===========unchanged ref 0=========== at: coeditor.ctx_change_encoder.C3ProblemGenerator VERSION = "2.1" max_span_lines: int = 500 max_lines_to_edit: int = 20 max_problems_per_elem: int = 4 at: coeditor.ctx_change_encoder.C3ProblemGenerator.process_change edit_ranges = list[range]() r = range(i, min(n_lines, i + self.max_lines_to_edit)) c = change_counts(r) ===========changed ref 0=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): "Tokenized contextual code change prediction problem." + input: TkArray + output: TkArray - input_tks: TokenSeq - output_tks: TokenSeq path: ProjectPath change_type: Change[None] # most relevant to least relevant + named_references: Sequence[tuple[str, TkArray]] - named_references: Sequence[tuple[str, TokenSeq]] project: str commit: CommitInfo | None ===========changed ref 1=========== # module: coeditor.ctx_change_encoder class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]): + VERSION = "2.1" - VERSION = "2.0" # change spans with more than this many lines will be ignored max_span_lines: int = 500 + max_lines_to_edit: int = 20 + max_problems_per_elem: int = 4 ===========changed ref 2=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class ChangedHeader: def __repr__(self) -> str: return ( f"ChangedHeader(path={self.path}, range={self.line_range}, " + f"change={self.change_tks})" - f"change={self.change})" ) ===========changed ref 3=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class ChangedCodeSpan: """Represents the changes made to a span of code. This format does not store parent syntax nodes and is more suitable for serialization. """ headers: Sequence[ChangedHeader] + change_tks: TkArray - change: Change[str] # below are pre-edit attributes line_range: LineRange module: ModuleName ===========changed ref 4=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class C3Problem: "Contextual code change prediction problem." span: ChangedCodeSpan + # the lines to be edited + edit_lines: Collection[int] # most relevant to least relevant relevant_changes: Sequence[ChangedCodeSpan] # most relevant to least relevant relevant_unchanged: Sequence[ChangedCodeSpan] # some optional information about how the problem was generated + change_type: Change[None] src_info: SrcInfo ===========changed ref 5=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class ChangedHeader: """Represents the changes made to a header. This format does not store parent syntax nodes and is more suitable for serialization. """ + change_tks: TkArray - change: Change[str] # below are pre-edit attributes type: str line_range: LineRange path: ProjectPath ===========changed ref 6=========== + # module: coeditor.tk_array + + ===========changed ref 7=========== + # module: coeditor.tk_array + class TkArray(ABC): + @abstractmethod + def tolist(self) -> TokenSeq: + ... + ===========changed ref 8=========== + # module: coeditor.tk_array + class TkArray(ABC): + @abstractmethod + def __len__(self) -> int: + ... 
+ ===========changed ref 9=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _TruncatedTkArray(TkArray): + def __len__(self) -> int: + return self.length + ===========changed ref 10=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _JoinedTkArray(TkArray): + def __len__(self) -> int: + return self.length + ===========changed ref 11=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _NumpyTkArray(TkArray): + data: np.ndarray + ===========changed ref 12=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _NumpyTkArray(TkArray): + def __len__(self) -> int: + return len(self.data) + ===========changed ref 13=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _NumpyTkArray(TkArray): + def tolist(self) -> TokenSeq: + return self.data.tolist() + ===========changed ref 14=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def get_line_change(self, line: int) -> tuple[TokenSeq, ...]: + return self._deltas.get(line, ()) + ===========changed ref 15=========== + # module: coeditor.tk_array + class TkArray(ABC): + @staticmethod + def new(tks: Sequence[int]) -> "TkArray": + return _NumpyTkArray(np.array(tks, dtype=np.int32)) + ===========changed ref 16=========== + # module: coeditor.tk_array + class TkArray(ABC): + def __repr__(self) -> str: + return f"TkArray(length={len(self)}, text={repr(self._peek())})" + ===========changed ref 17=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 18=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 19=========== + # module: coeditor.tk_array + class TkArray(ABC): + @staticmethod + def join(segs: Iterable["TkArray"], sep: int | None) -> "TkArray": + return _JoinedTkArray(tuple(segs), sep, sum(len(seg) for seg in segs)) + ===========changed ref 20=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _TruncatedTkArray(TkArray): + def tolist(self) -> TokenSeq: + return truncate_section( + self.original.tolist(), self.direction, self.length, inplace=True + ) + ===========changed ref 21=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." + return StrDelta.num_changes(cast(StrDelta, self)) + ===========changed ref 22=========== + # module: coeditor.tk_array + class TkArray(ABC): + def truncate(self, dir: TruncateAt, new_len: int) -> "TkArray": + if new_len >= len(self): + return self + return _TruncatedTkArray(self, dir, new_len) + ===========changed ref 23=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." + return sum(len(a) for a in self._deltas.values()) + ===========changed ref 24=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _TruncatedTkArray(TkArray): + "A chain-like data structure for concatenated `TkArray`s." + original: TkArray + direction: TruncateAt + length: int +
coeditor.ctx_change_encoder/TkC3Problem.all_ctxs
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless tokenization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> return {name: ref.tolist() for name, ref in self.named_references}
# module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): def all_ctxs(self) -> dict[str, TokenSeq]: - return {name: ref for name, ref in self.named_references} <0>
===========unchanged ref 0=========== at: coeditor.ctx_change_encoder.C3ProblemGenerator.process_change processed_cspans = list[ChangedCodeSpan]() problems = list[C3Problem]() code_span = ChangedCodeSpan( headers=[to_header(cs) for cs in span.parent_scopes], change_tks=TkArray.new(change_tks), line_range=span.line_range, module=span.path.module, ) prob = C3Problem( code_span, r, relevant_changes=relevant_changes, relevant_unchanged=relevant_unchanged, change_type=span.change.map(lambda _: None), src_info=src_info, ) ===========changed ref 0=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): @property + def references(self) -> Sequence[TkArray]: - def references(self) -> Sequence[TokenSeq]: + return [ref for _, ref in self.named_references] - return [ref for name, ref in self.named_references] ===========changed ref 1=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): "Tokenized contextual code change prediction problem." + input: TkArray + output: TkArray - input_tks: TokenSeq - output_tks: TokenSeq path: ProjectPath change_type: Change[None] # most relevant to least relevant + named_references: Sequence[tuple[str, TkArray]] - named_references: Sequence[tuple[str, TokenSeq]] project: str commit: CommitInfo | None ===========changed ref 2=========== # module: coeditor.ctx_change_encoder class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]): + VERSION = "2.1" - VERSION = "2.0" # change spans with more than this many lines will be ignored max_span_lines: int = 500 + max_lines_to_edit: int = 20 + max_problems_per_elem: int = 4 ===========changed ref 3=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class ChangedHeader: def __repr__(self) -> str: return ( f"ChangedHeader(path={self.path}, range={self.line_range}, " + f"change={self.change_tks})" - f"change={self.change})" ) ===========changed ref 4=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class ChangedCodeSpan: """Represents the changes made to a span of code. This format does not store parent syntax nodes and is more suitable for serialization. """ headers: Sequence[ChangedHeader] + change_tks: TkArray - change: Change[str] # below are pre-edit attributes line_range: LineRange module: ModuleName ===========changed ref 5=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class C3Problem: "Contextual code change prediction problem." span: ChangedCodeSpan + # the lines to be edited + edit_lines: Collection[int] # most relevant to least relevant relevant_changes: Sequence[ChangedCodeSpan] # most relevant to least relevant relevant_unchanged: Sequence[ChangedCodeSpan] # some optional information about how the problem was generated + change_type: Change[None] src_info: SrcInfo ===========changed ref 6=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class ChangedHeader: """Represents the changes made to a header. This format does not store parent syntax nodes and is more suitable for serialization. """ + change_tks: TkArray - change: Change[str] # below are pre-edit attributes type: str line_range: LineRange path: ProjectPath ===========changed ref 7=========== + # module: coeditor.tk_array + + ===========changed ref 8=========== + # module: coeditor.tk_array + class TkArray(ABC): + @abstractmethod + def tolist(self) -> TokenSeq: + ... 
+ ===========changed ref 9=========== + # module: coeditor.tk_array + class TkArray(ABC): + @abstractmethod + def __len__(self) -> int: + ... + ===========changed ref 10=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _TruncatedTkArray(TkArray): + def __len__(self) -> int: + return self.length + ===========changed ref 11=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _JoinedTkArray(TkArray): + def __len__(self) -> int: + return self.length + ===========changed ref 12=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _NumpyTkArray(TkArray): + data: np.ndarray + ===========changed ref 13=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _NumpyTkArray(TkArray): + def __len__(self) -> int: + return len(self.data) + ===========changed ref 14=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _NumpyTkArray(TkArray): + def tolist(self) -> TokenSeq: + return self.data.tolist() + ===========changed ref 15=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def get_line_change(self, line: int) -> tuple[TokenSeq, ...]: + return self._deltas.get(line, ()) + ===========changed ref 16=========== + # module: coeditor.tk_array + class TkArray(ABC): + @staticmethod + def new(tks: Sequence[int]) -> "TkArray": + return _NumpyTkArray(np.array(tks, dtype=np.int32)) + ===========changed ref 17=========== + # module: coeditor.tk_array + class TkArray(ABC): + def __repr__(self) -> str: + return f"TkArray(length={len(self)}, text={repr(self._peek())})" + ===========changed ref 18=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 19=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas) ===========changed ref 20=========== + # module: coeditor.tk_array + class TkArray(ABC): + @staticmethod + def join(segs: Iterable["TkArray"], sep: int | None) -> "TkArray": + return _JoinedTkArray(tuple(segs), sep, sum(len(seg) for seg in segs)) + ===========changed ref 21=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _TruncatedTkArray(TkArray): + def tolist(self) -> TokenSeq: + return truncate_section( + self.original.tolist(), self.direction, self.length, inplace=True + ) + ===========changed ref 22=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def num_changes(self) -> int: + "Return the number of changed lines in the delta." + return StrDelta.num_changes(cast(StrDelta, self)) +
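Several of the records above replace raw `TokenSeq` fields with `TkArray`, a lazy token container that can join or truncate segments without materializing a Python list until `tolist()` is called. The toy class below sketches the joining idea only; it is not `coeditor.tk_array`, and the real version also supports truncation and numpy-backed storage:

# Toy lazily-joined token array; materializes only on tolist().
from dataclasses import dataclass
import numpy as np

@dataclass(frozen=True)
class Joined:
    segs: tuple[np.ndarray, ...]
    sep: int | None = None

    def __len__(self) -> int:
        seps = len(self.segs) - 1 if self.sep is not None and self.segs else 0
        return sum(len(s) for s in self.segs) + seps

    def tolist(self) -> list[int]:
        out: list[int] = []
        for i, seg in enumerate(self.segs):
            if self.sep is not None and i > 0:
                out.append(self.sep)
            out.extend(seg.tolist())
        return out

j = Joined((np.array([1, 2]), np.array([3])), sep=0)
print(len(j), j.tolist())  # 4 [1, 2, 0, 3]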
coeditor.ctx_change_encoder/C3ProblemTokenizer.__post_init__
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless tokenization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> self._offset_cache = LRUCache[int, TkArray](maxsize=100)
# module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def __post_init__(self): - self._change_cache = LRUCache[Change[str], TokenSeq](maxsize=500) <0>
===========unchanged ref 0=========== at: coeditor.common TokenSeq = list[Token] at: coeditor.ctx_change_encoder.TkC3Problem input: TkArray output: TkArray path: ProjectPath change_type: Change[None] named_references: Sequence[tuple[str, TkArray]] project: str commit: CommitInfo | None at: coeditor.encoding.TokenizedEdit input_tks: TokenSeq output_tks: TokenSeq main_tks: TokenSeq path: ProjectPath change_type: Change[None] BAD_DELETE = encode_single_line("((bad delete))") ===========changed ref 0=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): + @property + def input_tks(self) -> TokenSeq: + return self.input.tolist() + ===========changed ref 1=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): + @property + def output_tks(self) -> TokenSeq: + return self.output.tolist() + ===========changed ref 2=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): def all_ctxs(self) -> dict[str, TokenSeq]: + return {name: ref.tolist() for name, ref in self.named_references} - return {name: ref for name, ref in self.named_references} ===========changed ref 3=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): @property + def references(self) -> Sequence[TkArray]: - def references(self) -> Sequence[TokenSeq]: + return [ref for _, ref in self.named_references] - return [ref for name, ref in self.named_references] ===========changed ref 4=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: + VERSION = "2.0" - VERSION = "1.0" max_ref_tks: int = 512 max_query_tks: int = 512 max_output_tks: int = 256 max_scope_tks: int = 128 - max_lines_to_edit: int = 20 ref_chunk_overlap: int = 32 - max_chunks_per_elem: int = 4 - skip_unchanged_problems: bool = True ===========changed ref 5=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): "Tokenized contextual code change prediction problem." + input: TkArray + output: TkArray - input_tks: TokenSeq - output_tks: TokenSeq path: ProjectPath change_type: Change[None] # most relevant to least relevant + named_references: Sequence[tuple[str, TkArray]] - named_references: Sequence[tuple[str, TokenSeq]] project: str commit: CommitInfo | None ===========changed ref 6=========== # module: coeditor.ctx_change_encoder class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]): + VERSION = "2.1" - VERSION = "2.0" # change spans with more than this many lines will be ignored max_span_lines: int = 500 + max_lines_to_edit: int = 20 + max_problems_per_elem: int = 4 ===========changed ref 7=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class ChangedHeader: def __repr__(self) -> str: return ( f"ChangedHeader(path={self.path}, range={self.line_range}, " + f"change={self.change_tks})" - f"change={self.change})" ) ===========changed ref 8=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class ChangedCodeSpan: """Represents the changes made to a span of code. This format does not store parent syntax nodes and is more suitable for serialization. 
""" headers: Sequence[ChangedHeader] + change_tks: TkArray - change: Change[str] # below are pre-edit attributes line_range: LineRange module: ModuleName ===========changed ref 9=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class C3Problem: "Contextual code change prediction problem." span: ChangedCodeSpan + # the lines to be edited + edit_lines: Collection[int] # most relevant to least relevant relevant_changes: Sequence[ChangedCodeSpan] # most relevant to least relevant relevant_unchanged: Sequence[ChangedCodeSpan] # some optional information about how the problem was generated + change_type: Change[None] src_info: SrcInfo ===========changed ref 10=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class ChangedHeader: """Represents the changes made to a header. This format does not store parent syntax nodes and is more suitable for serialization. """ + change_tks: TkArray - change: Change[str] # below are pre-edit attributes type: str line_range: LineRange path: ProjectPath ===========changed ref 11=========== + # module: coeditor.tk_array + + ===========changed ref 12=========== + # module: coeditor.tk_array + class TkArray(ABC): + @abstractmethod + def tolist(self) -> TokenSeq: + ... + ===========changed ref 13=========== + # module: coeditor.tk_array + class TkArray(ABC): + @abstractmethod + def __len__(self) -> int: + ... + ===========changed ref 14=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _TruncatedTkArray(TkArray): + def __len__(self) -> int: + return self.length + ===========changed ref 15=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _JoinedTkArray(TkArray): + def __len__(self) -> int: + return self.length + ===========changed ref 16=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _NumpyTkArray(TkArray): + data: np.ndarray + ===========changed ref 17=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _NumpyTkArray(TkArray): + def __len__(self) -> int: + return len(self.data) + ===========changed ref 18=========== + # module: coeditor.tk_array + @dataclass(frozen=True) + class _NumpyTkArray(TkArray): + def tolist(self) -> TokenSeq: + return self.data.tolist() + ===========changed ref 19=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def get_line_change(self, line: int) -> tuple[TokenSeq, ...]: + return self._deltas.get(line, ()) + ===========changed ref 20=========== + # module: coeditor.tk_array + class TkArray(ABC): + @staticmethod + def new(tks: Sequence[int]) -> "TkArray": + return _NumpyTkArray(np.array(tks, dtype=np.int32)) + ===========changed ref 21=========== + # module: coeditor.tk_array + class TkArray(ABC): + def __repr__(self) -> str: + return f"TkArray(length={len(self)}, text={repr(self._peek())})" + ===========changed ref 22=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __bool__(self) -> bool: + return bool(self._deltas) - return any(bool(a) for a in self.deltas)
coeditor.ctx_change_encoder/C3ProblemTokenizer._group_encode_changed_refs
Modified
temp-1
bcbd5c8b6ee7e867f66e2ccdd55dfd8e220b8ae8
Move lossless tokenization logic and problem splitting logic into C3ProblemGenerator.
<0>:<add> join_list(segs),
# module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def _group_encode_changed_refs( self, changes: Sequence[ChangedCodeSpan] ) -> Sequence[TokenSeq]: <s> c.module) all_chunks = list[TokenSeq]() for change_group in module2changes.values(): change_group.sort(key=lambda c: c.line_range[0]) + segs = list[TokenSeq]() - file_tks = TokenSeq() # we'll add module as the chunk header, so we start within the module last_scope = change_group[0].headers[:1] for c in change_group: header_diff = list[ChangedHeader]() for i, h in enumerate(c.headers): if i >= len(last_scope) or h.path != last_scope[i].path: header_diff.append(h) if header_diff: header_tks = self._encode_headers(header_diff, 0) + segs.append(header_tks) - file_tks.extend(header_tks) - body_tks = self._encode_change(c.change.map(lambda c: c.strip("\n"))) - file_tks.extend(body_tks) + segs.append(c.change_tks.tolist()) + segs.append([Newline_id, Newline_id]) - file_tks.append(Newline_id) - file_tks.append(Newline_id) last_scope = c.headers + segs.append([Newline_id]) - mod_change = change_group[0].headers[:1] mod_chunks = break_into_chunks( - file_tks, <0> lambda i: self._encode_headers(mod_change, i), self.max_ref_tks, overlap=self.ref_chunk_overlap, ) all_chunks.extend(mod_chunks) return all_chunks
===========above chunk 0=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def _group_encode_changed_refs( self, changes: Sequence[ChangedCodeSpan] ) -> Sequence[TokenSeq]: # offset: -1 module2changes = groupby(changes, lambda c: c.module) all_chunks = list[TokenSeq]() for change_group in module2changes.values(): </s> ===========unchanged ref 0=========== at: coeditor._utils TimeLogger(times: dict[str, list[float]]=field(default_factory=dict)) repr_modified_args(instance, flatten: bool=False) -> str at: coeditor.common join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1] at: coeditor.ctx_change_encoder.C3ProblemTokenizer max_ref_tks: int = 512 ref_chunk_overlap: int = 32 _encode_headers(scope_changes: Sequence[ChangedHeader], offset: int) -> TokenSeq at: coeditor.ctx_change_encoder.C3ProblemTokenizer._group_encode_changed_refs all_chunks = list[TokenSeq]() segs = list[TokenSeq]() at: coeditor.ctx_change_encoder.ChangedCodeSpan headers: Sequence[ChangedHeader] at: coeditor.encoding Newline_id = get_tk_id("\n") break_into_chunks(tks: TokenSeq, header_f: Callable[[int], TokenSeq], chunk_size: int, overlap: int, right_to_left: bool=False, add_bos: bool=True, max_return_chunks: int | None=None) -> list[TokenSeq] at: dataclasses dataclass(_cls: Type[_T]) -> Type[_T] dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]] dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]] at: jedi.api Script(code=None, *, path=None, environment=None, project=None) ===========changed ref 0=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def _encode_headers( self, scope_changes: Sequence[ChangedHeader], offset: int ) -> TokenSeq: + segs = [c.change_tks.tolist() for c in scope_changes] + if offset != 0: + segs.append(self._get_offset_tks(offset).tolist()) + segs.append([]) + scope_tks = join_list(segs, Newline_id) - scope_tks = join_list( + scope_tks = truncate_section( + scope_tks, TruncateAt.Left, self.max_scope_tks, inplace=True - (self._encode_header_change(c) for c in scope_changes), Newline_id ) - if offset != 0: - scope_tks.extend(encode_basic(f"\n# offset: {offset}\n")) - else: - scope_tks.append(Newline_id) - scope_tks = truncate_section(scope_tks, TruncateAt.Left, self.max_scope_tks) return scope_tks ===========changed ref 1=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: - def _encode_change(self, change: Change[str]) -> TokenSeq: - if (change_tks := self._change_cache.get(change)) is None: - change_tks = change_to_tokens(change) - self._change_cache[change] = change_tks - return change_tks - ===========changed ref 2=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: - def _tokenize_problems(self, problems: Sequence[C3Problem]) -> list[TkC3Problem]: - return join_list(self.tokenize_problem(p) for p in problems) - ===========changed ref 3=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: + def _get_offset_tks(self, offset: int) -> TkArray: + if (tks := self._offset_cache.get(offset)) is None: + tks = TkArray.new(encode_basic(f"# offset: {offset}")) + self._offset_cache[offset] = tks + return tks + ===========changed ref 4=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: - def _encode_header_change(self, ch: ChangedHeader) -> TokenSeq: - hchange = ch.change.map(lambda s: 
s.strip("\n")) - tks = truncate_section( - change_to_tokens(hchange), TruncateAt.Left, self.max_scope_tks - ) - return tks - ===========changed ref 5=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): + @property + def output_tks(self) -> TokenSeq: + return self.output.tolist() + ===========changed ref 6=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): + @property + def input_tks(self) -> TokenSeq: + return self.input.tolist() + ===========changed ref 7=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def __post_init__(self): + self._offset_cache = LRUCache[int, TkArray](maxsize=100) - self._change_cache = LRUCache[Change[str], TokenSeq](maxsize=500) ===========changed ref 8=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): def all_ctxs(self) -> dict[str, TokenSeq]: + return {name: ref.tolist() for name, ref in self.named_references} - return {name: ref for name, ref in self.named_references} ===========changed ref 9=========== # module: coeditor.ctx_change_encoder @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): @property + def references(self) -> Sequence[TkArray]: - def references(self) -> Sequence[TokenSeq]: + return [ref for _, ref in self.named_references] - return [ref for name, ref in self.named_references] ===========changed ref 10=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: + VERSION = "2.0" - VERSION = "1.0" max_ref_tks: int = 512 max_query_tks: int = 512 max_output_tks: int = 256 max_scope_tks: int = 128 - max_lines_to_edit: int = 20 ref_chunk_overlap: int = 32 - max_chunks_per_elem: int = 4 - skip_unchanged_problems: bool = True
tests.coeditor.test_code_change/TestChangedSpan.test_comments_change
Modified
temp-1
e4e82e3f8fa3a3fef5e2d51584aaa7644a6e1f5b
Pre-commit hook changes.
<0>:<add> # belongs to main
# module: tests.coeditor.test_code_change class TestChangedSpan: def test_comments_change(self): # have to update code as well for the changes to count code2 = dedent( """\ import os + - x = 1 # belongs to f1 + - def f1(): "added doc string" global x x *= 5 return x + 1 + - - # belongs to main <0> if __name__ == "__main__": print(f1() + x + 1) # belongs to print """ ) scope2 = ChangeScope.from_tree(ProjectPath("code1", ""), code_to_module(code2)) self.check_changed_spans( get_changed_spans(Modified(self.scope1, scope2)), (Modified, -1), (Modified, 1), (Modified, 1), )
===========unchanged ref 0=========== at: tests.coeditor.test_code_change.TestChangedSpan code1 = dedent( """\ import os x = 1 y = x + 1 def f1(): global x x *= 5 return x if __name__ == "__main__": print(f1() + x) """ ) scope1 = ChangeScope.from_tree(ProjectPath("code1", ""), code_to_module(code1)) check_changed_spans(changed_spans: Sequence[ChangedSpan], *expects: tuple[type, int]) ===========changed ref 0=========== # module: coeditor.encoding """ Only use this when we want to avoid encoding <add> and <del> as special tokens. """ _BaseTokenizer = spot.utils.DefaultTokenizer Add = "<add>" Del = "<del>" """ + `_BaseTokenizer` extended with <add> and <del> tokens. - `_BaseTokenizer` extended with <add> and <del> tokens. Note that you should avoid using _Tokenizer.encode(text) directly as it will incorrectly eat the spaces around <add> and <del>. Use `encode_change` instead. """ _Tokenizer = copy.deepcopy(_BaseTokenizer) _Tokenizer.add_tokens([Add, Del])
tests.spot.test_type_env/test_self_parameter_annotation
Modified
temp-1
e4e82e3f8fa3a3fef5e2d51584aaa7644a6e1f5b
Pre-commit hook changes.
<0>:<add> return "1"
# module: tests.spot.test_type_env def test_self_parameter_annotation(): code = """ def foo(self: float, x: int) -> str: - return "1" <0> """ parsed = cst.parse_module(code) _, types = collect_user_annotations(parsed) assert_eq(types, [PythonType.from_name("int"), PythonType.from_name("str")]) n_segs = len(mask_types(parsed).code.split(SpecialNames.TypeMask)) assert_eq(n_segs, len(types) + 1) sig = FunctionSignature.from_function(as_any(parsed.body[0]), False) assert len(sig.params) == len(types) - 1
===========unchanged ref 0=========== at: spot.static_analysis mask_types(m: CNode, type_mask=SpecialNames.TypeMask) -> CNode FunctionSignature(params: dict[str, cst.Annotation | None], returns: cst.Annotation | None, in_class: bool) at: spot.static_analysis.FunctionSignature params: dict[str, cst.Annotation | None] returns: cst.Annotation | None in_class: bool from_function(func: cst.FunctionDef, in_class: bool) -> "FunctionSignature" at: spot.type_check PythonType(head: tuple[str, ...], args: tuple["PythonType", ...]=()) at: spot.type_check.PythonType head: tuple[str, ...] args: tuple["PythonType", ...] = () from_name(name: str) -> "PythonType" at: spot.type_env collect_user_annotations(code: cst.Module | cst.MetadataWrapper) -> tuple[list[AnnotInfo], list["PythonType"]] at: spot.utils SpecialNames() as_any(x) -> Any assert_eq(x: T1, *xs: T1, extra_message: Callable[[], str]=lambda: "") -> None at: spot.utils.SpecialNames Return = "<return>" Missing = "<missing>" Lambda = "<lambda>" Empty = "<empty>" TypeMask = "SPOT_TYPE_MASK" ===========changed ref 0=========== # module: coeditor.encoding """ Only use this when we want to avoid encoding <add> and <del> as special tokens. """ _BaseTokenizer = spot.utils.DefaultTokenizer Add = "<add>" Del = "<del>" """ + `_BaseTokenizer` extended with <add> and <del> tokens. - `_BaseTokenizer` extended with <add> and <del> tokens. Note that you should avoid using _Tokenizer.encode(text) directly as it will incorrectly eat the spaces around <add> and <del>. Use `encode_change` instead. """ _Tokenizer = copy.deepcopy(_BaseTokenizer) _Tokenizer.add_tokens([Add, Del]) ===========changed ref 1=========== # module: tests.coeditor.test_code_change class TestChangedSpan: def test_comments_change(self): # have to update code as well for the changes to count code2 = dedent( """\ import os + - x = 1 # belongs to f1 + - def f1(): "added doc string" global x x *= 5 return x + 1 + - + # belongs to main - # belongs to main if __name__ == "__main__": print(f1() + x + 1) # belongs to print """ ) scope2 = ChangeScope.from_tree(ProjectPath("code1", ""), code_to_module(code2)) self.check_changed_spans( get_changed_spans(Modified(self.scope1, scope2)), (Modified, -1), (Modified, 1), (Modified, 1), )
tests.spot.test_static_analysis/test_module_imports
Modified
temp-1
e4e82e3f8fa3a3fef5e2d51584aaa7644a6e1f5b
Pre-commit hook changes.
<0>:<add> from infer.type import *
# module: tests.spot.test_static_analysis def test_module_imports(): import_code = """ import A import B.C from D import a, b as c from .utils import x from ..top import * - from infer.type import * <0> from . import E """ proj = project_from_code({"root.file1": import_code}) mod = proj.modules["root.file1"] assert mod.imported_modules == { "A", "B", "B.C", "D", "D.a", "D.b", "root", "root.utils", "root.utils.x", "top", "infer", "infer.type", "root.E", }
===========unchanged ref 0=========== at: spot.static_analysis.PythonModule functions: list[PythonFunction] global_vars: list[PythonVariable] classes: list[PythonClass] name: ModuleName imported_modules: set[ModuleName] defined_symbols: dict[str, ProjectPath] tree: cst.Module location_map: dict[cst.CSTNode, CodeRange] elem2pos: dict[ElemPath, CodeRange] removed_comments: list[cst.CSTNode] at: spot.static_analysis.PythonProject root_dir: Path modules: dict[ModuleName, PythonModule] symlinks: dict[ModuleName, ModuleName] module2src_file: dict[ModuleName, Path] DefaultIgnoreDirs = {".venv", ".mypy_cache", ".git", "venv", "build"} at: tests.spot.test_static_analysis project_from_code(name2code: dict[ModuleName, str]) ===========changed ref 0=========== # module: coeditor.encoding """ Only use this when we want to avoid encoding <add> and <del> as special tokens. """ _BaseTokenizer = spot.utils.DefaultTokenizer Add = "<add>" Del = "<del>" """ + `_BaseTokenizer` extended with <add> and <del> tokens. - `_BaseTokenizer` extended with <add> and <del> tokens. Note that you should avoid using _Tokenizer.encode(text) directly as it will incorrectly eat the spaces around <add> and <del>. Use `encode_change` instead. """ _Tokenizer = copy.deepcopy(_BaseTokenizer) _Tokenizer.add_tokens([Add, Del]) ===========changed ref 1=========== # module: tests.spot.test_type_env def test_self_parameter_annotation(): code = """ def foo(self: float, x: int) -> str: + return "1" - return "1" """ parsed = cst.parse_module(code) _, types = collect_user_annotations(parsed) assert_eq(types, [PythonType.from_name("int"), PythonType.from_name("str")]) n_segs = len(mask_types(parsed).code.split(SpecialNames.TypeMask)) assert_eq(n_segs, len(types) + 1) sig = FunctionSignature.from_function(as_any(parsed.body[0]), False) assert len(sig.params) == len(types) - 1 ===========changed ref 2=========== # module: tests.coeditor.test_code_change class TestChangedSpan: def test_comments_change(self): # have to update code as well for the changes to count code2 = dedent( """\ import os + - x = 1 # belongs to f1 + - def f1(): "added doc string" global x x *= 5 return x + 1 + - + # belongs to main - # belongs to main if __name__ == "__main__": print(f1() + x + 1) # belongs to print """ ) scope2 = ChangeScope.from_tree(ProjectPath("code1", ""), code_to_module(code2)) self.check_changed_spans( get_changed_spans(Modified(self.scope1, scope2)), (Modified, -1), (Modified, 1), (Modified, 1), )
tests.spot.test_static_analysis/test_fixture_usages
Modified
temp-1
e4e82e3f8fa3a3fef5e2d51584aaa7644a6e1f5b
Pre-commit hook changes.
<0>:<add> return fix1 + use_gfix
# module: tests.spot.test_static_analysis def test_fixture_usages(): <s>root.test_1/fix1", True), ) analysis.assert_usages( "root.test_1/test1", ("root.test_1/fix1", True), ("root.test_1/fix2", True), ) code_conf = """ # root.conftest + import pytest - import pytest @pytest.fixture(scope="session") def use_gfix(): return 1 + - """ code2 = """ # root.nest.test_2 def test_global_fix(fix1, use_gfix): # fix1 should not be visible return fix1 + use_gfix """ code_out = """ + # test_out - # test_out def test_global_fix(fix1, use_gfix): # both fix1 and use_gfix should not be visible - return fix1 + use_gfix <0> """ project = project_from_code( { "root.test_1": code1, "root.conftest": code_conf, "root.nest.test_2": code2, "test_out": code_out, } ) analysis = full_analysis(project) analysis.assert_usages( "root.nest.test_2/test_global_fix", ("root.conftest/use_gfix", True), ) analysis.assert_usages( "test_out/test_global_fix", ) code_global = """ # conftest import pytest @pytest.fixture def fix1(): # should be visible everywhere return 1 """ project = project_from_code( {"root.test_1": code1, "conftest": code_global, "test_out": code_out} ) </s>
===========above chunk 0=========== # module: tests.spot.test_static_analysis def test_fixture_usages(): # offset: -1 code1 = """ # root.test_1 from pytest import fixture import pytest as pt @fixture def fix1(): return 1 @pt.fixture def fix2(fix1): return fix1 + 1 def test1(fix1, fix2): assert fix1 + 1 == fix2 """ project = project_from_code({"root.test_1": code1}) analysis = full_analysis(project) analysis.assert_usages( "root.test_1/fix2", ("root.test_1/fix1", True), ) analysis.assert_usages( "root.test</s> ===========below chunk 0=========== # module: tests.spot.test_static_analysis def test_fixture_usages(): # offset: 1 <s>test_1": code1, "conftest": code_global, "test_out": code_out} ) analysis = full_analysis(project) analysis.assert_usages( "test_out/test_global_fix", ("conftest/fix1", True), ) ===========unchanged ref 0=========== at: tests.spot.test_static_analysis project_from_code(name2code: dict[ModuleName, str]) full_analysis(project: PythonProject) -> UsageAnalysis ===========changed ref 0=========== # module: tests.spot.test_static_analysis def test_constructors(): code1 = """ # root.file1 class A: x: int def __init__(self, x): self.x = x class B(MaybeNamedTuple): x: int y: int = field(init=False) @maybe_dataclass class C: u: int v: int # not a dataclass since A has __init__ class D(A): y: int class E(B): z: int def use(): A(1) B(1, 2) C(1, 2) + def use2(): - def use2(): D(1) E(1,2,3) """ project = project_from_code( { "root.file1": code1, } ) analysis = full_analysis(project) analysis.assert_usages( "root.file1/use", ("root.file1/A.__init__", True), ("root.file1/B.x", True), ("root.file1/B.y", True), ("root.file1/C.u", True), ("root.file1/C.v", True), ) analysis.assert_usages( "root.file1/use2", ("root.file1/A.__init__", True), ("root.file1/B.x", False), ("root.file1/B.y", False), ("root.file1/E.z", True), ) code2 = """ # root.file2 @dataclass class B: x: int y: int = field(init=False) def use(): # these should not trigger constructor usage. B isinstance(x, B) list[B]() """ project = project_from_code( { "</s> ===========changed ref 1=========== # module: tests.spot.test_static_analysis def test_constructors(): # offset: 1 <s> B) list[B]() """ project = project_from_code( { "root.file2": code2, } ) analysis = full_analysis(project) analysis.assert_usages( "root.file2/use", ) ===========changed ref 2=========== # module: tests.spot.test_static_analysis def test_type_usages(): code1 = """ class Arg: + pass - pass class Namespace: pass + - class Struct: def __init__(self, arg: Arg): self.arg = arg """ code2 = """ from code1 import Namespace, Arg, Struct def f(x: Namespace, v): assert isinstance(v, Arg) return Struct(v) """ project = project_from_code({"code1": code1, "code2": code2}) analysis = UsageAnalysis(project, record_type_usages=False) analysis.assert_usages("code2/f", ("code1/Struct.__init__", True)) analysis = UsageAnalysis(project, record_type_usages=True) analysis.assert_usages( "code2/f", ("code1/Struct.__init__", True), ("code1/Struct", True), ("code1/Arg", True), ("code1/Namespace", True), ) ===========changed ref 3=========== # module: tests.spot.test_static_analysis def test_light_stub_gen(): code = """ import typing T1 = typing.TypeVar("T1") # keep T2 = list[T1] # keep number = int # keep Count = 0 # drop class A(typing.Generic[T1]): # keep # drop body x = 0 def __init__(self, x: T1): self.x = x + - def some_f() -> number: # drop return 1 """ expected = """ import typing T1 = typing.TypeVar("T1") T2 = list[T1] number = int class A(typing.Generic[T1]): ... 
""" stub = stub_from_module( cst.parse_module(code), lightweight=True, rm_comments=True, rm_imports=False ) if stub.code.strip() != expected.strip(): print(show_string_diff(stub.code.strip(), expected.strip())) assert False, "stub code does not match expected." ===========changed ref 4=========== # module: tests.spot.test_static_analysis def test_module_imports(): import_code = """ import A import B.C from D import a, b as c from .utils import x from ..top import * + from infer.type import * - from infer.type import * from . import E """ proj = project_from_code({"root.file1": import_code}) mod = proj.modules["root.file1"] assert mod.imported_modules == { "A", "B", "B.C", "D", "D.a", "D.b", "root", "root.utils", "root.utils.x", "top", "infer", "infer.type", "root.E", }
tests.spot.test_static_analysis/test_element_location_and_code
Modified
temp-1
e4e82e3f8fa3a3fef5e2d51584aaa7644a6e1f5b
Pre-commit hook changes.
<0>:<add> def f2(x, y, z):
# module: tests.spot.test_static_analysis def test_element_location_and_code(): code1 = """ def f1(x, y): return ( x + y ) @annotation - def f2(x, y, z): <0> return x + y + z class A: def m1(self, x): return x + 1 @staticmethod def m2(x): return x + 2 """ lines = code1.split("\n") def get_code_from_range(range: CodeRange): start = range.start.line - 1 end = range.end.line - 1 snippet = "\n".join(lines[start : end + 1]) return snippet project = project_from_code({"file1": code1}) m = project.modules["file1"] for f in m.all_funcs(): f_range = m.location_map[f.tree] assert get_code_from_range(f_range) == f.code
===========unchanged ref 0=========== at: spot.static_analysis.PythonFunction name: str path: ProjectPath parent_class: ProjectPath | None tree: cst.FunctionDef at: spot.static_analysis.PythonModule location_map: dict[cst.CSTNode, CodeRange] all_funcs() -> Generator[PythonFunction, None, None] at: spot.static_analysis.PythonProject modules: dict[ModuleName, PythonModule] at: tests.spot.test_static_analysis project_from_code(name2code: dict[ModuleName, str]) ===========changed ref 0=========== # module: tests.spot.test_static_analysis def test_constructors(): code1 = """ # root.file1 class A: x: int def __init__(self, x): self.x = x class B(MaybeNamedTuple): x: int y: int = field(init=False) @maybe_dataclass class C: u: int v: int # not a dataclass since A has __init__ class D(A): y: int class E(B): z: int def use(): A(1) B(1, 2) C(1, 2) + def use2(): - def use2(): D(1) E(1,2,3) """ project = project_from_code( { "root.file1": code1, } ) analysis = full_analysis(project) analysis.assert_usages( "root.file1/use", ("root.file1/A.__init__", True), ("root.file1/B.x", True), ("root.file1/B.y", True), ("root.file1/C.u", True), ("root.file1/C.v", True), ) analysis.assert_usages( "root.file1/use2", ("root.file1/A.__init__", True), ("root.file1/B.x", False), ("root.file1/B.y", False), ("root.file1/E.z", True), ) code2 = """ # root.file2 @dataclass class B: x: int y: int = field(init=False) def use(): # these should not trigger constructor usage. B isinstance(x, B) list[B]() """ project = project_from_code( { "</s> ===========changed ref 1=========== # module: tests.spot.test_static_analysis def test_constructors(): # offset: 1 <s> B) list[B]() """ project = project_from_code( { "root.file2": code2, } ) analysis = full_analysis(project) analysis.assert_usages( "root.file2/use", ) ===========changed ref 2=========== # module: tests.spot.test_static_analysis def test_fixture_usages(): code1 = """ # root.test_1 from pytest import fixture import pytest as pt @fixture def fix1(): return 1 @pt.fixture def fix2(fix1): return fix1 + 1 def test1(fix1, fix2): assert fix1 + 1 == fix2 """ project = project_from_code({"root.test_1": code1}) analysis = full_analysis(project) analysis.assert_usages( "root.test_1/fix2", ("root.test_1/fix1", True), ) analysis.assert_usages( "root.test_1/test1", ("root.test_1/fix1", True), ("root.test_1/fix2", True), ) code_conf = """ # root.conftest + import pytest - import pytest @pytest.fixture(scope="session") def use_gfix(): return 1 + - """ code2 = """ # root.nest.test_2 def test_global_fix(fix1, use_gfix): # fix1 should not be visible return fix1 + use_gfix """ code_out = """ + # test_out - # test_out def test_global_fix(fix1, use_gfix): # both fix1 and use_gfix should not be visible + return fix1 + use_gfix - return fix1 + use_gfix """ project = project_from_code( { "root.test_1": code1, "root.conftest": code_conf, "root.nest.test_2": code2, "test_out": code_out, } ) analysis = full_analysis(project) analysis.assert_usages( "</s> ===========changed ref 3=========== # module: tests.spot.test_static_analysis def test_fixture_usages(): # offset: 1 <s> } ) analysis = full_analysis(project) analysis.assert_usages( "root.nest.test_2/test_global_fix", ("root.conftest/use_gfix", True), ) analysis.assert_usages( "test_out/test_global_fix", ) code_global = """ # conftest import pytest @pytest.fixture def fix1(): # should be visible everywhere return 1 """ project = project_from_code( {"root.test_1": code1, "conftest": code_global, "test_out": code_out} ) analysis = 
full_analysis(project) analysis.assert_usages( "test_out/test_global_fix", ("conftest/fix1", True), ) ===========changed ref 4=========== # module: tests.spot.test_static_analysis def test_type_usages(): code1 = """ class Arg: + pass - pass class Namespace: pass + - class Struct: def __init__(self, arg: Arg): self.arg = arg """ code2 = """ from code1 import Namespace, Arg, Struct def f(x: Namespace, v): assert isinstance(v, Arg) return Struct(v) """ project = project_from_code({"code1": code1, "code2": code2}) analysis = UsageAnalysis(project, record_type_usages=False) analysis.assert_usages("code2/f", ("code1/Struct.__init__", True)) analysis = UsageAnalysis(project, record_type_usages=True) analysis.assert_usages( "code2/f", ("code1/Struct.__init__", True), ("code1/Struct", True), ("code1/Arg", True), ("code1/Namespace", True), )
spot.data/output_ids_as_seqs
Modified
temp-1
237e7217d129b1ba160716f2fcb074989bbdd22d
Implement training for the new data formats.
<0>:<add> if _min_extra_id <= tk <= _max_extra_id:
# module: spot.data def output_ids_as_seqs(output_ids: Iterable[Token]) -> dict[Token, TokenSeq]: """Parse the CodeT5 model's output as a series of key-value pairs. <pad>,<mask>, or <s> or </s> tokens are filtered out.""" buff = TokenSeq() key = None seqs = dict[Token, TokenSeq]() tokenizer = DefaultTokenizer - min_tk = tokenizer.additional_special_tokens_ids[0] - max_tk = tokenizer.additional_special_tokens_ids[-1] bos_tk = tokenizer.bos_token_id eos_tk = tokenizer.eos_token_id for tk in output_ids: if tk <= 0 or tk == bos_tk or tk == eos_tk: continue # pad, mask token, or sequence token - if min_tk <= tk <= max_tk: <0> if key is not None: seqs[key] = buff buff = TokenSeq() key = tk else: buff.append(tk) if key is not None: seqs[key] = buff return seqs
===========unchanged ref 0=========== at: spot.tokenized_src Token = int TokenSeq = list[int] # might need to make this more space-efficient at: spot.utils DefaultTokenizer = load_tokenizer_spot() at: transformers.tokenization_utils_base.SpecialTokensMixin SPECIAL_TOKENS_ATTRIBUTES = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", "additional_special_tokens", ] bos_token_id(self, value) NoneType() eos_token_id(self, value) NoneType() additional_special_tokens_ids(self, values) at: typing Iterable = _alias(collections.abc.Iterable, 1)
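`output_ids_as_seqs` above reads a CodeT5 output sequence as alternating sentinel keys and value spans: every `<extra_id_k>` token starts a new entry, the tokens that follow it (up to the next sentinel) become its value, and pad/bos/eos tokens are dropped. A small self-contained sketch of that parsing, using made-up token ids instead of the tokenizer's real `_min_extra_id`/`_max_extra_id` range:

```python
# Made-up ids for illustration; the real function takes them from DefaultTokenizer.
SENTINEL_IDS = set(range(32000, 32100))  # stand-ins for <extra_id_*> tokens
PAD, BOS, EOS = 0, 1, 2

def split_by_sentinels(output_ids: list[int]) -> dict[int, list[int]]:
    # Group the ids following each sentinel token under that sentinel.
    result: dict[int, list[int]] = {}
    key = None
    for tk in output_ids:
        if tk in (PAD, BOS, EOS):
            continue  # skip padding and sequence-boundary tokens
        if tk in SENTINEL_IDS:
            key = tk
            result[key] = []
        elif key is not None:
            result[key].append(tk)
    return result

assert split_by_sentinels([1, 32000, 5, 6, 32001, 7, 2]) == {32000: [5, 6], 32001: [7]}
```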
coeditor.encoding/StrDelta.for_input_range
Modified
temp-1
237e7217d129b1ba160716f2fcb074989bbdd22d
Implement training for the new data formats.
<0>:<add> new_delta = {k: v for k, v in self._deltas.items() if a <= k < b}
# module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" a, b = line_range - new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} <0> return StrDelta(new_delta)
===========unchanged ref 0=========== at: coeditor.encoding StrDelta(_deltas: Mapping[int, tuple[str, ...]]) at: coeditor.encoding.StrDelta _deltas: Mapping[int, tuple[str, ...]] at: typing.Mapping items() -> AbstractSet[Tuple[_KT, _VT_co]] ===========changed ref 0=========== # module: spot.data + _min_extra_id = DefaultTokenizer.additional_special_tokens_ids[0] + _max_extra_id = DefaultTokenizer.additional_special_tokens_ids[-1] ===========changed ref 1=========== # module: spot.data def output_ids_as_seqs(output_ids: Iterable[Token]) -> dict[Token, TokenSeq]: """Parse the CodeT5 model's output as a series of key-value pairs. <pad>,<mask>, or <s> or </s> tokens are filtered out.""" buff = TokenSeq() key = None seqs = dict[Token, TokenSeq]() tokenizer = DefaultTokenizer - min_tk = tokenizer.additional_special_tokens_ids[0] - max_tk = tokenizer.additional_special_tokens_ids[-1] bos_tk = tokenizer.bos_token_id eos_tk = tokenizer.eos_token_id for tk in output_ids: if tk <= 0 or tk == bos_tk or tk == eos_tk: continue # pad, mask token, or sequence token + if _min_extra_id <= tk <= _max_extra_id: - if min_tk <= tk <= max_tk: if key is not None: seqs[key] = buff buff = TokenSeq() key = tk else: buff.append(tk) if key is not None: seqs[key] = buff return seqs
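The edit above changes `for_input_range` from re-basing line keys at the range start to keeping them absolute; only the filtering to `[a, b)` remains. A toy comparison of the two behaviors on a plain dict standing in for `StrDelta._deltas`:

```python
delta = {3: ("-x = 1",), 7: ("+y = 2",)}  # line number -> change strings
a, b = 2, 8

new_style = {k: v for k, v in delta.items() if a <= k < b}      # keys stay absolute
old_style = {k - a: v for k, v in delta.items() if a <= k < b}  # keys re-based at `a`

assert new_style == {3: ("-x = 1",), 7: ("+y = 2",)}
assert old_style == {1: ("-x = 1",), 5: ("+y = 2",)}
```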
coeditor.encoding/TkDelta.for_input_range
Modified
temp-1
237e7217d129b1ba160716f2fcb074989bbdd22d
Implement training for the new data formats.
<0>:<add> new_delta = {k: v for k, v in self._deltas.items() if a <= k < b}
# module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" a, b = line_range - new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} <0> return TkDelta(new_delta)
===========unchanged ref 0=========== at: coeditor.encoding.TkDelta _deltas: Mapping[int, tuple[TokenSeq, ...]] at: coeditor.encoding.TkDelta.__repr__ line_diffs = "\n".join( f" {k}: {tuple(map(decode_tokens, a))}" for k, a in self._deltas.items() if a ) ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def shifted(self, shift_lines: int) -> Self: + return StrDelta({k + shift_lines: v for k, v in self._deltas.items()}) + ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def shifted(self, shift_lines: int) -> Self: + return StrDelta({k + shift_lines: v for k, v in self._deltas.items()}) + ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" a, b = line_range + new_delta = {k: v for k, v in self._deltas.items() if a <= k < b} - new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} return StrDelta(new_delta) ===========changed ref 3=========== # module: spot.data + _min_extra_id = DefaultTokenizer.additional_special_tokens_ids[0] + _max_extra_id = DefaultTokenizer.additional_special_tokens_ids[-1] ===========changed ref 4=========== # module: spot.data def output_ids_as_seqs(output_ids: Iterable[Token]) -> dict[Token, TokenSeq]: """Parse the CodeT5 model's output as a series of key-value pairs. <pad>,<mask>, or <s> or </s> tokens are filtered out.""" buff = TokenSeq() key = None seqs = dict[Token, TokenSeq]() tokenizer = DefaultTokenizer - min_tk = tokenizer.additional_special_tokens_ids[0] - max_tk = tokenizer.additional_special_tokens_ids[-1] bos_tk = tokenizer.bos_token_id eos_tk = tokenizer.eos_token_id for tk in output_ids: if tk <= 0 or tk == bos_tk or tk == eos_tk: continue # pad, mask token, or sequence token + if _min_extra_id <= tk <= _max_extra_id: - if min_tk <= tk <= max_tk: if key is not None: seqs[key] = buff buff = TokenSeq() key = tk else: buff.append(tk) if key is not None: seqs[key] = buff return seqs
coeditor.encoding/TkDelta.__bool__
Modified
temp-1
237e7217d129b1ba160716f2fcb074989bbdd22d
Implement training for the new data formats.
<0>:<add> return self.num_changes() > 0
# module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __bool__(self) -> bool: - return bool(self._deltas) <0>
===========unchanged ref 0=========== at: coeditor.encoding TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]]) at: coeditor.encoding.TkDelta _deltas: Mapping[int, tuple[TokenSeq, ...]] at: coeditor.encoding.TkDelta.for_input_range a, b = line_range a, b = line_range at: typing.Mapping items() -> AbstractSet[Tuple[_KT, _VT_co]] ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" a, b = line_range + new_delta = {k: v for k, v in self._deltas.items() if a <= k < b} - new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} return TkDelta(new_delta) ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def shifted(self, shift_lines: int) -> Self: + return StrDelta({k + shift_lines: v for k, v in self._deltas.items()}) + ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def shifted(self, shift_lines: int) -> Self: + return StrDelta({k + shift_lines: v for k, v in self._deltas.items()}) + ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def for_input_range(self, line_range: tuple[int, int]) -> Self: """Compute the delta for the given line range.""" a, b = line_range + new_delta = {k: v for k, v in self._deltas.items() if a <= k < b} - new_delta = {k - a: v for k, v in self._deltas.items() if a <= k < b} return StrDelta(new_delta) ===========changed ref 4=========== # module: spot.data + _min_extra_id = DefaultTokenizer.additional_special_tokens_ids[0] + _max_extra_id = DefaultTokenizer.additional_special_tokens_ids[-1] ===========changed ref 5=========== # module: spot.data def output_ids_as_seqs(output_ids: Iterable[Token]) -> dict[Token, TokenSeq]: """Parse the CodeT5 model's output as a series of key-value pairs. <pad>,<mask>, or <s> or </s> tokens are filtered out.""" buff = TokenSeq() key = None seqs = dict[Token, TokenSeq]() tokenizer = DefaultTokenizer - min_tk = tokenizer.additional_special_tokens_ids[0] - max_tk = tokenizer.additional_special_tokens_ids[-1] bos_tk = tokenizer.bos_token_id eos_tk = tokenizer.eos_token_id for tk in output_ids: if tk <= 0 or tk == bos_tk or tk == eos_tk: continue # pad, mask token, or sequence token + if _min_extra_id <= tk <= _max_extra_id: - if min_tk <= tk <= max_tk: if key is not None: seqs[key] = buff buff = TokenSeq() key = tk else: buff.append(tk) if key is not None: seqs[key] = buff return seqs
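With `for_input_range` keeping absolute keys, callers that want span-local line numbers now compose it with the new `shifted` method. A sketch of that composition on a dict stand-in (the real classes wrap this in `StrDelta`/`TkDelta`):

```python
delta = {4: ("+print(x)",), 9: ("-return y",)}  # absolute line -> edits

def for_input_range(d: dict[int, tuple[str, ...]], a: int, b: int):
    return {k: v for k, v in d.items() if a <= k < b}

def shifted(d: dict[int, tuple[str, ...]], offset: int):
    # Re-express the keys relative to a new origin, e.g. a span's first line.
    return {k + offset: v for k, v in d.items()}

span_start, span_end = 3, 7  # the span occupies lines [3, 7)
local = shifted(for_input_range(delta, span_start, span_end), -span_start)
assert local == {1: ("+print(x)",)}
```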
coeditor.retrieval_model/RetrievalEditorModel.train_on_data
Modified
temp-1
237e7217d129b1ba160716f2fcb074989bbdd22d
Implement training for the new data formats.
<0>:<add> cprint("blue", "Number of training batches (estimate):", epoch_steps)
<s>model class RetrievalEditorModel(T5PreTrainedModel): def train_on_data( self, training_name: str, + train_loader: "C3DataLoader", + eval_loader: "C3DataLoader", - train_data: TokenizedEditDataset, - eval_data: TokenizedEditDataset, train_args: "TrainingArgs", - batch_args: "BatchArgs", - eval_batch_args: "BatchArgs", ) -> None: <s>LoopOutput: + metrics = model.eval_loss_on_loader(as_any(dataloader)) - metrics = model.eval_loss_on_loader(dataloader) n_samples = metrics["loss_per_ex"].weight metrics = { f"{metric_key_prefix}_{k}": v.mean() for k, v in metrics.items() } return EvalLoopOutput( predictions=tuple(), label_ids=tuple(), metrics=metrics, num_samples=n_samples, ) + epoch_steps = len(train_loader) - epoch_steps = len(train_lodader) - print("Number of training batches (estimate):", epoch_steps) <0> trainer_args = Seq2SeqTrainingArguments( output_dir=str(train_dir), overwrite_output_dir=True, evaluation_strategy="epoch", save_strategy="epoch", logging_steps=max(1, epoch_steps // 10), num_train_epochs=train_args.max_train_epochs, save_total_limit=2, lr_scheduler_type=train_args.lr_scheduler_type, learning_rate=train_args.learning_rate, weight_decay=train_args.weight_decay, metric_for_best_model="loss_per_tk", greater_is_better=False, fp16=True, load_best_model_at_end=True, push_to_hub=False</s>
===========above chunk 0=========== <s>Model(T5PreTrainedModel): def train_on_data( self, training_name: str, + train_loader: "C3DataLoader", + eval_loader: "C3DataLoader", - train_data: TokenizedEditDataset, - eval_data: TokenizedEditDataset, train_args: "TrainingArgs", - batch_args: "BatchArgs", - eval_batch_args: "BatchArgs", ) -> None: # offset: -1 <s>_edits() - eval_edits = eval_data.all_edits() - assert len(train_edits) > 0, "No training edits provided." - - train_lodader = edits_to_dataloader( - train_edits, args=batch_args, shuffle=True, desc="Training Epoch" - ) - eval_loader = edits_to_dataloader( - eval_edits, - args=eval_batch_args, - shuffle=False, - desc="Eval Epoch", + eval_loader.tqdm_args = {"disable": True} - tqdm_args={"disable": True}, - ) model = self # model = torch.compile(self.to("cuda")) # pytorch doesn't support python 3.11 yet. class DynamicTrainer(Seq2SeqTrainer): def get_train_dataloader(self): + return train_loader - return train_lodader def get_eval_dataloader(self, eval_dataset): return eval_loader def evaluation_loop( self, dataloader, description: str, prediction_loss_only: Optional[bool] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", ) -> EvalLoopOutput: + metrics = model.eval_loss_on_loader(as_any(dataloader)) - </s> ===========above chunk 1=========== <s>Model(T5PreTrainedModel): def train_on_data( self, training_name: str, + train_loader: "C3DataLoader", + eval_loader: "C3DataLoader", - train_data: TokenizedEditDataset, - eval_data: TokenizedEditDataset, train_args: "TrainingArgs", - batch_args: "BatchArgs", - eval_batch_args: "BatchArgs", ) -> None: # offset: -2 train_dir = get_model_dir(trained=False) / training_name - - train_edits = train_data.all_edits() - eval_edits = eval_data.all_edits() - assert len(train_ed</s> ===========below chunk 0=========== <s>EditorModel(T5PreTrainedModel): def train_on_data( self, training_name: str, + train_loader: "C3DataLoader", + eval_loader: "C3DataLoader", - train_data: TokenizedEditDataset, - eval_data: TokenizedEditDataset, train_args: "TrainingArgs", - batch_args: "BatchArgs", - eval_batch_args: "BatchArgs", ) -> None: # offset: 1 <s> fp16=True, load_best_model_at_end=True, push_to_hub=False, report_to=["wandb"], disable_tqdm=True, # torchdynamo="inductor", # use compiled model ) trainer = DynamicTrainer( self, trainer_args, callbacks=[EarlyStoppingCallback(early_stopping_patience=1)], ) trainer.train() save_dir = get_model_dir(trained=True) / training_name self.save(save_dir) print("Model saved to:", save_dir) ===========unchanged ref 0=========== at: coeditor._utils as_any(x) -> Any TimeLogger(times: dict[str, list[float]]=field(default_factory=dict)) at: coeditor.common get_model_dir(trained=True) -> Path at: coeditor.common.WeightedSum sum: V weight: W mean() -> float at: coeditor.retrieval_model AttentionMode() at: coeditor.retrieval_model.C3DataLoader all_probs: Sequence[C3Problem] tokenizer: C3ProblemTokenizer batch_args: BatchArgs shuffle: bool desc: str tqdm_args: dict | None = None chunk_size: int = 1000 workers: int = 10 at: coeditor.retrieval_model.RetrievalEditorModel is_parallelizable = False supports_gradient_checkpointing = False eval_loss_on_loader(self, dataloader: "C3DataLoader") eval_loss_on_loader(dataloader: "C3DataLoader") decorate_autocast(dataloader: "C3DataLoader") at: coeditor.retrieval_model.RetrievalEditorModel.__init__ self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = 
copy.deepcopy(config) self.encoder = T5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) self.decoder = T5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) amode = getattr(config, "attention_mode", AttentionMode.bidirectional.name) self.attention_mode = AttentionMode[amode] self.tlogger = TimeLogger()
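The `DynamicTrainer` defined inside `train_on_data` feeds pre-built loaders to Hugging Face's `Seq2SeqTrainer` by overriding `get_train_dataloader`/`get_eval_dataloader`. A standalone sketch of the same pattern; the constructor arguments here are illustrative, while the code above simply closes over `train_loader`/`eval_loader`:

```python
from transformers import Seq2SeqTrainer

class LoaderTrainer(Seq2SeqTrainer):
    """Use externally constructed dataloaders instead of HF Datasets."""

    def __init__(self, *args, train_loader=None, eval_loader=None, **kwargs):
        super().__init__(*args, **kwargs)
        self._train_loader = train_loader  # expected to yield dicts of tensors
        self._eval_loader = eval_loader

    def get_train_dataloader(self):
        # Bypass the default Dataset -> DataLoader construction entirely.
        return self._train_loader

    def get_eval_dataloader(self, eval_dataset=None):
        return self._eval_loader
```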
coeditor.retrieval_model/RetrievalEditorModel.eval_loss_on_loader
Modified
temp-1
237e7217d129b1ba160716f2fcb074989bbdd22d
Implement training for the new data formats.
<0>:<add> for batch in dataloader.__iter__():
# module: coeditor.retrieval_model class RetrievalEditorModel(T5PreTrainedModel): @torch.no_grad() @torch.autocast("cuda") + def eval_loss_on_loader(self, dataloader: "C3DataLoader"): - def eval_loss_on_loader(self, dataloader): core = self previous = core.training core.eval() metrics = dict[str, WeightedSum]() - for batch in tqdm( - dataloader, desc="evaluate loss", unit="batch", smoothing=0.0 - ): <0> batch["input_ids"] = batch["input_ids"].to(core.device) batch["labels"] = batch["labels"].to(core.device) outputs = core.forward(**batch) assert isinstance(outputs, Seq2SeqLMOutput) if CheckNaN: if outputs.logits.isnan().any(): print("loss:", not_none(outputs.loss).item()) print("batch:", batch) raise ValueError("NaN in logits") for k, v in compute_loss_metrics(outputs.logits, batch["labels"]).items(): v = v + metrics.get(k, WeightedSum(0.0, 0)) metrics[k] = v core.train(mode=previous) return metrics
===========unchanged ref 0=========== at: coeditor.common get_model_dir(trained=True) -> Path WeightedSum(sum: V, weight: W) at: coeditor.retrieval_model.C3DataLoader __iter__() -> Iterable[dict] at: coeditor.retrieval_model.RetrievalEditorModel save(save_dir: Path, is_main_process: bool=True, state_dict: Optional[dict]=None, save_function: Callable=torch.save, push_to_hub: bool=False, max_shard_size: Union[int, str]="10GB", safe_serialization: bool=False, variant: Optional[str]=None, /, *, is_main_process: bool=True, state_dict: Optional[dict]=None, save_function: Callable=torch.save, push_to_hub: bool=False, max_shard_size: Union[int, str]="10GB", safe_serialization: bool=False, variant: Optional[str]=None, private: Optional[bool]=None, use_auth_token: Optional[Union[bool, str]]=None, repo_url: Optional[str]=None, organization: Optional[str]=None) forward(input_ids: LongTensor | None=None, references: Sequence[TokenSeq] | None=None, query_ref_list: Sequence[Sequence[int]] | None=None, labels: LongTensor | None=None, encoder_outputs: "RetrivalEncoderOutputs | None"=None, decoder_input_ids: LongTensor | None=None, decoder_inputs_embeds: Tensor | None=None, decoder_attention_mask: Tensor | None=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, tqdm=None) -> Seq2SeqLMOutput ===========unchanged ref 1=========== at: coeditor.retrieval_model.RetrievalEditorModel.train_on_data DynamicTrainer(model: Union["PreTrainedModel", nn.Module]=None, args: "TrainingArguments"=None, data_collator: Optional["DataCollator"]=None, train_dataset: Optional[Dataset]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, tokenizer: Optional["PreTrainedTokenizerBase"]=None, model_init: Optional[Callable[[], "PreTrainedModel"]]=None, compute_metrics: Optional[Callable[["EvalPrediction"], Dict]]=None, callbacks: Optional[List["TrainerCallback"]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None) trainer_args = Seq2SeqTrainingArguments( output_dir=str(train_dir), overwrite_output_dir=True, evaluation_strategy="epoch", save_strategy="epoch", logging_steps=max(1, epoch_steps // 10), num_train_epochs=train_args.max_train_epochs, save_total_limit=2, lr_scheduler_type=train_args.lr_scheduler_type, learning_rate=train_args.learning_rate, weight_decay=train_args.weight_decay, metric_for_best_model="loss_per_tk", greater_is_better=False, fp16=True, load_best_model_at_end=True, push_to_hub=False, report_to=["wandb"], disable_tqdm=True, # torchdynamo="inductor", # use compiled model ) at: torch.amp.autocast_mode autocast(device_type: str, dtype: Optional[_dtype]=None, enabled: bool=True, cache_enabled: Optional[bool]=None) ===========unchanged ref 2=========== at: torch.autograd.grad_mode no_grad() at: torch.nn.modules.module.Module dump_patches: bool = False _version: int = 1 training: bool _parameters: Dict[str, Optional[Parameter]] _buffers: Dict[str, Optional[Tensor]] _non_persistent_buffers_set: Set[str] _backward_pre_hooks: Dict[int, Callable] _backward_hooks: Dict[int, Callable] _is_full_backward_hook: Optional[bool] _forward_hooks: Dict[int, Callable] _forward_hooks_with_kwargs: Dict[int, bool] _forward_hooks_always_called: Dict[int, bool] _forward_pre_hooks: Dict[int, Callable] _forward_pre_hooks_with_kwargs: Dict[int, bool] _state_dict_hooks: Dict[int, Callable] _load_state_dict_pre_hooks: 
Dict[int, Callable] _state_dict_pre_hooks: Dict[int, Callable] _load_state_dict_post_hooks: Dict[int, Callable] _modules: Dict[str, Optional['Module']] call_super_init: bool = False _compiled_call_impl : Optional[Callable] = None forward: Callable[..., Any] = _forward_unimplemented __call__ : Callable[..., Any] = _wrapped_call_impl T_destination = TypeVar('T_destination', bound=Dict[str, Any]) eval() -> T at: torch.nn.modules.module.Module.train self.training = mode ===========unchanged ref 3=========== at: transformers.modeling_outputs Seq2SeqLMOutput(loss: Optional[torch.FloatTensor]=None, logits: torch.FloatTensor=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, decoder_hidden_states: Optional[Tuple[torch.FloatTensor]]=None, decoder_attentions: Optional[Tuple[torch.FloatTensor]]=None, cross_attentions: Optional[Tuple[torch.FloatTensor]]=None, encoder_last_hidden_state: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[Tuple[torch.FloatTensor]]=None, encoder_attentions: Optional[Tuple[torch.FloatTensor]]=None) at: transformers.trainer.Trainer train(resume_from_checkpoint: Optional[Union[str, bool]]=None, trial: Union["optuna.Trial", Dict[str, Any]]=None, ignore_keys_for_eval: Optional[List[str]]=None, **kwargs) at: transformers.trainer_callback EarlyStoppingCallback(early_stopping_patience: int=1, early_stopping_threshold: Optional[float]=0.0) ===========changed ref 0=========== # module: coeditor.retrieval_model class RetrievalEditorModel(T5PreTrainedModel): - def eval_loss_on_data( - self, data: TokenizedEditDataset, batch_args: "BatchArgs" - ) -> dict[str, WeightedSum]: - batch_args = copy.deepcopy(batch_args) - batch_args.shuffle_extra_ids = False - eval_loader = edits_to_dataloader( - data.all_edits(), - args=batch_args, - shuffle=False, - desc="Eval Epoch", - ) - return self.eval_loss_on_loader(eval_loader) - ===========changed ref 1=========== # module: coeditor.retrieval_model + @dataclass + class RetrievalDecodingResult: - class RetrievalDecodingResult(DatasetDecodingResult[TkC3Problem]): + def __post_init__(self): + assert_eq(len(self.problems), len(self.predictions)) +
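`eval_loss_on_loader` accumulates per-batch metrics as weighted sums so the final mean is weighted by how many tokens each batch contributed rather than averaged per batch. A simplified re-implementation of that accumulation; the real `WeightedSum` in `coeditor.common` is generic, but the pattern is the same:

```python
from dataclasses import dataclass

@dataclass
class WeightedSum:
    sum: float
    weight: int

    def __add__(self, other: "WeightedSum") -> "WeightedSum":
        return WeightedSum(self.sum + other.sum, self.weight + other.weight)

    def mean(self) -> float:
        return self.sum / max(1, self.weight)

metrics: dict[str, WeightedSum] = {}
for batch_loss_sum, n_tokens in [(36.0, 30), (8.0, 10)]:  # made-up batch stats
    v = WeightedSum(batch_loss_sum, n_tokens) + metrics.get("loss_per_tk", WeightedSum(0.0, 0))
    metrics["loss_per_tk"] = v

# 44 total loss over 40 tokens -> token-weighted mean of 1.1.
assert abs(metrics["loss_per_tk"].mean() - 1.1) < 1e-9
```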
coeditor.retrieval_model/RetrievalEditorModel.predict_on_data
Modified
temp-1
237e7217d129b1ba160716f2fcb074989bbdd22d
Implement training for the new data formats.
<0>:<add> problems=eval_problems,
# module: coeditor.retrieval_model class RetrievalEditorModel(T5PreTrainedModel): @torch.no_grad() @torch.autocast("cuda") def predict_on_data( self, + eval_problems: Sequence[C3Problem], + tokenizer: C3ProblemTokenizer, - eval_data: TokenizedEditDataset, batch_args: "BatchArgs", dec_args: DecodingArgs, ): <s>False, desc="predict_on_data" - eval_edits, batch_args, shuffle=False, desc="Decoding Epoch" ) gen_args = dec_args.to_model_args() + batch_elems = list[RetrievalModelPrediction]() - batch_elems = list[ModelPrediction]() for batch in eval_loader: # type: ignore out_tks = self.generate( batch["input_ids"].to(self.device), references=batch["references"], query_ref_list=batch["query_ref_list"], **gen_args, ).tolist() # type: ignore input_ids = batch["input_ids"].tolist() labels = batch["labels"].tolist() query_ref_list = batch["query_ref_list"] for i in range(len(input_ids)): all_refs = batch["references"] references = [all_refs[j] for j in query_ref_list[i]] e = RetrievalModelPrediction( input_ids=remove_pad_ids(input_ids[i]), output_ids=remove_pad_ids(out_tks[i]), labels=labels[i], references=references, ) batch_elems.append(e) return RetrievalDecodingResult( eval_args={"batch_args": batch_args, "dec_args": dec_args}, - edits=eval_edits, <0> predictions=batch_elems, )
===========above chunk 0=========== # module: coeditor.retrieval_model class RetrievalEditorModel(T5PreTrainedModel): @torch.no_grad() @torch.autocast("cuda") def predict_on_data( self, + eval_problems: Sequence[C3Problem], + tokenizer: C3ProblemTokenizer, - eval_data: TokenizedEditDataset, batch_args: "BatchArgs", dec_args: DecodingArgs, ): # offset: -1 if batch_args.shuffle_extra_ids: warnings.warn( "Shuffling extra ids during eval can lead to incorrect results." ) + - eval_edits = eval_data.all_edits() + eval_loader = C3DataLoader( - eval_loader = _BatchSampler( + eval_problems, tokenizer, batch_args, shuffle=False, desc="predict_on_data" - eval_edits, batch_args, shuffle=False, desc="Decoding</s> ===========unchanged ref 0=========== at: _warnings warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None at: coeditor._utils not_none(x: Optional[T1]) -> T1 at: coeditor.common WeightedSum(sum: V, weight: W) at: coeditor.model DecodingArgs(max_output_tks: int=512, do_sample: bool=False, top_p: float=0.9, num_beams: Optional[int]=1, length_penalty: float=0.0, marginalize_samples: int=1) compute_loss_metrics(logits: torch.Tensor, labels: torch.Tensor) -> Mapping[str, WeightedSum] at: coeditor.model.DecodingArgs max_output_tks: int = 512 do_sample: bool = False top_p: float = 0.9 num_beams: Optional[int] = 1 length_penalty: float = 0.0 marginalize_samples: int = 1 to_model_args() -> dict at: coeditor.retrieval_model RetrievalModelPrediction(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) RetrievalModelPrediction(**kwargs: _VT) RetrievalModelPrediction(map: Mapping[_KT, _VT], **kwargs: _VT) C3DataLoader() at: coeditor.retrieval_model.BatchArgs min_queries: int = 1 max_queries: int = 8 shuffle_extra_ids: bool = True at: coeditor.retrieval_model.RetrievalEditorModel.eval_loss_on_loader core = self previous = core.training ===========unchanged ref 1=========== batch["input_ids"] = batch["input_ids"].to(core.device) batch["labels"] = batch["labels"].to(core.device) outputs = core.forward(**batch) at: torch.amp.autocast_mode autocast(device_type: str, dtype: Optional[_dtype]=None, enabled: bool=True, cache_enabled: Optional[bool]=None) at: torch.autograd.grad_mode no_grad() at: torch.nn.modules.module.Module train(mode: bool=True) -> T at: transformers.generation.utils.GenerationMixin generate(inputs: Optional[torch.Tensor]=None, generation_config: Optional[GenerationConfig]=None, logits_processor: Optional[LogitsProcessorList]=None, stopping_criteria: Optional[StoppingCriteriaList]=None, prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]]=None, synced_gpus: Optional[bool]=None, assistant_model: Optional["PreTrainedModel"]=None, streamer: Optional["BaseStreamer"]=None, **kwargs) -> Union[GenerateOutput, torch.LongTensor] at: transformers.modeling_outputs.Seq2SeqLMOutput loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None at: typing Sequence = 
_alias(collections.abc.Sequence, 1) ===========unchanged ref 2=========== at: typing.Mapping get(key: _KT) -> Optional[_VT_co] get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] ===========changed ref 0=========== # module: coeditor.retrieval_model class RetrievalEditorModel(T5PreTrainedModel): - def eval_loss_on_data( - self, data: TokenizedEditDataset, batch_args: "BatchArgs" - ) -> dict[str, WeightedSum]: - batch_args = copy.deepcopy(batch_args) - batch_args.shuffle_extra_ids = False - eval_loader = edits_to_dataloader( - data.all_edits(), - args=batch_args, - shuffle=False, - desc="Eval Epoch", - ) - return self.eval_loss_on_loader(eval_loader) - ===========changed ref 1=========== # module: coeditor.retrieval_model + @dataclass + class RetrievalDecodingResult: - class RetrievalDecodingResult(DatasetDecodingResult[TkC3Problem]): + def __post_init__(self): + assert_eq(len(self.problems), len(self.predictions)) + ===========changed ref 2=========== # module: coeditor.retrieval_model class RetrievalEditorModel(T5PreTrainedModel): @torch.no_grad() @torch.autocast("cuda") + def eval_loss_on_loader(self, dataloader: "C3DataLoader"): - def eval_loss_on_loader(self, dataloader): core = self previous = core.training core.eval() metrics = dict[str, WeightedSum]() - for batch in tqdm( - dataloader, desc="evaluate loss", unit="batch", smoothing=0.0 - ): + for batch in dataloader.__iter__(): batch["input_ids"] = batch["input_ids"].to(core.device) batch["labels"] = batch["labels"].to(core.device) outputs = core.forward(**batch) assert isinstance(outputs, Seq2SeqLMOutput) if CheckNaN: if outputs.logits.isnan().any(): print("loss:", not_none(outputs.loss).item()) print("batch:", batch) raise ValueError("NaN in logits") for k, v in compute_loss_metrics(outputs.logits, batch["labels"]).items(): v = v + metrics.get(k, WeightedSum(0.0, 0)) metrics[k] = v core.train(mode=previous) return metrics
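After generation, `predict_on_data` turns each batch back into per-example records: pad ids are stripped and `query_ref_list[i]` selects which of the batch's references belonged to query `i`. A torch-free sketch of that bookkeeping with toy token ids:

```python
PAD = 0

def remove_pad_ids(ids: list[int]) -> list[int]:
    # Drop padding so stored predictions don't carry batch padding around.
    return [t for t in ids if t != PAD]

batch_inputs = [[5, 6, 7, PAD, PAD], [8, 9, PAD, PAD, PAD]]
batch_outputs = [[11, 12, PAD], [13, PAD, PAD]]
references = [[101], [102], [103]]        # all references in this batch
query_ref_list = [[0, 2], [1]]            # which references each query saw

predictions = []
for i, (inp, out) in enumerate(zip(batch_inputs, batch_outputs)):
    predictions.append(
        {
            "input_ids": remove_pad_ids(inp),
            "output_ids": remove_pad_ids(out),
            "references": [references[j] for j in query_ref_list[i]],
        }
    )

assert predictions[0] == {"input_ids": [5, 6, 7], "output_ids": [11, 12], "references": [[101], [103]]}
```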
coeditor.retrieval_model/RetrivalEncoder.forward
Modified
temp-1
237e7217d129b1ba160716f2fcb074989bbdd22d
Implement training for the new data formats.
<0>:<add> [to_long_tensor(references[rid]) for rid in rids]
# module: coeditor.retrieval_model @dataclass class RetrivalEncoder: def forward( self, input_ids: LongTensor, references: Sequence[TokenSeq] | None = None, query_ref_list: Sequence[Sequence[int]] | None = None, # not used arguments below: output_attentions=None, output_hidden_states=None, return_dict=None, tqdm=None, ) -> RetrivalEncoderOutputs: <s> n_queries = input_ids.size(0) q_lens = input_ids.ne(PAD_id).sum(dim=1).tolist() n_refs = len(references) if query_ref_list is None: query_ref_list = [list(range(n_refs)) for _ in range(n_queries)] if self.attention_mode.value == AttentionMode.bidirectional.value: # use bidirectional implementation queries = [cast(LongTensor, input_ids[i, :l]) for i, l in enumerate(q_lens)] refs = [ - [ - cast(LongTensor, torch.tensor(references[rid]).to(device)) - for rid in rids - ] <0> for rids in query_ref_list ] hidden_rows = self.bidirectional_forward(queries, refs) last_hidden_state, hidden_state_mask = stack_pad_tensors(hidden_rows) return RetrivalEncoderOutputs( last_hidden_state=last_hidden_state, hidden_state_mask=hidden_state_mask ) def split_outputs( lens: Sequence[int], out: BaseModelOutputWithPastAndCrossAttentions ) -> Iterable[BaseModelOutputWithPastAndCrossAttentions]: for i, l in enumerate(lens): hidden_states = tuple( s[i : i + 1, :l] for s in not_none(out.hidden_states) ) yield BaseModelOutputWithPastAndCrossAttentions</s>
===========above chunk 0=========== <s> coeditor.retrieval_model @dataclass class RetrivalEncoder: def forward( self, input_ids: LongTensor, references: Sequence[TokenSeq] | None = None, query_ref_list: Sequence[Sequence[int]] | None = None, # not used arguments below: output_attentions=None, output_hidden_states=None, return_dict=None, tqdm=None, ) -> RetrivalEncoderOutputs: # offset: -1 """ Shapes - input_ids: (n_queries, seq_len) - references: (num_refs, ref_len) - ref_masks: for each query, a list of reference indices. If none, assume all references are accessible to all queries. """ + + def to_long_tensor(data): + return cast( + LongTensor, + torch.tensor(data, dtype=torch.long).to(device), + ) + if references is None: references = [] assert_eq(input_ids.dim(), 2) assert_eq(input_ids.dtype, torch.long) device = self.encoder.device n_queries = input_ids.size(0) q_lens = input_ids.ne(PAD_id).sum</s> ===========below chunk 0=========== <s>: coeditor.retrieval_model @dataclass class RetrivalEncoder: def forward( self, input_ids: LongTensor, references: Sequence[TokenSeq] | None = None, query_ref_list: Sequence[Sequence[int]] | None = None, # not used arguments below: output_attentions=None, output_hidden_states=None, return_dict=None, tqdm=None, ) -> RetrivalEncoderOutputs: # offset: 1 <s>] for s in not_none(out.hidden_states) ) yield BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states[-1], # type: ignore hidden_states=hidden_states, # type: ignore ) ref_outputs = batched_map( references, group_key=lambda ref: _round_length_group(len(ref)), f=lambda refs: split_outputs( [len(x) for x in refs], self.encoder.forward( pad_token_seqs(refs).to(device), output_hidden_states=True, return_dict=True, ), ), ) def encode_queries(query_ids: Sequence[int]) -> Iterable[Tensor]: queries = [cast(LongTensor, input_ids[q, : q_lens[q]]) for q in query_ids] assert query_ref_list is not None query_refs = [query_ref_list[q] for q in query_ids] q_tensor, q_mask = stack_pad_tensors(queries) assert_eq(q_tensor.dim(), 2) if self.attention_mode.value == AttentionMode.query2ref.value: enc = self.encode_query_uni_directional( query_ids=cast(LongTensor, q_tensor), query_attention_mask=q_mask, ref_outputs=ref_outputs, query_ref_list=query_</s> ===========below chunk 1=========== <s>: coeditor.retrieval_model @dataclass class RetrivalEncoder: def forward( self, input_ids: LongTensor, references: Sequence[TokenSeq] | None = None, query_ref_list: Sequence[Sequence[int]] | None = None, # not used arguments below: output_attentions=None, output_hidden_states=None, return_dict=None, tqdm=None, ) -> RetrivalEncoderOutputs: # offset: 2 <s> query_attention_mask=q_mask, ref_outputs=ref_outputs, query_ref_list=query_refs, ) else: assert_eq(self.attention_mode.value, AttentionMode.basic.value) enc = self.encode_query_basic( query_ids=cast(LongTensor, q_tensor), query_attention_mask=q_mask, ref_outputs=ref_outputs, query_ref_list=query_refs, ) last_hidden_state, hidden_state_mask = enc for i, _ in enumerate(queries): yield last_hidden_state[i, hidden_state_mask[i]] def query_group_key(q: int) -> tuple[int, int]: q_len = q_lens[q] ref_len = sum( len(not_none(references)[r]) for r in not_none(query_ref_list)[q] ) return _round_length_group(q_len), _round_length_group(ref_len) last_hidden_states = batched_map( range(n_queries), group_key=query_group_key, f=encode_queries, ) last_hidden_state, hidden_state_mask = stack_pad_tensors(last_hidden_states) return RetrivalEncoderOutputs( last ===========unchanged ref 
0=========== at: coeditor._utils not_none(x: Optional[T1]) -> T1 assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None at: coeditor.common batched_map(xs: Sequence[T1], group_key: Callable[[T1], Any], f: Callable[[Sequence[T1]], Iterable[T2]]) -> list[T2] at: coeditor.encoding PAD_id = get_tk_id("<pad>") at: coeditor.retrieval_model AttentionMode() RetrivalEncoderOutputs(last_hidden_state: Tensor, hidden_state_mask: Tensor | None=None) stack_pad_tensors(xs: Sequence[Tensor]) -> tuple[Tensor, BoolTensor] pad_token_seqs(seqs: Sequence[TokenSeq], pad_id=None) -> LongTensor _round_length_group(x: int) -> int at: coeditor.retrieval_model.AttentionMode basic = enum.auto() query2ref = enum.auto() bidirectional = enum.auto() at: coeditor.retrieval_model.RetrivalEncoder encoder: T5Stack attention_mode: AttentionMode _init_embed(self, input_ids: LongTensor) -> Tensor bidirectional_forward(queries: Sequence[LongTensor], references: Sequence[Sequence[LongTensor]]) -> Sequence[Tensor] encode_query_basic(query_ids: LongTensor, query_attention_mask: Tensor, ref_outputs: Sequence[BaseModelOutputWithPastAndCrossAttentions], query_ref_list: Sequence[Sequence[int]]) -> tuple[Tensor, Tensor] encode_query_uni_directional(query_ids: LongTensor, query_attention_mask: BoolTensor, ref_outputs: Sequence[BaseModelOutputWithPastAndCrossAttentions], query_ref_list: Sequence[Sequence[int]]) -> tuple[Tensor, Tensor]
coeditor.code_change/get_changed_spans
Modified
temp-1
237e7217d129b1ba160716f2fcb074989bbdd22d
Implement training for the new data formats.
<0>:<add> if subdelta := delta.for_input_range(line_range).shifted(-line):
# module: coeditor.code_change def get_changed_spans( scope_change: Change[ChangeScope], parent_changes: tuple[Change[ChangeScope], ...] = (), ) -> list[ChangedSpan]: <s> such as class methods), then map the changes to each line back to the original regions. """ def get_modified_spans( old_scope: ChangeScope, new_scope: ChangeScope, parent_changes: Sequence[Change[ChangeScope]], ) -> Iterable[ChangedSpan]: if code_equal(old_scope.spans_code, new_scope.spans_code): return diffs = change_to_line_diffs( Modified(old_scope.spans_code, new_scope.spans_code) ) original, delta = line_diffs_to_original_delta(diffs) line = 0 for span in old_scope.spans: code = span.code line_range = (line, line + len(code.split("\n"))) - if subdelta := delta.for_input_range(line_range): <0> new_code = subdelta.apply_to_input(code) change = Modified(code, new_code) yield ChangedSpan( change, parent_changes, span.line_range, ) line = line_range[1] def recurse( scope_change: Change[ChangeScope], parent_changes ) -> Iterable[ChangedSpan]: parent_changes = (*parent_changes, scope_change) match scope_change: case Modified(old_scope, new_scope): # compute statement differences yield from get_modified_spans(old_scope, new_scope, parent_changes) for sub_change in get_named_changes( old_scope.subscopes, new_scope.subscopes ).values(): yield from recurse(sub_change, parent_changes) case Added(scope) | Deleted(scope): for span in scope.spans: code_change = scope_</s>
===========above chunk 0=========== # module: coeditor.code_change def get_changed_spans( scope_change: Change[ChangeScope], parent_changes: tuple[Change[ChangeScope], ...] = (), ) -> list[ChangedSpan]: # offset: -1 """ Extract the change spans from scope change. - We need a tree differencing algorithm that are robust to element movements. - To compute the changes to each statement region, we can compute the differences by concatenating all the regions before and after the edit (and hiding all the sub spans such as class methods), then map the changes to each line back to the original regions. """ def get_modified</s> ===========below chunk 0=========== # module: coeditor.code_change def get_changed_spans( scope_change: Change[ChangeScope], parent_changes: tuple[Change[ChangeScope], ...] = (), ) -> list[ChangedSpan]: # offset: 1 <s>) case Added(scope) | Deleted(scope): for span in scope.spans: code_change = scope_change.new_value(span.code) yield ChangedSpan( code_change, parent_changes, span.line_range, ) for s in scope.subscopes.values(): s_change = scope_change.new_value(s) yield from recurse(s_change, parent_changes) spans = list(recurse(scope_change, parent_changes)) spans.sort(key=lambda s: s.line_range[0]) return spans ===========unchanged ref 0=========== at: coeditor.code_change ChangeScope(path: ProjectPath, tree: ScopeTree, spans: Sequence["StatementSpan"], subscopes: Mapping[str, Self], parent_scope: "ChangeScope | None") ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange) at: coeditor.code_change.ChangeScope path: ProjectPath tree: ScopeTree spans: Sequence["StatementSpan"] subscopes: Mapping[str, Self] parent_scope: "ChangeScope | None" at: coeditor.code_change.StatementSpan.__post_init__ self.code: str = code + "\n" self.line_range: LineRange = line_range(start, end) at: coeditor.common code_equal(code1: str, code2: str) -> bool at: coeditor.encoding change_to_line_diffs(change: Change[str]) -> list[str] line_diffs_to_original_delta(diffs: list[str]) -> tuple[str, StrDelta] at: typing Iterable = _alias(collections.abc.Iterable, 1) Sequence = _alias(collections.abc.Sequence, 1) ===========changed ref 0=========== # module: coeditor.retrieval_model + @dataclass + class C3DataLoader: + def __len__(self) -> int: + return self._len_est + ===========changed ref 1=========== # module: coeditor.retrieval_model - @dataclass - class _BatchSampler: - def __len__(self) -> int: - return self._len_est - ===========changed ref 2=========== # module: coeditor.retrieval_model + @dataclass + class RetrievalDecodingResult: - class RetrievalDecodingResult(DatasetDecodingResult[TkC3Problem]): + def __post_init__(self): + assert_eq(len(self.problems), len(self.predictions)) + ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __bool__(self) -> bool: + return self.num_changes() > 0 - return bool(self._deltas) ===========changed ref 4=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def shifted(self, shift_lines: int) -> Self: + return StrDelta({k + shift_lines: v for k, v in self._deltas.items()}) + ===========changed ref 5=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + def shifted(self, shift_lines: int) -> Self: + return StrDelta({k + shift_lines: v for k, v in self._deltas.items()}) + ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def shifted(self, shift_lines: int) -> 
Self: + return TkDelta({k + shift_lines: v for k, v in self._deltas.items()}) + ===========changed ref 7=========== # module: coeditor.retrieval_model - @dataclass - class _BatchSampler: - def estimate_n_batches(self) -> int: - batches = tk_edits_to_batches(self.all_edits, self.batch_args, silent=True) - return len(batches) - ===========changed ref 8=========== # module: coeditor.retrieval_model @dataclass class BatchArgs: @classmethod def eval_default(cls) -> Self: return BatchArgs( - max_total_ref_tks=512 * 24, max_queries=32, shuffle_extra_ids=False, ) ===========changed ref 9=========== # module: spot.data + _min_extra_id = DefaultTokenizer.additional_special_tokens_ids[0] + _max_extra_id = DefaultTokenizer.additional_special_tokens_ids[-1] ===========changed ref 10=========== # module: coeditor.retrieval_model - def edits_to_dataloader( - edits: Sequence[TkC3Problem], - args: BatchArgs, - desc: str, - shuffle: bool = False, - tqdm_args: dict | None = None, - ): - assert edits - return _BatchSampler( - list(edits), args, shuffle=shuffle, desc=desc, tqdm_args=tqdm_args - ) - ===========changed ref 11=========== # module: coeditor.retrieval_model - @dataclass - class _BatchSampler: - def __post_init__(self): - if self.shuffle: - random.shuffle(self.all_edits) - self._len_est = self.estimate_n_batches() - self.epochs = 0 - ===========changed ref 12=========== # module: coeditor.retrieval_model + @dataclass + class C3DataLoader: + def __post_init__(self): + n_batches, batch_stats = self.estimate_batch_stats() + self._len_est = n_batches + self._batch_stast = batch_stats + self.epochs = 0 + ===========changed ref 13=========== # module: coeditor.retrieval_model - @dataclass - class _BatchSampler: - all_edits: list[TkC3Problem] - batch_args: BatchArgs - shuffle: bool - desc: str - tqdm_args: dict | None = None - ===========changed ref 14=========== # module: coeditor.retrieval_model @dataclass class BatchArgs: - def cost_limit(self) -> float: - return self.min_queires * retrieval_cost_model( - self.max_total_ref_tks, self.max_query_tks, self.max_output_tks - ) - ===========changed ref 15=========== # module: coeditor.retrieval_model + @dataclass + class C3DataLoader: + all_probs: Sequence[C3Problem] + tokenizer: C3ProblemTokenizer + batch_args: BatchArgs + shuffle: bool + desc: str + tqdm_args: dict | None = None + chunk_size: int = 1000 + workers: int = 10 +
scripts.coeditor.train_retrieval_model/train_model
Modified
temp-1
237e7217d129b1ba160716f2fcb074989bbdd22d
Implement training for the new data formats.
<0>:<add> cprint("blue", "Exact-match samples saved to:", out_dir)
<s>train_retrieval_model def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), + eval_batch_args=BatchArgs.eval_default(), - test_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): <s>model_name, train_loader, eval_loader, train_args) model.to("cuda") with timed_action("Loss Evaluation"): + eval_result = model.eval_loss_on_loader(eval_loader) - eval_result = model.eval_loss_on_data(datasets["test"], test_batch_args) eval_dict = {f"test/{k}": v.average() for k, v in eval_result.items()} wandb.log(eval_dict) max_saved_samples = 300 with timed_action("Accuracy Evaluation"): + dec_result = model.predict_on_data( + datasets["test"], eval_tkn, eval_batch_args, dec_args + ) - dec_result = model.predict_on_data(datasets["test"], test_batch_args, dec_args) pickle_dump(get_model_dir() / model_name / "dec_result.pkl", dec_result) exact_acc, exact_correct_map = dec_result.exact_match_accuracy() wandb.log({"test/exact-acc": exact_acc.average()}) out_dir = get_model_dir() / model_name / "exact_match_samples" dec_result.save_examples_to_dir( out_dir, random_subset(exact_correct_map, max_saved_samples) ) - print("Exact-match samples saved to:", out_dir) <0> return model
===========above chunk 0=========== <s> def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), + eval_batch_args=BatchArgs.eval_default(), - test_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): # offset: -1 <s> <add> warm_up_data, + warmup_tkn, + warmup_bargs, + shuffle=True, + desc="warm-up training", + ) warmup_targs = copy.deepcopy(train_args) warmup_targs.learning_rate *= 4 warmup_targs.max_train_epochs = 1 - all_edits = datasets["train"].all_edits() - warmup_edits = random_subset(all_edits, len(all_edits) // 4) - model.train_on_data( - model_name, - TokenizedEditDataset.from_edits(warmup_edits), - datasets["valid"], - warmup_targs, - batch_args=warmup_bargs, - eval_batch_args=test_batch_args, - ) + model.train_on_data(model_name, warmup_loader, eval_loader, warmup_targs) with timed_action("Fine-tune Training"): - model.train_on_data( - model_name, - datasets["train"], - datasets["valid"], - train_args, - batch_args=batch_args, - eval_batch_args=test_batch_args, - ) + model.train_on_data(model_name, train_loader, eval_loader, train_args) model.to("cuda") with timed</s> ===========above chunk 1=========== <s> def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), + eval_batch_args=BatchArgs.eval_default(), - test_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): # offset: -2 <s> train_tkn = encoder.edit_tokenizer + eval_tkn = copy.deepcopy(train_tkn) + eval_tkn.max_ref_tks_sum *= 2 + eval_loader = C3DataLoader( + datasets["valid"], eval_tkn, eval_batch_args, shuffle=False, desc="eval" + ) + if not eval_only: + train_loader = C3DataLoader( + datasets["train"], train_tkn, batch_args, shuffle=True, desc="training" + ) + with timed_action("Warm-up Training"): warmup_bargs = copy.deepcopy(batch_args) - warmup_bargs.max_total_ref_tks //= 3 + warmup_bargs.min_queries *= 4 - warmup_bargs.min_queires *= 4 warmup_bargs.max_queries *= 2 + + warm_up_data = random_subset(datasets["train"], len(datasets["train"]) // 4) + warmup_tkn = copy.deepcopy(train_tkn) + warmup_tkn.max_ref_tks_sum //= 3 + warmup_loader = C3DataLoader( + warm_up_data, + warmup_tkn, + warmup_bargs, + shuffle</s> ===========above chunk 2=========== <s> def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), + eval_batch_args=BatchArgs.eval_default(), - test_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): # offset: -3 <s>() } project = "Coeditor" if not train_args.quicktest else "Coeditor-quicktest" + if eval_only: + project = "eval-" + project wandb.init(dir="..", project=project, name=model_name, config=config_dict) if train_args.quicktest: print("Using fewer data for quick test.") n_quick_exs = 20 - for name, dataset in datasets.items(): - datasets[name] = TokenizedEditDataset.from_edits( - dataset.all_edits()[:n_quick_exs] - ) + datasets = {name: data[:n_quick_exs] for name, data in datasets.items()} if not eval_only: model = RetrievalEditorModel.from_code_t5( "base", reuse_embed=True, reinit_weights=train_args.reinit_weights ) else: model = RetrievalEditorModel.load(get_model_dir() / model_name) if 
os.getenv("CUDA_VISIBLE_DEVICES") is None: warnings.warn( "CUDA_VISIBLE_DEVICES not set, using 0. Note that " "the Huggingface Trainer will use all visible GPUs for training." ) os.environ["CUDA_VISIBLE_DEVICES"] = "0" </s> ===========above chunk 3=========== <s> def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), + eval_batch_args=BatchArgs.eval_default(), - test_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): # offset: -4 # model_variant = "-file" model_name = f"coeditor-{dataset_name}" model_name += model_variant dec_args = DecodingArgs() if train_args.quicktest: model_name = "quicktest-" + model_name if not eval_only: check_save_dir(model_name) + datasets = make_or_load_datasets( - split2problems = make_or_load_datasets( dataset_name, encoder.change_processor, recreate_data=recreate_data ) - datasets = { - name: TokenizedEditDataset.from_edits(edits) - for name, edits in encoder.edit_tokenizer.tokenize_datasets( - split2problems - ).items() - } config_dict = { k: get_modified_args(v) for k, v in { + "edit_tokenizer": encoder.edit_tokenizer.get_args(), + "batch_args": batch_args, - "data_args": batch_args, "train_args": train_args, "dec_args": dec_args, }</s>
coeditor.ctx_change_encoder/C3ProblemTokenizer.tokenize_problem
Modified
temp-1
237e7217d129b1ba160716f2fcb074989bbdd22d
Implement training for the new data formats.
<0>:<add> named_references=kept_refs,
# module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> TkC3Problem: <s> + below_chunks - all_refs = above_chunks + below_chunks + named_references + ref_size_sum = sum(len(ref) for _, ref in all_refs) + + # compute the references that are relevant to this span + if ref_size_sum < self.max_ref_tks_sum: + changed = self._group_encode_changed_refs(problem.relevant_changes) + for i, chunk in enumerate(changed): + all_refs.append((f"changed ref {i}", TkArray.new(chunk))) + ref_size_sum += sum(len(x) for x in changed) + if ref_size_sum < self.max_ref_tks_sum: + unchanged = self._group_encode_unchanged_refs(problem.relevant_unchanged) + for i, chunk in enumerate(unchanged): + all_refs.append((f"unchanged ref {i}", TkArray.new(chunk))) + + # take until we hit the limit + ref_size_sum = 0 + kept_refs = list[tuple[str, TkArray]]() + for (name, ref) in all_refs: + if ref_size_sum + len(ref) > self.max_ref_tks_sum: + continue + ref_size_sum += len(ref) + kept_refs.append((name, ref)) return TkC3Problem( TkArray.new(scope_tks + chunk_input), TkArray.new(chunk_output), path=span.headers[-1].path, change_type=problem.change_type, - named_references=all_refs, <0> project=problem.src_info["project"], commit=problem.src_info["commit"], )
===========above chunk 0=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> TkC3Problem: # offset: -1 <s>tks = tk_delta.for_input_range((0, edit_start)).apply_to_input(above_tks) below_tks = join_list(origin_lines[edit_end:] + [TokenSeq()], Newline_id) chunk_input, above_tks, below_tks = self._inline_some_context( chunk_input, above_tks, below_tks, input_limit ) chunk_output = truncate_section( chunk_output, TruncateAt.Right, self.max_output_tks, add_bos=False, inplace=True, ) above_chunks = break_into_chunks( above_tks, lambda i: self._encode_headers(span.headers, -1 - i), chunk_size=self.max_ref_tks, overlap=self.ref_chunk_overlap, right_to_left=True, ) if not below_tks: below_chunks = [] else: below_chunks = break_into_chunks( below_tks, lambda i: self._encode_headers(span.headers, i + 1), chunk_size=self.max_ref_tks, overlap=self.ref_chunk_overlap, ) above_chunks = [ (f"above chunk {i}", TkArray.new(chunk)) for i, chunk in enumerate(above_chunks) ] below_chunks = [ (f"below chunk {i}", TkArray.new(chunk)) for i, chunk in enumerate(below_chunks) ] + all_refs = above_chunks + below_chunks - all_refs = above_chunks + below_chunks + named_references + ref_size_sum</s> ===========above chunk 1=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> TkC3Problem: # offset: -2 <s>) assert isinstance(problem.edit_lines, range), "Only support range for now" edit_start = problem.edit_lines[0] edit_end = problem.edit_lines[-1] + 1 scope_tks = self._encode_headers(span.headers, 0) input_limit = self.max_query_tks - len(scope_tks) chunk_input = TokenSeq() chunk_output = TokenSeq() for i in range(len(problem.edit_lines)): chunk_input.append(get_extra_id(i)) l = edit_start + i if l < len(origin_lines): chunk_input.extend(origin_lines[l]) chunk_input.append(Newline_id) line_change = join_list(tk_delta.get_line_change(l), Newline_id) chunk_output.append(get_extra_id(i)) chunk_output.extend(line_change) if line_change and line_change[-1] != Del_id: chunk_output.append(Newline_id) # limit the input size if it's too long chunk_input = truncate_section( chunk_input, TruncateAt.Right, input_limit, inplace=True ) chunk_output = truncate_output_tks(chunk_input, chunk_output) # try move some prev_change_tks into the input above_tks = join_list(origin_lines[:edit_start] + [TokenSeq()], Newline_id) above_tks = tk_delta.for_input_range((0, edit_start)).apply_to_input(above_tks</s> ===========above chunk 2=========== # module: coeditor.ctx_change_encoder @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> TkC3Problem: # offset: -3 span = problem.span - named_references = list[tuple[str, TkArray]]() - # compute the references that are relevant to this span - relevant_chunks = self._group_encode_changed_refs(problem.relevant_changes) - for i, chunk in enumerate(relevant_chunks): - named_references.append((f"changed ref {i}", TkArray.new(chunk))) - relevant_chunks = self._group_encode_unchanged_refs(problem.relevant_unchanged) - for i, chunk in enumerate(relevant_chunks): - named_references.append((f"unchanged ref {i}", TkArray.new(chunk))) original: TokenSeq tk_delta: TkDelta original, tk_delta = change_tks_to_original_delta(span.change_tks.tolist()) origin_lines = split_list(original, Newline</s> ===========unchanged ref 0=========== at: cachetools.lru LRUCache(maxsize: int, getsizeof: Optional[Callable[[_VT], 
int]]=...) at: coeditor.common TokenSeq = list[Token] split_list(lst: list[T1], sep: T1) -> list[list[T1]] join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1] at: coeditor.ctx_change_encoder C3Problem(span: ChangedCodeSpan, edit_lines: Collection[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], change_type: Change[None], src_info: SrcInfo) TkC3Problem(input: TkArray, output: TkArray, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TkArray]], project: str, commit: CommitInfo | None) at: coeditor.ctx_change_encoder.C3Problem span: ChangedCodeSpan edit_lines: Collection[int] relevant_changes: Sequence[ChangedCodeSpan] relevant_unchanged: Sequence[ChangedCodeSpan] change_type: Change[None] src_info: SrcInfo at: coeditor.ctx_change_encoder.C3ProblemTokenizer _encode_headers(scope_changes: Sequence[ChangedHeader], offset: int) -> TokenSeq _inline_some_context(input: TokenSeq, above_ctx: TokenSeq, below_ctx: TokenSeq, size_limit: int) -> tuple[TokenSeq, TokenSeq, TokenSeq] at: coeditor.ctx_change_encoder.ChangedCodeSpan headers: Sequence[ChangedHeader] change_tks: TkArray line_range: LineRange module: ModuleName at: coeditor.ctx_change_encoder.TkC3Problem input: TkArray output: TkArray path: ProjectPath change_type: Change[None]
coeditor.common/rec_add_dict_to
Modified
temp-1
c88166371fbeac9f60c5f8d06dda5c9e20b3f46f
Clean up project. Remove unused code.
<0>:<add> if isinstance(v, Mapping):
# module: coeditor.common def rec_add_dict_to( target: dict[str, Any], + value: Mapping[str, Any], - value: dict[str, Any], value_merger: Callable[[Any, Any], Any] = lambda x, y: x + y, ): for k, v in value.items(): - if isinstance(v, dict): <0> if k not in target: target[k] = {} rec_add_dict_to(target[k], v, value_merger) elif isinstance(v, list): target.setdefault(k, []).extend(v) else: if k in target: target[k] = value_merger(target[k], v) else: target[k] = v
===========unchanged ref 0=========== at: coeditor.common rec_add_dict_to(target: dict[str, Any], value: Mapping[str, Any], value_merger: Callable[[Any, Any], Any]=lambda x, y: x + y) at: typing Mapping = _alias(collections.abc.Mapping, 2) at: typing.Mapping items() -> AbstractSet[Tuple[_KT, _VT_co]] ===========changed ref 0=========== # module: coeditor.common + def code_equal(code1: str, code2: str) -> bool: - def code_equal(code1: str | cst.CSTNode, code2: str | cst.CSTNode) -> bool: - if isinstance(code1, cst.CSTNode): - code1 = show_expr(code1) - if isinstance(code2, cst.CSTNode): - code2 = show_expr(code2) if code1 == code2: return True code1 = normalize_code_by_ast(code1) code2 = normalize_code_by_ast(code2) return code1 == code2
coeditor.dataset/_process_commits
Modified
temp-1
c88166371fbeac9f60c5f8d06dda5c9e20b3f46f
Clean up project. Remove unused code.
<0>:<add> rec_add_dict_to(stats, {"tlogger": scoped_changes._tlogger.times})
# module: coeditor.dataset def _process_commits( root: Path, workdir: Path, commits: Sequence[CommitInfo], is_training: bool, change_processor: ProjectChangeProcessor[C3Problem], ) -> _ProcessingResult: # use process-specific parso cache + fix_jedi_cache(workdir) - _fix_jedi_cache(workdir) + scoped_changes._tlogger.clear() - coeditor.code_change._tlogger.clear() change_processor.clear_stats() change_processor.set_training(is_training) try: # cannot return here since subprocess will be killed after returning edits = edits_from_commit_history( root, commits, tempdir=workdir / "code", change_processor=change_processor, silent=True, time_limit=time_limit_per_commit * (len(commits) + 10), ) except Exception as e: if isinstance(e, KeyboardInterrupt): raise warnings.warn(f"Failed to process project: {root}\nError: {e}") traceback.print_exception(e, limit=-6) edits = [] stats = dict() change_processor.append_stats(stats) - rec_add_dict_to(stats, {"tlogger": coeditor.code_change._tlogger.times}) <0> return _ProcessingResult(edits, stats)
===========unchanged ref 0=========== at: _warnings warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None at: coeditor._utils.TimeLogger times: dict[str, list[float]] = field(default_factory=dict) clear() at: coeditor.common rec_add_dict_to(target: dict[str, Any], value: Mapping[str, Any], value_merger: Callable[[Any, Any], Any]=lambda x, y: x + y) at: coeditor.dataset _ProcessingResult(edits: Sequence[C3Problem], stats: dict[str, dict | Any]) time_limit_per_commit = 10.0 at: coeditor.git CommitInfo(hash: str, parents: tuple[str, ...], msg: str) at: coeditor.scoped_changes _tlogger = TimeLogger() ProjectChangeProcessor() edits_from_commit_history(project_dir: Path, history: Sequence[CommitInfo], tempdir: Path, change_processor: ProjectChangeProcessor[TProb]=NoProcessing(), ignore_dirs=DefaultIgnoreDirs, silent: bool=False, time_limit: float | None=None) -> Sequence[TProb] at: coeditor.scoped_changes.ProjectChangeProcessor clear_stats() append_stats(stats: dict[str, Any]) -> None set_training(is_training: bool) -> None at: pathlib Path() at: traceback print_exception(etype: Optional[Type[BaseException]], value: Optional[BaseException], tb: Optional[TracebackType], limit: Optional[int]=..., file: Optional[IO[str]]=..., chain: bool=...) -> None at: typing Sequence = _alias(collections.abc.Sequence, 1) ===========changed ref 0=========== + # module: coeditor.scoped_changes + class ProjectChangeProcessor(Generic[TProb], ABC): + def set_training(self, is_training: bool) -> None: + return None + ===========changed ref 1=========== + # module: coeditor.scoped_changes + class ProjectChangeProcessor(Generic[TProb], ABC): + def append_stats(self, stats: dict[str, Any]) -> None: + return None + ===========changed ref 2=========== + # module: coeditor.scoped_changes + class ProjectChangeProcessor(Generic[TProb], ABC): + def clear_stats(self): + return None + ===========changed ref 3=========== + # module: coeditor.git + @dataclass(frozen=True) + class CommitInfo: + hash: str + parents: tuple[str, ...] + msg: str + ===========changed ref 4=========== # module: coeditor.common def rec_add_dict_to( target: dict[str, Any], + value: Mapping[str, Any], - value: dict[str, Any], value_merger: Callable[[Any, Any], Any] = lambda x, y: x + y, ): for k, v in value.items(): + if isinstance(v, Mapping): - if isinstance(v, dict): if k not in target: target[k] = {} rec_add_dict_to(target[k], v, value_merger) elif isinstance(v, list): target.setdefault(k, []).extend(v) else: if k in target: target[k] = value_merger(target[k], v) else: target[k] = v ===========changed ref 5=========== + # module: coeditor.scoped_changes + def edits_from_commit_history( + project_dir: Path, + history: Sequence[CommitInfo], + tempdir: Path, + change_processor: ProjectChangeProcessor[TProb] = NoProcessing(), + ignore_dirs=DefaultIgnoreDirs, + silent: bool = False, + time_limit: float | None = None, + ) -> Sequence[TProb]: + """Incrementally compute the edits to a project from the git history. + Note that this will change the file states in the project directory, so + you should make a copy of the project before calling this function. 
+ """ + tempdir = tempdir.resolve() + if tempdir.exists(): + raise FileExistsError(f"Workdir '{tempdir}' already exists.") + use_fast_parser = jedi.settings.fast_parser + tempdir.mkdir(parents=True, exist_ok=False) + try: + run_command( + ["cp", "-r", str(project_dir / ".git"), str(tempdir)], + cwd=project_dir.parent, + ) + + return _edits_from_commit_history( + tempdir, + history, + change_processor, + ignore_dirs, + silent, + time_limit=time_limit, + ) + finally: + shutil.rmtree(tempdir) + jedi.settings.fast_parser = use_fast_parser + ===========changed ref 6=========== + # module: coeditor.scoped_changes + + ===========changed ref 7=========== + # module: coeditor.git + + ===========changed ref 8=========== + # module: coeditor.scoped_changes + class ProjectChangeProcessor(Generic[TProb], ABC): + @abstractmethod + def process_change( + self, pchange: "JProjectChange", pre_analysis: Any, post_analysis: Any + ) -> Sequence[TProb]: + ... + ===========changed ref 9=========== + # module: coeditor.change + class _ChangeBase(Generic[E1]): + @abstractmethod + def later(self) -> E1: + ... + ===========changed ref 10=========== + # module: coeditor.change + class _ChangeBase(Generic[E1]): + @abstractmethod + def earlier(self) -> E1: + ... + ===========changed ref 11=========== + # module: coeditor.scoped_changes + class ProjectChangeProcessor(Generic[TProb], ABC): + def post_edit_analysis( + self, + pstate: ProjectState, + modules: Mapping[RelPath, JModule], + changes: Mapping[ModuleName, JModuleChange], + ) -> Any: + return None + ===========changed ref 12=========== + # module: coeditor.scoped_changes + class ProjectChangeProcessor(Generic[TProb], ABC): + def pre_edit_analysis( + self, + pstate: ProjectState, + modules: Mapping[RelPath, JModule], + changes: Mapping[ModuleName, JModuleChange], + ) -> Any: + return None + ===========changed ref 13=========== # module: coeditor.encoding - class EditEncoder(Generic[T1], ABC): - @abstractmethod - def encode_pedit( - self, - pedit: ProjectEdit, - training: bool, - ) -> Iterable[T1]: - pass - ===========changed ref 14=========== + # module: coeditor.scoped_changes + _Second = float + ===========changed ref 15=========== + # module: coeditor.change + @dataclass(frozen=True) + class Modified(_ChangeBase[E1]): + @staticmethod + def as_char(): + return "M" + ===========changed ref 16=========== + # module: coeditor.change + @dataclass(frozen=True) + class Modified(_ChangeBase[E1]): + def later(self) -> E1: + return self.after + ===========changed ref 17=========== + # module: coeditor.change + @dataclass(frozen=True) + class Modified(_ChangeBase[E1]): + def earlier(self) -> E1: + return self.before + ===========changed ref 18=========== + # module: coeditor.change + @dataclass(frozen=True) + class Deleted(_ChangeBase[E1]): + @staticmethod + def as_char(): + return "D" + ===========changed ref 19=========== + # module: coeditor.change + @dataclass(frozen=True) + class Deleted(_ChangeBase[E1]): + def later(self) -> E1: + return self.before +
coeditor.dataset/make_or_load_datasets
Modified
temp-1
c88166371fbeac9f60c5f8d06dda5c9e20b3f46f
Clean up project. Remove unused code.
<0>:<add> save_dir.mkdir(parents=True, exist_ok=True)
# module: coeditor.dataset def make_or_load_datasets( dataset_name: str, change_processor: ProjectChangeProcessor[C3Problem], recreate_data: bool = False, workers: int = DefaultWorkers, ) -> Mapping[str, Sequence[C3Problem]]: config_str = repr_modified_args(change_processor) + save_dir = get_dataset_dir(dataset_name) / "processed" / config_str - save_dir = get_dataset_dir(dataset_name) / config_str if recreate_data or not save_dir.exists(): if dataset_name == "SPOT": datasets = { "test": dataset_from_projects( [proj_root()], change_processor, [False], workers=workers ) } else: datasets = datasets_from_repos( get_dataset_dir(dataset_name) / "repos", change_processor, workers=workers, ) with timed_action("Saving datasets to disk"): <0> save_datasets(datasets, save_dir) print("Tokenized dataset saved to:", save_dir) else: with timed_action("Loading datasets from disk"): datasets = load_datasets(save_dir) size_info = run_command(["du", "-ha", "."], save_dir) print(f"Dataset sizes:") print(size_info) return datasets
===========unchanged ref 0=========== at: coeditor._utils DefaultWorkers: int = multiprocessing.cpu_count() // 2 global DefaultWorkers timed_action(name: str, silent: bool=False) repr_modified_args(instance, flatten: bool=False) -> str at: coeditor.common proj_root() -> Path get_dataset_dir(dataname: str) -> Path run_command(args: Sequence[str], cwd: str | Path) -> str at: coeditor.dataset dataset_from_projects(project_roots: Sequence[Path], change_processor: ProjectChangeProcessor[C3Problem], repo_training: Sequence[bool], max_history_per_repo: int=1000, workers: int=DefaultWorkers) -> "Mapping[Path, Sequence[C3Problem]]" datasets_from_repos(repos_root: Path, change_processor: ProjectChangeProcessor[C3Problem], max_history_per_repo: int=1000, workers: int=DefaultWorkers) -> Mapping[str, Sequence[C3Problem]] save_datasets(datasets: Mapping[str, Any], save_dir: Path) -> None load_datasets(save_dir: Path, splits=("test", "valid", "train")) -> Mapping[str, Any] at: coeditor.scoped_changes ProjectChangeProcessor() at: pathlib.Path __slots__ = () mkdir(mode: int=..., parents: bool=..., exist_ok: bool=...) -> None exists() -> bool at: pathlib.PurePath __slots__ = ( '_drv', '_root', '_parts', '_str', '_hash', '_pparts', '_cached_cparts', ) drive = property(attrgetter('_drv'), doc="""The drive prefix (letter or UNC path), if any.""") root = property(attrgetter('_root'), doc="""The root of the path, if any.""") at: typing Mapping = _alias(collections.abc.Mapping, 2) ===========unchanged ref 1=========== Sequence = _alias(collections.abc.Sequence, 1) ===========changed ref 0=========== # module: coeditor.dataset def _process_commits( root: Path, workdir: Path, commits: Sequence[CommitInfo], is_training: bool, change_processor: ProjectChangeProcessor[C3Problem], ) -> _ProcessingResult: # use process-specific parso cache + fix_jedi_cache(workdir) - _fix_jedi_cache(workdir) + scoped_changes._tlogger.clear() - coeditor.code_change._tlogger.clear() change_processor.clear_stats() change_processor.set_training(is_training) try: # cannot return here since subprocess will be killed after returning edits = edits_from_commit_history( root, commits, tempdir=workdir / "code", change_processor=change_processor, silent=True, time_limit=time_limit_per_commit * (len(commits) + 10), ) except Exception as e: if isinstance(e, KeyboardInterrupt): raise warnings.warn(f"Failed to process project: {root}\nError: {e}") traceback.print_exception(e, limit=-6) edits = [] stats = dict() change_processor.append_stats(stats) + rec_add_dict_to(stats, {"tlogger": scoped_changes._tlogger.times}) - rec_add_dict_to(stats, {"tlogger": coeditor.code_change._tlogger.times}) return _ProcessingResult(edits, stats) ===========changed ref 1=========== + # module: coeditor.scoped_changes + + ===========changed ref 2=========== + # module: coeditor.git + + ===========changed ref 3=========== + # module: coeditor.scoped_changes + class ProjectChangeProcessor(Generic[TProb], ABC): + @abstractmethod + def process_change( + self, pchange: "JProjectChange", pre_analysis: Any, post_analysis: Any + ) -> Sequence[TProb]: + ... + ===========changed ref 4=========== + # module: coeditor.change + class _ChangeBase(Generic[E1]): + @abstractmethod + def later(self) -> E1: + ... + ===========changed ref 5=========== + # module: coeditor.change + class _ChangeBase(Generic[E1]): + @abstractmethod + def earlier(self) -> E1: + ... 
+ ===========changed ref 6=========== + # module: coeditor.scoped_changes + class ProjectChangeProcessor(Generic[TProb], ABC): + def set_training(self, is_training: bool) -> None: + return None + ===========changed ref 7=========== + # module: coeditor.scoped_changes + class ProjectChangeProcessor(Generic[TProb], ABC): + def append_stats(self, stats: dict[str, Any]) -> None: + return None + ===========changed ref 8=========== + # module: coeditor.scoped_changes + class ProjectChangeProcessor(Generic[TProb], ABC): + def clear_stats(self): + return None + ===========changed ref 9=========== + # module: coeditor.scoped_changes + class ProjectChangeProcessor(Generic[TProb], ABC): + def post_edit_analysis( + self, + pstate: ProjectState, + modules: Mapping[RelPath, JModule], + changes: Mapping[ModuleName, JModuleChange], + ) -> Any: + return None + ===========changed ref 10=========== + # module: coeditor.scoped_changes + class ProjectChangeProcessor(Generic[TProb], ABC): + def pre_edit_analysis( + self, + pstate: ProjectState, + modules: Mapping[RelPath, JModule], + changes: Mapping[ModuleName, JModuleChange], + ) -> Any: + return None + ===========changed ref 11=========== # module: coeditor.encoding - class EditEncoder(Generic[T1], ABC): - @abstractmethod - def encode_pedit( - self, - pedit: ProjectEdit, - training: bool, - ) -> Iterable[T1]: - pass - ===========changed ref 12=========== + # module: coeditor.scoped_changes + _Second = float + ===========changed ref 13=========== + # module: coeditor.change + @dataclass(frozen=True) + class Modified(_ChangeBase[E1]): + @staticmethod + def as_char(): + return "M" + ===========changed ref 14=========== + # module: coeditor.change + @dataclass(frozen=True) + class Modified(_ChangeBase[E1]): + def later(self) -> E1: + return self.after + ===========changed ref 15=========== + # module: coeditor.change + @dataclass(frozen=True) + class Modified(_ChangeBase[E1]): + def earlier(self) -> E1: + return self.before + ===========changed ref 16=========== + # module: coeditor.change + @dataclass(frozen=True) + class Deleted(_ChangeBase[E1]): + @staticmethod + def as_char(): + return "D" + ===========changed ref 17=========== + # module: coeditor.change + @dataclass(frozen=True) + class Deleted(_ChangeBase[E1]): + def later(self) -> E1: + return self.before + ===========changed ref 18=========== + # module: coeditor.change + @dataclass(frozen=True) + class Deleted(_ChangeBase[E1]): + def earlier(self) -> E1: + return self.before + ===========changed ref 19=========== + # module: coeditor.change + @dataclass(frozen=True) + class Deleted(_ChangeBase[E1]): + before: E1 + ===========changed ref 20=========== + # module: coeditor.change + @dataclass(frozen=True) + class Added(_ChangeBase[E1]): + @staticmethod + def as_char(): + return "A" + ===========changed ref 21=========== + # module: coeditor.change + @dataclass(frozen=True) + class Added(_ChangeBase[E1]): + def later(self) -> E1: + return self.after +
coeditor._utils/replace_strs_by_pos
Modified
temp-1
19b037f36e74d7495cee5705bd945be6886bb6eb
Remove unused dependencies. - Update setup.py
<0>:<add> last_pos = CodePosition((len(lines), len(lines[-1]) + 1))
# module: coeditor._utils def replace_strs_by_pos(original: str, replaces: Sequence[tuple[CodeRange, int, str]]): <s>"ptr: {ptr}, target: {target}" + out_segs.append(lines[ptr[0] - 1][ptr[1] - 1 : target[1] - 1]) - out_segs.append(lines[ptr.line - 1][ptr.column - 1 : target.column - 1]) ptr = target + replaces_sorted = sorted(replaces, key=lambda x: (as_tuple(x[0][0]), x[1])) - replaces_sorted = sorted(replaces, key=lambda x: (as_tuple(x[0].start), x[1])) - # for (r1, t1), (r2, t2) in zip(replaces_sorted, replaces_sorted[1:]): - # assert as_tuple(r1.end) <= as_tuple( - # r2.start - # ), f"overlapping ranges:\n {r1}: {t1}\n {r2}: {t2}" while bool(replaces_sorted): r, _, rtext = replaces_sorted.pop(0) try: + advance_to(r[0], True) - advance_to(r.start, True) except IndexError: raise IndexError( + f"{r[0]} is out of range. Trying to replace with text <<{rtext}>>. Original str:\n<<{original}>>" - f"{r.start} is out of range. Trying to replace with text <<{rtext}>>. Original str:\n<<{original}>>" ) + advance_to(r[1], False) - advance_to(r.end, False) out_segs.append(rtext) - last_pos = CodePosition(len(lines), len(lines[-1]) + 1) <0> advance_to(last_pos, True) return "".join(out_segs)
===========above chunk 0=========== # module: coeditor._utils def replace_strs_by_pos(original: str, replaces: Sequence[tuple[CodeRange, int, str]]): # offset: -1 """Replace the parts specificed by `replaces` with the given strings. Each entry of `replaces` is a tuple of (code_range, priority, new_str).""" def as_tuple(p: CodePosition): + return p - return (p.line, p.column) lines = original.split("\n") out_segs = list[str]() + ptr = CodePosition((1, 1)) - ptr = CodePosition(1, 1) def advance_to(target: CodePosition, output: bool): nonlocal ptr if as_tuple(target) <= as_tuple(ptr): return + assert ptr[0] <= target[0], f"ptr: {ptr}, target: {target}" - assert ptr.line <= target.line, f"ptr: {ptr}, target: {target}" if output: + while ptr[0] < target[0]: - while ptr.line < target.line: + out_segs.append(lines[ptr[0] - 1][ptr[1] - 1 :]) - out_segs.append(lines[ptr.line - 1][ptr.column - 1 :]) out_segs.append("\n") + ptr = CodePosition((ptr[0] + 1, 1)) - ptr = CodePosition(ptr.line + 1, 1) + assert ptr[0] == target[0], f"ptr: {ptr}, target: {target}" - assert ptr.line == target.line, f"ptr: {ptr}, target: {target}" + out_segs.append(lines[ptr[0] - 1][</s> ===========unchanged ref 0=========== at: coeditor._utils CodePosition = NewType("CodePosition", tuple[int, int]) at: typing Sequence = _alias(collections.abc.Sequence, 1)
coeditor._utils/show_code_range
Modified
temp-1
19b037f36e74d7495cee5705bd945be6886bb6eb
Remove unused dependencies. - Update setup.py
<0>:<add> return f"[{crange[0][0]}:{crange[0][1]+1}--{crange[1][0]}:{crange[1][1]+1}]"
# module: coeditor._utils def show_code_range(crange: CodeRange) -> str: - return f"[{crange.start.line}:{crange.start.column+1}--{crange.end.line}:{crange.end.column+1}]" <0>
===========changed ref 0=========== # module: coeditor._utils + CodePosition = NewType("CodePosition", tuple[int, int]) + CodeRange = tuple[CodePosition, CodePosition] + _pushover_warned = [False] ===========changed ref 1=========== # module: coeditor._utils def replace_strs_by_pos(original: str, replaces: Sequence[tuple[CodeRange, int, str]]): """Replace the parts specificed by `replaces` with the given strings. Each entry of `replaces` is a tuple of (code_range, priority, new_str).""" def as_tuple(p: CodePosition): + return p - return (p.line, p.column) lines = original.split("\n") out_segs = list[str]() + ptr = CodePosition((1, 1)) - ptr = CodePosition(1, 1) def advance_to(target: CodePosition, output: bool): nonlocal ptr if as_tuple(target) <= as_tuple(ptr): return + assert ptr[0] <= target[0], f"ptr: {ptr}, target: {target}" - assert ptr.line <= target.line, f"ptr: {ptr}, target: {target}" if output: + while ptr[0] < target[0]: - while ptr.line < target.line: + out_segs.append(lines[ptr[0] - 1][ptr[1] - 1 :]) - out_segs.append(lines[ptr.line - 1][ptr.column - 1 :]) out_segs.append("\n") + ptr = CodePosition((ptr[0] + 1, 1)) - ptr = CodePosition(ptr.line + 1, 1) + assert ptr[0] == target[0], f"ptr: {ptr}, target: {target}" - assert ptr.line == target.line, f"ptr: {ptr}, target: {target}" + out_segs.append(lines[ptr[0] - 1][ptr[1] - 1 : target[1] - 1]) - out_segs.append(lines[ptr.line - 1][ptr.column - 1 : target.column - 1]) ptr = target + replaces_sorted = sorted(replaces, key=lambda x: (as_</s> ===========changed ref 2=========== # module: coeditor._utils def replace_strs_by_pos(original: str, replaces: Sequence[tuple[CodeRange, int, str]]): # offset: 1 <s>column - 1]) ptr = target + replaces_sorted = sorted(replaces, key=lambda x: (as_tuple(x[0][0]), x[1])) - replaces_sorted = sorted(replaces, key=lambda x: (as_tuple(x[0].start), x[1])) - # for (r1, t1), (r2, t2) in zip(replaces_sorted, replaces_sorted[1:]): - # assert as_tuple(r1.end) <= as_tuple( - # r2.start - # ), f"overlapping ranges:\n {r1}: {t1}\n {r2}: {t2}" while bool(replaces_sorted): r, _, rtext = replaces_sorted.pop(0) try: + advance_to(r[0], True) - advance_to(r.start, True) except IndexError: raise IndexError( + f"{r[0]} is out of range. Trying to replace with text <<{rtext}>>. Original str:\n<<{original}>>" - f"{r.start} is out of range. Trying to replace with text <<{rtext}>>. Original str:\n<<{original}>>" ) + advance_to(r[1], False) - advance_to(r.end, False) out_segs.append(rtext) + last_pos = CodePosition((len(lines), len(lines[-1]) + 1)) - last_pos = CodePosition(len(lines), len(lines[-1]) + 1) advance_to(last_pos, True) return "".join(out_segs)
coeditor._utils/add_line_numbers
Modified
temp-1
59740e7a9d5b2998ca11a37f650e7b056ba4da4f
Implement c3 change dropout.
<0>:<add> return "\n".join(format_s.format(ln=i + start, line=l) for i, l in enumerate(lines))
# module: coeditor._utils + def add_line_numbers(code: str, start: int = 1): - def add_line_numbers(code: str): lines = code.split("\n") ln_digits = int(math.log(len(lines), 10)) + 1 format_s = "{ln:" + str(ln_digits) + "d}| {line}" - return "\n".join(format_s.format(ln=i + 1, line=l) for i, l in enumerate(lines)) <0>
===========unchanged ref 0=========== at: math log(x: SupportsFloat, base: SupportsFloat=...) -> float
coeditor.encoding/TokenizedEdit.show_prediction
Modified
temp-1
59740e7a9d5b2998ca11a37f650e7b056ba4da4f
Implement c3 change dropout.
<0>:<add> line = " |" + show_content(line_tks)
# module: coeditor.encoding class TokenizedEdit(ABC): def show_prediction(self, pred_tks: TokenSeq | None = None) -> str: <s>_id) seg = seg + origin_line label = show_label(id_map.get(k, -1)) lines.append(f"{label}:{indent(decode_tokens(seg), ' ' * 4).lstrip()}") return "".join(lines) main_segs = output_ids_as_seqs(self.main_tks) id_map = {k: i for i, k in enumerate(main_segs)} main_lines = list[str]() for line_tks in split_list(self.main_tks, Newline_id): if line_tks and is_extra_id(line_tks[0]): + prefix = show_label(id_map.get(line_tks[0], -1)) - line = show_label(id_map.get(line_tks[0], -1)) + decode_tokens( - line_tks[1:] - ) + line = prefix + show_content(line_tks[1:]) else: - line = decode_tokens(line_tks) <0> main_lines.append(line) pred_lines = ( ["========Prediction========", f"{show_extra_tokens(pred_tks, main_segs)}"] if pred_tks else [] ) outputs = [ "-" * 80, *self.meta_data_lines(), "========Ground Truth========", show_extra_tokens(self.output_tks, main_segs), *pred_lines, "========Main Code========", "\n".join(main_lines), ] + [ f"==========={name}===========\n" + decode_tokens(tks) for name, tks in self.all_ctxs().items() ] return "\n".join(outputs)
===========above chunk 0=========== # module: coeditor.encoding class TokenizedEdit(ABC): def show_prediction(self, pred_tks: TokenSeq | None = None) -> str: # offset: -1 def show_label(i: int): return f" <{i}>" if i <= 9 else f"<{i}>" + + def show_content(tks: TokenSeq): + if tks and tks[0] == Add_id: + return "+ " + decode_tokens(tks[1:]) + elif tks and tks[0] == Del_id: + return "- " + decode_tokens(tks[1:]) + else: + return " " + decode_tokens(tks) def show_extra_tokens(tks: TokenSeq, main_tk_lines: dict[Token, TokenSeq]): segs = output_ids_as_seqs(tks) lines = [] for k, seg in segs.items(): if not seg: continue # skip empty lines if seg[-1] == Del_id: + # show the deleted line - # show the delted line + origin_line = split_list(main_tk_lines.get(k, []), Newline_id)[0] - origin_line = main_tk_lines.get(k, []) + origin_line.append(Newline_id) seg = seg + origin_line label = show_label(id_map.get(k, -1</s> ===========unchanged ref 0=========== at: coeditor.common TokenSeq = list[Token] at: coeditor.encoding BOS_id = get_tk_id("<s>") EOS_id = get_tk_id("</s>") at: typing Callable = _CallableType(collections.abc.Callable, 2) ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + @staticmethod + def empty() -> "TkDelta": + return TkDelta({}) + ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + @staticmethod + def from_change_tks(change_tks: TokenSeq) -> tuple[TokenSeq, "TkDelta"]: + "Return the original input and the delta." + return change_tks_to_original_delta(change_tks) + ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def get_new_target_lines(self, lines: Sequence[int]) -> Sequence[int]: + """Given a list of lines to edit, return the corresponding new lines to edit + after applying this delta.""" + new_edit_lines = list[int]() + offset = 0 + for l in lines: + deleted = False + for act in self.get_line_change(l): + if act[0] == Add_id: + new_edit_lines.append(l + offset) + offset += 1 + elif act[0] == Del_id: + deleted = True + if not deleted: + new_edit_lines.append(l + offset) + return tuple(new_edit_lines) + ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def __getitem__(self, key: DeltaKey) -> TokenSeq: + line, i = key + return self._deltas[line][i] + ===========changed ref 4=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def changed_lines(self) -> Collection[int]: + return self._deltas.keys() + ===========changed ref 5=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def keys(self) -> Iterable[DeltaKey]: + for k, _ in self.items(): + yield k + ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def items(self) -> Iterable[tuple[DeltaKey, TokenSeq]]: + for l, acts in self._deltas.items(): + for i, act in enumerate(acts): + yield DeltaKey((l, i)), act + ===========changed ref 7=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __repr__(self): line_diffs = "\n".join( + f" {k}: {tuple(map(decode_tokens, a))}" for k, a in self._deltas.items() - f" {k}: {tuple(map(decode_tokens, a))}" - for k, a in self._deltas.items() - if a ) return f"TkDelta(\n{line_diffs}\n)" ===========changed ref 8=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def decompose_for_input( + self, first_keys: 
Collection[DeltaKey] + ) -> tuple[Self, Self]: + """ + Decompose the delta into two deltas such that applying them sequentially + using `apply_to_input` is equivalent to applying the original delta. + """ + key_set = set(first_keys) + acts1 = dict[int, list[TokenSeq]]() + acts2 = dict[int, list[TokenSeq]]() + l_shift = 0 + for l, acts in self._deltas.items(): + for i, act in enumerate(acts): + key = DeltaKey((l, i)) + if key in key_set: + acts1.setdefault(l, []).append(act) + l_shift += 1 if act[0] == Add_id else -1 + else: + acts2.setdefault(l + l_shift, []).append(act) + delta1 = TkDelta({k: tuple(v) for k, v in acts1.items()}) + delta2 = TkDelta({k: tuple(v) for k, v in acts2.items()}) + return delta1, delta2 + ===========changed ref 9=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def decompose_for_change( + self, first_keys: Collection[DeltaKey] + ) -> tuple[Self, Self]: + """ + Decompose the delta into two deltas such that applying them sequentially + using `to_change_tks` is equivalent to applying the original delta. + """ + + key_set = set(first_keys) + acts1 = dict[int, list[TokenSeq]]() + acts2 = dict[int, list[TokenSeq]]() + l_shift = 0 + for l, acts in self._deltas.items(): + for i, act in enumerate(acts): + key = DeltaKey((l, i)) + if key in key_set: + acts1.setdefault(l, []).append(act) + if act[0] == Add_id: + l_shift += 1 + else: + assert act[0] == Del_id + # the additions cannot be applied to a <del> line + if prev_acts := acts2.pop(l + l_shift, None): + acts2[l + l_shift + 1] = prev_acts + else: + acts2.setdefault(l + l_shift, []).append(act) + delta1 = TkDelta({k: tuple(v) for k, v in acts1.items()}) + delta2 = TkDelta({k: tuple(v) for k, v in acts2.items()}) + return delta1, delta2 +
coeditor.c3problem/C3ProblemTokenizer.tokenize_problem
Modified
temp-1
59740e7a9d5b2998ca11a37f650e7b056ba4da4f
Implement c3 change dropout.
<0>:<add> below_tks = join_list(origin_lines[edit_stop:] + [TokenSeq()], Newline_id)
# module: coeditor.c3problem @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> TkC3Problem: <s> + 1 # limit the input size if it's too long chunk_input = truncate_section( chunk_input, TruncateAt.Right, input_limit, inplace=True ) chunk_output = truncate_output_tks(chunk_input, chunk_output) # try move some prev_change_tks into the input above_tks = join_list(origin_lines[:edit_start] + [TokenSeq()], Newline_id) + above_tks = tk_delta.for_input_range((0, edit_start)).to_change_tks(above_tks) - above_tks = tk_delta.for_input_range((0, edit_start)).apply_to_input(above_tks) - below_tks = join_list(origin_lines[edit_end:] + [TokenSeq()], Newline_id) <0> chunk_input, above_tks, below_tks = self._inline_some_context( chunk_input, above_tks, below_tks, input_limit ) chunk_output = truncate_section( chunk_output, TruncateAt.Right, self.max_output_tks, add_bos=False, inplace=True, ) above_chunks = break_into_chunks( above_tks, lambda i: self._encode_headers(span.headers, -1 - i), chunk_size=self.max_ref_tks, overlap=self.ref_chunk_overlap, right_to_left=True, ) if not below_tks: below_chunks = [] else: below_chunks = break_into_chunks( below_tks, lambda i: self._encode_headers(span.headers, i + 1), chunk_size</s>
===========above chunk 0=========== # module: coeditor.c3problem @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> TkC3Problem: # offset: -1 <s> = change_tks_to_original_delta(span.change_tks.tolist()) origin_lines = split_list(original, Newline_id) - assert isinstance(problem.edit_lines, range), "Only support range for now" edit_start = problem.edit_lines[0] - edit_end = problem.edit_lines[-1] + 1 scope_tks = self._encode_headers(span.headers, 0) input_limit = self.max_query_tks - len(scope_tks) chunk_input = TokenSeq() chunk_output = TokenSeq() + last_line = edit_start + for i, l in enumerate(problem.edit_lines): - for i in range(len(problem.edit_lines)): + for line in origin_lines[last_line + 1 : l]: + chunk_input.extend(line) + chunk_input.append(Newline_id) + chunk_input.append(get_extra_id(i)) - l = edit_start + i if l < len(origin_lines): chunk_input.extend(origin_lines[l]) chunk_input.append(Newline_id) + last_line = l line_change = join_list(tk_delta.get_line_change(l), Newline_id) chunk_output.append(get_extra_id(i)) chunk_output.extend(line_change) if line_change and line_change[-1] != Del_id: chunk_output.append(Newline_id) + if len(chunk_input) > input_limit: + break + edit_stop = last_line + 1 # limit the input size if it's too long chunk_input = truncate_section( chunk_input</s> ===========above chunk 1=========== # module: coeditor.c3problem @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> TkC3Problem: # offset: -2 span = problem.span + original: TokenSeq = span.original.tolist() - original: TokenSeq + tk_delta: TkDelta = span.delta - tk_delta: TkDelta - original, tk_delta = change_tks_to_original_delta(span.change_tks.tolist()) origin_lines = split_</s> ===========below chunk 0=========== # module: coeditor.c3problem @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> TkC3Problem: # offset: 1 <s> below_tks, lambda i: self._encode_headers(span.headers, i + 1), chunk_size=self.max_ref_tks, overlap=self.ref_chunk_overlap, ) above_chunks = [ (f"above chunk {i}", TkArray.new(chunk)) for i, chunk in enumerate(above_chunks) ] below_chunks = [ (f"below chunk {i}", TkArray.new(chunk)) for i, chunk in enumerate(below_chunks) ] all_refs = above_chunks + below_chunks ref_size_sum = sum(len(ref) for _, ref in all_refs) # compute the references that are relevant to this span if ref_size_sum < self.max_ref_tks_sum: changed = self._group_encode_changed_refs(problem.relevant_changes) for i, chunk in enumerate(changed): all_refs.append((f"changed ref {i}", TkArray.new(chunk))) ref_size_sum += sum(len(x) for x in changed) if ref_size_sum < self.max_ref_tks_sum: unchanged = self._group_encode_unchanged_refs(problem.relevant_unchanged) for i, chunk in enumerate(unchanged): all_refs.append((f"unchanged ref {i}", TkArray.new(chunk))) # take until we hit the limit ref_size_sum = 0 kept_refs = list[tuple[str, TkArray]]() for (name, ref) in all_refs: if ref_size_sum + len(ref) > self.max_ref_tks_sum: continue ref_size_sum += len(ref) kept_refs</s> ===========below chunk 1=========== # module: coeditor.c3problem @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> TkC3Problem: # offset: 2 <s>.max_ref_tks_sum: continue ref_size_sum += len(ref) kept_refs.append((name, ref)) return TkC3Problem( TkArray.new(scope_tks + chunk_input), TkArray.new(chunk_output), path=span.headers[-1].path, change_type=problem.change_type, named_references=kept_refs, 
project=problem.src_info["project"], commit=problem.src_info["commit"], ) ===========unchanged ref 0=========== at: cachetools.lru LRUCache(maxsize: int, getsizeof: Optional[Callable[[_VT], int]]=...) at: coeditor.c3problem C3Problem(span: ChangedCodeSpan, edit_lines: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=()) at: coeditor.c3problem.C3Problem span: ChangedCodeSpan edit_lines: Sequence[int] relevant_changes: Sequence[ChangedCodeSpan] relevant_unchanged: Sequence[ChangedCodeSpan] change_type: Change[None] src_info: SrcInfo transformations: tuple[str, ...] = () at: coeditor.c3problem.C3ProblemTokenizer _encode_headers(scope_changes: Sequence[ChangedHeader], offset: int) -> TokenSeq at: coeditor.c3problem.ChangedCodeSpan headers: Sequence[ChangedHeader] original: TkArray delta: TkDelta line_range: LineRange module: ModuleName at: coeditor.change Change = Added[E1] | Deleted[E1] | Modified[E1] at: coeditor.change.Added after: E1 as_char() at: coeditor.change.Deleted before: E1 as_char() at: coeditor.change.Modified before: E1 after: E1 unchanged: bool = False as_char() at: coeditor.common TokenSeq = list[Token] split_list(lst: list[T1], sep: T1) -> list[list[T1]] ProjectPath(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any) at: coeditor.encoding Newline_id = get_tk_id("\n")
coeditor.c3problem/C3ProblemTokenizer._group_encode_changed_refs
Modified
temp-1
59740e7a9d5b2998ca11a37f650e7b056ba4da4f
Implement c3 change dropout.
<0>:<add> segs.append(c_tks)
# module: coeditor.c3problem @dataclass class C3ProblemTokenizer: def _group_encode_changed_refs( self, changes: Sequence[ChangedCodeSpan] ) -> Sequence[TokenSeq]: module2changes = groupby(changes, lambda c: c.module) all_chunks = list[TokenSeq]() for change_group in module2changes.values(): change_group.sort(key=lambda c: c.line_range[0]) segs = list[TokenSeq]() # we'll add module as the chunk header, so we start within the module last_scope = change_group[0].headers[:1] for c in change_group: header_diff = list[ChangedHeader]() for i, h in enumerate(c.headers): if i >= len(last_scope) or h.path != last_scope[i].path: header_diff.append(h) if header_diff: header_tks = self._encode_headers(header_diff, 0) segs.append(header_tks) + c_tks = c.delta.to_change_tks(c.original.tolist()) - segs.append(c.change_tks.tolist()) <0> segs.append([Newline_id, Newline_id]) last_scope = c.headers segs.append([Newline_id]) mod_change = change_group[0].headers[:1] mod_chunks = break_into_chunks( join_list(segs), lambda i: self._encode_headers(mod_change, i), self.max_ref_tks, overlap=self.ref_chunk_overlap, ) all_chunks.extend(mod_chunks) return all_chunks
===========unchanged ref 0=========== at: coeditor.c3problem TkC3Problem(input: TkArray, output: TkArray, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TkArray]], project: str, commit: CommitInfo | None) at: coeditor.c3problem.C3Problem relevant_changes: Sequence[ChangedCodeSpan] relevant_unchanged: Sequence[ChangedCodeSpan] at: coeditor.c3problem.C3ProblemTokenizer max_ref_tks_sum: int = 512 * 12 _group_encode_unchanged_refs(elems: Sequence[ChangedCodeSpan]) -> Sequence[TokenSeq] _group_encode_changed_refs(changes: Sequence[ChangedCodeSpan]) -> Sequence[TokenSeq] at: coeditor.c3problem.C3ProblemTokenizer.tokenize_problem above_chunks = [ (f"above chunk {i}", TkArray.new(chunk)) for i, chunk in enumerate(above_chunks) ] above_chunks = break_into_chunks( above_tks, lambda i: self._encode_headers(span.headers, -1 - i), chunk_size=self.max_ref_tks, overlap=self.ref_chunk_overlap, right_to_left=True, ) at: coeditor.tk_array TkArray() at: coeditor.tk_array.TkArray new(tks: Sequence[int]) -> "TkArray" ===========changed ref 0=========== # module: coeditor.c3problem + class C3ProblemTransformer(ABC): + @abstractmethod + def transform(self, prob: C3Problem) -> Iterable[C3Problem]: + ... + ===========changed ref 1=========== # module: coeditor.c3problem + class C3ProblemTransformer(ABC): + "A strategy to generate new C3 problems from the orginal ones." + ===========changed ref 2=========== # module: coeditor.c3problem + class C3ProblemSimpleSplit(C3ProblemTransformer): + "Simply split the problem into fixed-sized editing ranges." + max_lines_to_edit: int = 25 + max_split_factor: int = 4 + ===========changed ref 3=========== # module: coeditor.c3problem + class C3ProblemChangeDropout(C3ProblemTransformer): + """Split the problem into fixed-sized editing ranges like `C3ProblemSimpleSplit`, + but also randomly keep some subset of changes in the input.""" + + max_lines_to_edit: int = 25 + max_split_factor: int = 4 + # the probability of dropping out some changes into the input + dropout_prob: float = 0.5 + # when dropping the changes into the input, the biggest ratio of changes to drop + max_dropout_ratio: float = 0.5 + ===========changed ref 4=========== # module: coeditor.c3problem + class C3ProblemSimpleSplit(C3ProblemTransformer): + def transform(self, prob: C3Problem) -> Sequence[C3Problem]: + delta = prob.span.delta + l_range = prob.edit_lines + assert isinstance(l_range, range) + start, stop = l_range.start, l_range.stop + problems = list[C3Problem]() + new_trans = prob.transformations + ("split",) + for i in range(start, stop, self.max_lines_to_edit): + j = min(i + self.max_lines_to_edit, stop) + sub_delta = delta.for_input_range((i, j)) + if sub_delta.num_changes() > 0: + sub_prob = dataclasses.replace( + prob, edit_lines=range(i, j), transformations=new_trans + ) + problems.append(sub_prob) + if len(problems) >= self.max_split_factor: + break + return problems + ===========changed ref 5=========== # module: coeditor.c3problem class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]): + """ + ### Change log + - v2.3: always generate problems with full editing range and move the problem + splitting logic elsewhere. Also changed the data format of `ChangedCodeSpan`. 
+ """ + + VERSION = "2.3" - VERSION = "2.2" # change spans with more than this many lines will be ignored max_span_lines: int = 500 - max_lines_to_edit: int = 25 - max_problems_per_elem: int = 4 ===========changed ref 6=========== # module: coeditor.c3problem @dataclass(frozen=True) class ChangedCodeSpan: """Represents the changes made to a span of code. This format does not store parent syntax nodes and is more suitable for serialization. """ headers: Sequence[ChangedHeader] + original: TkArray + delta: TkDelta - change_tks: TkArray # below are pre-edit attributes line_range: LineRange module: ModuleName ===========changed ref 7=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: "Contextual code change prediction problem." span: ChangedCodeSpan # the lines to be edited + edit_lines: Sequence[int] - edit_lines: Collection[int] # most relevant to least relevant relevant_changes: Sequence[ChangedCodeSpan] # most relevant to least relevant relevant_unchanged: Sequence[ChangedCodeSpan] # some optional information about how the problem was generated change_type: Change[None] src_info: SrcInfo + transformations: tuple[str, ...] = () ===========changed ref 8=========== # module: coeditor.c3problem + class C3ProblemChangeDropout(C3ProblemTransformer): + def transform(self, prob: C3Problem) -> Sequence[C3Problem]: + original = prob.span.original + delta = prob.span.delta + l_range = prob.edit_lines + assert isinstance(l_range, range) + start, stop = l_range.start, l_range.stop + problems = list[C3Problem]() + for i in range(start, stop, self.max_lines_to_edit): + j = min(i + self.max_lines_to_edit, stop) + sub_delta = delta.for_input_range((i, j)) + if sub_delta.num_changes() == 0: + continue + grouped_keys = sub_delta.change_groups() + should_dropout = ( + len(grouped_keys) >= 2 and random.random() < self.dropout_prob + ) + if should_dropout: + n_to_drop = int( + len(grouped_keys) * random.random() * self.max_dropout_ratio + ) + assert n_to_drop < len(grouped_keys) + keys_to_drop = join_list(random_subset(grouped_keys, n_to_drop)) + else: + keys_to_drop = [] + if keys_to_drop: + _, delta2 = sub_delta.decompose_for_change(keys_to_drop) + if delta2.num_changes() == 0: + continue + delta1, delta2 = delta.decompose_for_change(keys_to_drop) + new_original = TkArray.new(delta1.to_change_tks(original.tolist())) + new_trans = prob.transformations + ("split", "dropout") + new_span = dataclasses.replace( + prob.span, original=new_original, delta=delta2 + ) + </s>
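Before chunking, _group_encode_changed_refs buckets the changed spans by module and sorts each bucket by its start line. That grouping step in isolation looks roughly like this (the tuple layout is invented for the example; the real code works on ChangedCodeSpan objects and then re-encodes each span from its original tokens plus delta):

from collections import defaultdict

def group_by_module(spans):
    # spans are (module, start_line, text) triples here, purely for illustration
    buckets = defaultdict(list)
    for span in spans:
        buckets[span[0]].append(span)
    for group in buckets.values():
        group.sort(key=lambda s: s[1])   # order each module's spans by start line
    return buckets

spans = [("pkg.a", 40, "x"), ("pkg.b", 3, "y"), ("pkg.a", 10, "z")]
assert group_by_module(spans)["pkg.a"] == [("pkg.a", 10, "z"), ("pkg.a", 40, "x")]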
coeditor.model/RetrievalDecodingResult.exact_match_accuracy
Modified
temp-1
59740e7a9d5b2998ca11a37f650e7b056ba4da4f
Implement c3 change dropout.
<0>:<add> original = prob.span.original.tolist()
# module: coeditor.model @dataclass class RetrievalDecodingResult: def exact_match_accuracy(self) -> tuple[CountedSum, dict[int, bool]]: ex2correct = dict[int, bool]() for i, mp in enumerate(self.predictions): prob = self.problems[i] - change_tks = prob.span.change_tks.tolist() - original, _ = change_tks_to_original_delta(change_tks) <0> pred_delta = TkDelta.from_output_tks(mp["output_ids"]) label_delta = TkDelta.from_output_tks(mp["labels"]) assert isinstance(prob.edit_lines, range) line_shift = prob.edit_lines.start pred_code = pred_delta.shifted(line_shift).apply_to_input(original) label_code = label_delta.shifted(line_shift).apply_to_input(original) is_correct = code_equal(decode_tokens(pred_code), decode_tokens(label_code)) ex2correct[i] = is_correct correct_count = CountedSum(sum(ex2correct.values()), len(ex2correct)) return correct_count, ex2correct
===========unchanged ref 0=========== at: coeditor.c3problem.C3Problem span: ChangedCodeSpan edit_line_ids: Sequence[int] relevant_changes: Sequence[ChangedCodeSpan] relevant_unchanged: Mapping["PyFullName", "PyDefinition"] change_type: Change[None] src_info: SrcInfo transformations: tuple[str, ...] = () at: coeditor.c3problem.ChangedCodeSpan headers: Sequence[ChangedHeader] original: TkArray delta: TkDelta line_range: LineRange module: ModuleName at: coeditor.common CountedSum = WeightedSum[int, int] code_equal(code1: str, code2: str) -> bool at: coeditor.encoding decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]]) at: coeditor.encoding.TkDelta _deltas: Mapping[int, tuple[TokenSeq, ...]] apply_to_input(input: TokenSeq) shifted(shift_lines: int) -> Self from_output_tks(edit_line_ids: Sequence[int], tks: TokenSeq, allow_truncated_tks: bool=True) -> "TkDelta" at: coeditor.model.RetrievalDecodingResult eval_args: dict problems: Sequence[C3Problem] predictions: Sequence[RetrievalModelPrediction] at: coeditor.tk_array.TkArray tolist() -> TokenSeq ===========changed ref 0=========== # module: coeditor.c3problem + class C3ProblemTransformer(ABC): + @abstractmethod + def transform(self, prob: C3Problem) -> Iterable[C3Problem]: + ... + ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + @staticmethod + def empty() -> "TkDelta": + return TkDelta({}) + ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def changed_lines(self) -> Collection[int]: + return self._deltas.keys() + ===========changed ref 3=========== # module: coeditor.common + SEP = "-" * 80 HtmlCode = str ===========changed ref 4=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def keys(self) -> Iterable[DeltaKey]: + for k, _ in self.items(): + yield k + ===========changed ref 5=========== # module: coeditor.c3problem + class C3ProblemTransformer(ABC): + "A strategy to generate new C3 problems from the orginal ones." + ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def __getitem__(self, key: DeltaKey) -> TokenSeq: + line, i = key + return self._deltas[line][i] + ===========changed ref 7=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + @staticmethod + def from_change_tks(change_tks: TokenSeq) -> tuple[TokenSeq, "TkDelta"]: + "Return the original input and the delta." + return change_tks_to_original_delta(change_tks) + ===========changed ref 8=========== # module: coeditor.common + def print_sections( + sections: list[tuple[str, str]], + sep: str = SEP, + ) -> None: + for title, content in sections: + print(sep) + print(f"{title}:") + print(content) + ===========changed ref 9=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def items(self) -> Iterable[tuple[DeltaKey, TokenSeq]]: + for l, acts in self._deltas.items(): + for i, act in enumerate(acts): + yield DeltaKey((l, i)), act + ===========changed ref 10=========== # module: coeditor.c3problem + class C3ProblemSimpleSplit(C3ProblemTransformer): + "Simply split the problem into fixed-sized editing ranges." 
+ max_lines_to_edit: int = 25 + max_split_factor: int = 4 + ===========changed ref 11=========== # module: coeditor.encoding + # (line, action id) + DeltaKey = NewType("DeltaKey", tuple[int, int]) + TEdit = TypeVar("TEdit", bound=TokenizedEdit) ===========changed ref 12=========== # module: coeditor.c3problem @dataclass(frozen=True) class ChangedCodeSpan: """Represents the changes made to a span of code. This format does not store parent syntax nodes and is more suitable for serialization. """ headers: Sequence[ChangedHeader] + original: TkArray + delta: TkDelta - change_tks: TkArray # below are pre-edit attributes line_range: LineRange module: ModuleName ===========changed ref 13=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __repr__(self): line_diffs = "\n".join( + f" {k}: {tuple(map(decode_tokens, a))}" for k, a in self._deltas.items() - f" {k}: {tuple(map(decode_tokens, a))}" - for k, a in self._deltas.items() - if a ) return f"TkDelta(\n{line_diffs}\n)" ===========changed ref 14=========== # module: coeditor.c3problem + class C3ProblemChangeDropout(C3ProblemTransformer): + """Split the problem into fixed-sized editing ranges like `C3ProblemSimpleSplit`, + but also randomly keep some subset of changes in the input.""" + + max_lines_to_edit: int = 25 + max_split_factor: int = 4 + # the probability of dropping out some changes into the input + dropout_prob: float = 0.5 + # when dropping the changes into the input, the biggest ratio of changes to drop + max_dropout_ratio: float = 0.5 + ===========changed ref 15=========== # module: coeditor.c3problem class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]): + """ + ### Change log + - v2.3: always generate problems with full editing range and move the problem + splitting logic elsewhere. Also changed the data format of `ChangedCodeSpan`. + """ + + VERSION = "2.3" - VERSION = "2.2" # change spans with more than this many lines will be ignored max_span_lines: int = 500 - max_lines_to_edit: int = 25 - max_problems_per_elem: int = 4 ===========changed ref 16=========== # module: coeditor._utils + def add_line_numbers(code: str, start: int = 1): - def add_line_numbers(code: str): lines = code.split("\n") ln_digits = int(math.log(len(lines), 10)) + 1 format_s = "{ln:" + str(ln_digits) + "d}| {line}" + return "\n".join(format_s.format(ln=i + start, line=l) for i, l in enumerate(lines)) - return "\n".join(format_s.format(ln=i + 1, line=l) for i, l in enumerate(lines))
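exact_match_accuracy above applies the predicted delta and the gold delta to the same original code and counts how often the results agree. The bookkeeping, stripped of tokenization details (apply_delta is a stand-in for TkDelta.apply_to_input, and plain string equality stands in for the code_equal check used in the record):

def exact_match_ratio(originals, pred_deltas, label_deltas, apply_delta):
    per_example = {}
    for i, original in enumerate(originals):
        per_example[i] = apply_delta(original, pred_deltas[i]) == apply_delta(
            original, label_deltas[i]
        )
    return sum(per_example.values()) / max(1, len(per_example)), per_example

# trivial usage with string "deltas" that just append a suffix
ratio, detail = exact_match_ratio(
    ["x"], ["+a"], ["+a"], lambda code, delta: code + delta
)
assert ratio == 1.0 and detail == {0: True}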
coeditor.model/RetrievalEditorModel.train_on_data
Modified
temp-1
59740e7a9d5b2998ca11a37f650e7b056ba4da4f
Implement c3 change dropout.
<0>:<add> logging_steps=max(1, min(1000, epoch_steps // 10)),
# module: coeditor.model class RetrievalEditorModel(T5PreTrainedModel): def train_on_data( self, training_name: str, train_loader: "C3DataLoader", eval_loader: "C3DataLoader", train_args: "TrainingArgs", ) -> None: <s>_loader(as_any(dataloader)) n_samples = metrics["loss_per_ex"].weight metrics = { f"{metric_key_prefix}_{k}": v.mean() for k, v in metrics.items() } return EvalLoopOutput( predictions=tuple(), label_ids=tuple(), metrics=metrics, num_samples=n_samples, ) epoch_steps = len(train_loader) cprint("blue", "Number of training batches (estimate):", epoch_steps) trainer_args = Seq2SeqTrainingArguments( output_dir=str(train_dir), overwrite_output_dir=True, evaluation_strategy="epoch", save_strategy="epoch", - logging_steps=max(1, epoch_steps // 10), <0> num_train_epochs=train_args.max_train_epochs, save_total_limit=2, lr_scheduler_type=train_args.lr_scheduler_type, learning_rate=train_args.learning_rate, weight_decay=train_args.weight_decay, metric_for_best_model="loss_per_tk", greater_is_better=False, fp16=True, load_best_model_at_end=True, push_to_hub=False, report_to=["wandb"], disable_tqdm=True, # torchdynamo="inductor", # use compiled model ) trainer = DynamicTrainer( self, trainer_args, callbacks=[EarlyStoppingCallback(early_stopping_patience=1)], ) trainer.train()</s>
===========above chunk 0=========== # module: coeditor.model class RetrievalEditorModel(T5PreTrainedModel): def train_on_data( self, training_name: str, train_loader: "C3DataLoader", eval_loader: "C3DataLoader", train_args: "TrainingArgs", ) -> None: # offset: -1 train_dir = get_model_dir(trained=False) / training_name eval_loader.tqdm_args = {"disable": True} model = self # model = torch.compile(self.to("cuda")) # pytorch doesn't support python 3.11 yet. class DynamicTrainer(Seq2SeqTrainer): def get_train_dataloader(self): return train_loader def get_eval_dataloader(self, eval_dataset): return eval_loader def evaluation_loop( self, dataloader, description: str, prediction_loss_only: Optional[bool] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", ) -> EvalLoopOutput: metrics = model.eval_loss_on_loader(as_any(dataloader)) n_samples = metrics["loss_per_ex"].weight </s> ===========below chunk 0=========== # module: coeditor.model class RetrievalEditorModel(T5PreTrainedModel): def train_on_data( self, training_name: str, train_loader: "C3DataLoader", eval_loader: "C3DataLoader", train_args: "TrainingArgs", ) -> None: # offset: 1 <s> callbacks=[EarlyStoppingCallback(early_stopping_patience=1)], ) trainer.train() save_dir = get_model_dir(trained=True) / training_name self.save(save_dir) print("Model saved to:", save_dir) ===========unchanged ref 0=========== at: coeditor._utils as_any(x) -> Any cprint(color: str, *elems, sep: Optional[str]=..., end: Optional[str]=..., file: Optional[SupportsWrite[str]]=..., flush: bool=...) at: coeditor.common get_model_dir(trained=True) -> Path at: coeditor.common.WeightedSum sum: V weight: W mean() -> float at: coeditor.model.C3DataLoader all_probs: Sequence[C3Problem] tokenizer: C3ProblemTokenizer batch_args: BatchArgs shuffle: bool desc: str tqdm_args: dict | None = None chunk_size: int = 1000 workers: int = 10 at: coeditor.model.RetrievalEditorModel is_parallelizable = False supports_gradient_checkpointing = False eval_loss_on_loader(dataloader: "C3DataLoader") decorate_autocast(dataloader: "C3DataLoader") save(save_dir: Path, is_main_process: bool=True, state_dict: Optional[dict]=None, save_function: Callable=torch.save, push_to_hub: bool=False, max_shard_size: Union[int, str]="10GB", safe_serialization: bool=False, variant: Optional[str]=None, /, *, is_main_process: bool=True, state_dict: Optional[dict]=None, save_function: Callable=torch.save, push_to_hub: bool=False, max_shard_size: Union[int, str]="10GB", safe_serialization: bool=False, variant: Optional[str]=None, private: Optional[bool]=None, use_auth_token: Optional[Union[bool, str]]=None, repo_url: Optional[str]=None, organization: Optional[str]=None) at: coeditor.model.TrainingArgs learning_rate: float = 2e-5 ===========unchanged ref 1=========== weight_decay: float = 0.01 max_train_epochs: int = 3 reinit_weights: bool = False quicktest: bool = False lr_scheduler_type: SchedulerType = SchedulerType.LINEAR at: transformers.trainer.Trainer get_train_dataloader(self) -> DataLoader get_eval_dataloader(self, eval_dataset: Optional[Dataset]=None) -> DataLoader train(resume_from_checkpoint: Optional[Union[str, bool]]=None, trial: Union["optuna.Trial", Dict[str, Any]]=None, ignore_keys_for_eval: Optional[List[str]]=None, **kwargs) evaluation_loop(self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str="eval") -> EvalLoopOutput at: transformers.trainer_callback 
EarlyStoppingCallback(early_stopping_patience: int=1, early_stopping_threshold: Optional[float]=0.0) at: transformers.trainer_seq2seq Seq2SeqTrainer(model: Union["PreTrainedModel", nn.Module]=None, args: "TrainingArguments"=None, data_collator: Optional["DataCollator"]=None, train_dataset: Optional[Dataset]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, tokenizer: Optional["PreTrainedTokenizerBase"]=None, model_init: Optional[Callable[[], "PreTrainedModel"]]=None, compute_metrics: Optional[Callable[["EvalPrediction"], Dict]]=None, callbacks: Optional[List["TrainerCallback"]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None)
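The one-line fix in the train_on_data record caps the Trainer's logging interval at 1000 steps while keeping it at roughly a tenth of an epoch for smaller runs. The clamp itself is plain arithmetic, restated here as a hypothetical helper with a usage check:

def logging_interval(epoch_steps: int) -> int:
    # a tenth of an epoch, but never fewer than 1 or more than 1000 steps
    return max(1, min(1000, epoch_steps // 10))

assert logging_interval(50) == 5
assert logging_interval(50_000) == 1000
assert logging_interval(3) == 1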
coeditor.dataset/make_or_load_datasets
Modified
temp-1
59740e7a9d5b2998ca11a37f650e7b056ba4da4f
Implement c3 change dropout.
<0>:<add> ).values()
# module: coeditor.dataset def make_or_load_datasets( dataset_name: str, change_processor: ProjectChangeProcessor[C3Problem], recreate_data: bool = False, workers: int = DefaultWorkers, ) -> Mapping[str, Sequence[C3Problem]]: config_str = repr_modified_args(change_processor) save_dir = get_dataset_dir(dataset_name) / "processed" / config_str if recreate_data or not save_dir.exists(): if dataset_name == "SPOT": datasets = { + "test": join_list( + dataset_from_projects( - "test": dataset_from_projects( + [proj_root()], change_processor, [False], workers=workers - [proj_root()], change_processor, [False], workers=workers <0> ) } else: datasets = datasets_from_repos( get_dataset_dir(dataset_name) / "repos", change_processor, workers=workers, ) with timed_action("Saving datasets to disk"): save_dir.mkdir(parents=True, exist_ok=True) save_datasets(datasets, save_dir) print("Tokenized dataset saved to:", save_dir) else: with timed_action("Loading datasets from disk"): datasets = load_datasets(save_dir) size_info = run_command(["du", "-ha", "."], save_dir) print(f"Dataset sizes:") print(size_info) return datasets
===========unchanged ref 0=========== at: coeditor._utils DefaultWorkers: int = multiprocessing.cpu_count() // 2 global DefaultWorkers timed_action(name: str, silent: bool=False) repr_modified_args(instance, flatten: bool=False) -> str at: coeditor.c3problem C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=()) at: coeditor.common proj_root() -> Path get_dataset_dir(dataname: str) -> Path join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1] at: coeditor.dataset dataset_from_projects(project_roots: Sequence[Path], change_processor: ProjectChangeProcessor[C3Problem], repo_training: Sequence[bool], max_history_per_repo: int=1000, workers: int=DefaultWorkers) -> "Mapping[Path, Sequence[C3Problem]]" datasets_from_repos(repos_root: Path, change_processor: ProjectChangeProcessor[C3Problem], max_history_per_repo: int=1000, workers: int=DefaultWorkers) -> dict[str, Sequence[C3Problem]] save_datasets(datasets: Mapping[str, Any], save_dir: Path) -> None at: coeditor.dataset.datasets_from_repos projects[split] = ps dataset = dataset_from_projects( join_list(projects.values()), change_processor=change_processor, repo_training=join_list(split_is_training.values()), max_history_per_repo=max_history_per_repo, workers=workers, ) at: coeditor.scoped_changes ProjectChangeProcessor() at: pathlib.Path __slots__ = () ===========unchanged ref 1=========== mkdir(mode: int=..., parents: bool=..., exist_ok: bool=...) -> None exists() -> bool at: pathlib.PurePath __slots__ = ( '_drv', '_root', '_parts', '_str', '_hash', '_pparts', '_cached_cparts', ) drive = property(attrgetter('_drv'), doc="""The drive prefix (letter or UNC path), if any.""") root = property(attrgetter('_root'), doc="""The root of the path, if any.""") at: typing Mapping = _alias(collections.abc.Mapping, 2) Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping values() -> ValuesView[_VT_co] ===========changed ref 0=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: "Contextual code change prediction problem." span: ChangedCodeSpan # the lines to be edited + edit_lines: Sequence[int] - edit_lines: Collection[int] # most relevant to least relevant relevant_changes: Sequence[ChangedCodeSpan] # most relevant to least relevant relevant_unchanged: Sequence[ChangedCodeSpan] # some optional information about how the problem was generated change_type: Change[None] src_info: SrcInfo + transformations: tuple[str, ...] = () ===========changed ref 1=========== # module: coeditor.dataset @dataclass class C3EditEncoder: change_processor: ProjectChangeProcessor[C3Problem] = field( default_factory=C3ProblemGenerator ) + problem_tranformer: C3ProblemTransformer = field( + default_factory=C3ProblemSimpleSplit + ) edit_tokenizer: C3ProblemTokenizer = field(default_factory=C3ProblemTokenizer) ===========changed ref 2=========== # module: coeditor.c3problem + class C3ProblemTransformer(ABC): + @abstractmethod + def transform(self, prob: C3Problem) -> Iterable[C3Problem]: + ... 
+ ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + @staticmethod + def empty() -> "TkDelta": + return TkDelta({}) + ===========changed ref 4=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def changed_lines(self) -> Collection[int]: + return self._deltas.keys() + ===========changed ref 5=========== # module: coeditor.common + SEP = "-" * 80 HtmlCode = str ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def keys(self) -> Iterable[DeltaKey]: + for k, _ in self.items(): + yield k + ===========changed ref 7=========== # module: coeditor.c3problem + class C3ProblemTransformer(ABC): + "A strategy to generate new C3 problems from the orginal ones." + ===========changed ref 8=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def __getitem__(self, key: DeltaKey) -> TokenSeq: + line, i = key + return self._deltas[line][i] + ===========changed ref 9=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + @staticmethod + def from_change_tks(change_tks: TokenSeq) -> tuple[TokenSeq, "TkDelta"]: + "Return the original input and the delta." + return change_tks_to_original_delta(change_tks) + ===========changed ref 10=========== # module: coeditor.common + def print_sections( + sections: list[tuple[str, str]], + sep: str = SEP, + ) -> None: + for title, content in sections: + print(sep) + print(f"{title}:") + print(content) + ===========changed ref 11=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: + def items(self) -> Iterable[tuple[DeltaKey, TokenSeq]]: + for l, acts in self._deltas.items(): + for i, act in enumerate(acts): + yield DeltaKey((l, i)), act + ===========changed ref 12=========== # module: coeditor.c3problem + class C3ProblemSimpleSplit(C3ProblemTransformer): + "Simply split the problem into fixed-sized editing ranges." + max_lines_to_edit: int = 25 + max_split_factor: int = 4 + ===========changed ref 13=========== # module: coeditor.encoding + # (line, action id) + DeltaKey = NewType("DeltaKey", tuple[int, int]) + TEdit = TypeVar("TEdit", bound=TokenizedEdit) ===========changed ref 14=========== # module: coeditor.c3problem @dataclass(frozen=True) class ChangedCodeSpan: """Represents the changes made to a span of code. This format does not store parent syntax nodes and is more suitable for serialization. """ headers: Sequence[ChangedHeader] + original: TkArray + delta: TkDelta - change_tks: TkArray # below are pre-edit attributes line_range: LineRange module: ModuleName ===========changed ref 15=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def __repr__(self): line_diffs = "\n".join( + f" {k}: {tuple(map(decode_tokens, a))}" for k, a in self._deltas.items() - f" {k}: {tuple(map(decode_tokens, a))}" - for k, a in self._deltas.items() - if a ) return f"TkDelta(\n{line_diffs}\n)"
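The ground-truth edit closes the call as ).values() and wraps it in join_list because dataset_from_projects returns a per-project mapping; joining the mapping's values yields the single flat problem list the test split expects. A minimal stand-in for that flattening (flatten here mimics join_list without its separator argument):

from typing import Iterable, TypeVar

T = TypeVar("T")

def flatten(segs: Iterable[Iterable[T]]) -> list[T]:
    # sketch of what join_list does when no separator is passed
    return [item for seg in segs for item in seg]

per_project = {"repoA": ["p1", "p2"], "repoB": ["p3"]}
assert flatten(per_project.values()) == ["p1", "p2", "p3"]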
scripts.train_model/train_model
Modified
temp-1
59740e7a9d5b2998ca11a37f650e7b056ba4da4f
Implement c3 change dropout.
<0>:<add> datasets = {split: transform_data(data) for split, data in datasets.items()}
# module: scripts.train_model def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), eval_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): <s> "base", reuse_embed=True, reinit_weights=train_args.reinit_weights ) else: model = RetrievalEditorModel.load(get_model_dir() / model_name) if os.getenv("CUDA_VISIBLE_DEVICES") is None: warnings.warn( "CUDA_VISIBLE_DEVICES not set, using 0. Note that " "the Huggingface Trainer will use all visible GPUs for training." ) os.environ["CUDA_VISIBLE_DEVICES"] = "0" + + def transform_data(data: Sequence[C3Problem]) -> list[C3Problem]: + transformed = pmap(encoder.problem_tranformer.transform, data, chunksize=1000) + return join_list(transformed) + <0> train_tkn = encoder.edit_tokenizer eval_tkn = copy.deepcopy(train_tkn) eval_tkn.max_ref_tks_sum *= 2 eval_loader = C3DataLoader( datasets["valid"], eval_tkn, eval_batch_args, shuffle=False, desc="eval" ) if not eval_only: train_loader = C3DataLoader( datasets["train"], train_tkn, batch_args, shuffle=True, desc="training" ) with timed_action("Warm-up Training"): warmup_bargs = copy.deepcopy(batch_args) warmup_bargs.min_queries *= 4 warmup_bargs.max_queries *= 2 warm_up_</s>
===========above chunk 0=========== # module: scripts.train_model def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), eval_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): # offset: -1 <s> "-file" model_name = f"coeditor-{dataset_name}" model_name += model_variant dec_args = DecodingArgs() if train_args.quicktest: model_name = "quicktest-" + model_name if not eval_only: check_save_dir(model_name) datasets = make_or_load_datasets( dataset_name, encoder.change_processor, recreate_data=recreate_data ) config_dict = { k: get_modified_args(v) for k, v in { "edit_tokenizer": encoder.edit_tokenizer.get_args(), "batch_args": batch_args, "train_args": train_args, "dec_args": dec_args, }.items() } project = "Coeditor" if not train_args.quicktest else "Coeditor-quicktest" if eval_only: project = "eval-" + project wandb.init(dir="..", project=project, name=model_name, config=config_dict) if train_args.quicktest: print("Using fewer data for quick test.") n_quick_exs = 20 datasets = {name: data[:n_quick_exs] for name, data in datasets.items()} if not eval_only: model = RetrievalEditorModel.from_code_t5( "base", reuse_embed=True, reinit_weights=train_args.reinit_weights ) else: </s> ===========above chunk 1=========== # module: scripts.train_model def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), eval_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): # offset: -2 # model_variant = "-file" model_name = f"coeditor-{dataset_name}" model_name += model_variant </s> ===========below chunk 0=========== # module: scripts.train_model def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), eval_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): # offset: 1 <s>up_bargs.min_queries *= 4 warmup_bargs.max_queries *= 2 warm_up_data = random_subset(datasets["train"], len(datasets["train"]) // 4) warmup_tkn = copy.deepcopy(train_tkn) warmup_tkn.max_ref_tks_sum //= 3 warmup_loader = C3DataLoader( warm_up_data, warmup_tkn, warmup_bargs, shuffle=True, desc="warm-up training", ) warmup_targs = copy.deepcopy(train_args) warmup_targs.learning_rate *= 4 warmup_targs.max_train_epochs = 1 model.train_on_data(model_name, warmup_loader, eval_loader, warmup_targs) with timed_action("Fine-tune Training"): model.train_on_data(model_name, train_loader, eval_loader, train_args) model.to("cuda") with timed_action("Loss Evaluation"): eval_result = model.eval_loss_on_loader(eval_loader) eval_dict = {f"test/{k}": v.average() for k, v in eval_result.items()} wandb.log(eval_dict) max_saved_samples = 300 with timed_action("Accuracy Evaluation"): dec_result = model.predict_on_data( datasets["test"], eval_tkn, eval_batch_</s> ===========below chunk 1=========== # module: scripts.train_model def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), eval_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): # offset: 2 <s> dec_result = 
model.predict_on_data( datasets["test"], eval_tkn, eval_batch_args, dec_args ) pickle_dump(get_model_dir() / model_name / "dec_result.pkl", dec_result) exact_acc, exact_correct_map = dec_result.exact_match_accuracy() wandb.log({"test/exact-acc": exact_acc.average()}) out_dir = get_model_dir() / model_name / "exact_match_samples" dec_result.save_examples_to_dir( out_dir, random_subset(exact_correct_map, max_saved_samples) ) cprint("blue", "Exact-match samples saved to:", out_dir) return model ===========unchanged ref 0=========== at: _warnings warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None at: coeditor._utils pmap(f: Callable[..., T1], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], /, *iterables: Iterable[Any], desc: str | None=None, key_args: Mapping[str, Any] | None=None, max_workers: int | None=None, chunksize: int | None=None, tqdm_args: Mapping[str, Any] | None=None) -> list[T1] timed_action(name: str, silent: bool=False) pickle_dump(file: Path, obj: Any) get_modified_args(instance, flatten: bool=False) -> dict[str, Any] | None at: coeditor.c3problem C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=()) at: coeditor.common get_model_dir(trained=True) -> Path join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1] random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2] random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1] at: coeditor.common.WeightedSum sum: V weight: W average() -> float
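transform_data in the train_model record maps the configured problem transformer over every C3Problem with pmap and flattens the per-problem outputs with join_list. Sequentially, the same operation is one nested comprehension (the names below are hypothetical):

def transform_data_sequential(transform, problems):
    # each problem may expand into several transformed problems
    return [out for problem in problems for out in transform(problem)]

# toy transformer that splits a "problem" into two pieces
split_in_two = lambda p: [p[: len(p) // 2], p[len(p) // 2 :]]
assert transform_data_sequential(split_in_two, ["abcd"]) == ["ab", "cd"]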
coeditor.encoding/StrDelta.apply_to_input
Modified
temp-1
10c5e6f0d8a5440b63a9a41bac37d5eca50b6d6b
Update c3ProblemChangeDropout to v1.1.
<0>:<add> lines = splitlines(input)
# module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): - lines = input.split("\n") <0> new_lines = list[str]() for i, line in enumerate(lines): deleted = False if delta := self._deltas.get(i): for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines)
===========unchanged ref 0=========== at: coeditor.common splitlines(text: str) -> list[str] at: coeditor.encoding.StrDelta _deltas: Mapping[int, tuple[str, ...]] at: typing.Mapping get(key: _KT) -> Optional[_VT_co] get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] ===========changed ref 0=========== # module: coeditor.common def splitlines(text: str) -> list[str]: """Split a text into lines and apalways ends with an empty line.""" + if not text: + return [] return text.split("\n") ===========changed ref 1=========== # module: coeditor.common def count_lines(text: str) -> int: + if not text: + return 0 return text.count("\n") + 1 ===========changed ref 2=========== # module: coeditor.common def split_list( lst: list[T1], sep: T1, ) -> list[list[T1]]: """ Split a list into segments by a separator, always ends with an empty list. """ + if not lst: + return [] result = list[list[T1]]() ptr = 0 for i, item in enumerate(lst): if item == sep: result.append(lst[ptr:i]) ptr = i + 1 result.append(lst[ptr:]) return result
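The only behavioural difference between input.split("\n") and the splitlines helper adopted here is the empty string: the built-in split always yields at least one element, while the helper, per the changed ref above, yields none, so an empty region contributes no lines for the delta to index. A quick check:

def splitlines(text: str) -> list[str]:
    # mirrors the changed ref: empty text maps to zero lines
    return [] if not text else text.split("\n")

assert "".split("\n") == [""]      # built-in split: one empty line
assert splitlines("") == []        # project helper: no lines at all
assert splitlines("a\nb") == ["a", "b"]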
coeditor.encoding/StrDelta.__repr__
Modified
temp-1
10c5e6f0d8a5440b63a9a41bac37d5eca50b6d6b
Update c3ProblemChangeDropout to v1.1.
<0>:<add> line_diffs = "\n".join(f" {l}: {a}" for l, a in self._deltas.items())
# module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) <0> return f"StrDelta(\n{line_diffs}\n)"
===========unchanged ref 0=========== at: coeditor.encoding.StrDelta _deltas: Mapping[int, tuple[str, ...]] at: typing.Mapping items() -> AbstractSet[Tuple[_KT, _VT_co]] ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): + lines = splitlines(input) - lines = input.split("\n") new_lines = list[str]() for i, line in enumerate(lines): deleted = False if delta := self._deltas.get(i): for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines) ===========changed ref 1=========== # module: coeditor.common def count_lines(text: str) -> int: + if not text: + return 0 return text.count("\n") + 1 ===========changed ref 2=========== # module: coeditor.common def splitlines(text: str) -> list[str]: """Split a text into lines and apalways ends with an empty line.""" + if not text: + return [] return text.split("\n") ===========changed ref 3=========== # module: coeditor.common def split_list( lst: list[T1], sep: T1, ) -> list[list[T1]]: """ Split a list into segments by a separator, always ends with an empty list. """ + if not lst: + return [] result = list[list[T1]]() ptr = 0 for i, item in enumerate(lst): if item == sep: result.append(lst[ptr:i]) ptr = i + 1 result.append(lst[ptr:]) return result
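The __repr__ fix swaps enumerate(self._deltas) for self._deltas.items(): enumerating a mapping walks its keys and pairs each with a running counter, so the old code printed indices and line numbers rather than line numbers and actions. A toy dict makes the difference visible:

deltas = {3: ("+x",), 7: ("-y",)}

# old form: enumerate pairs a running index with each *key*, so actions never print
old = [f"{l}: {a}" for l, a in enumerate(deltas) if a]

# fixed form: items() pairs each line number with its actions
new = [f"{l}: {a}" for l, a in deltas.items()]

assert old == ["0: 3", "1: 7"]
assert new == ["3: ('+x',)", "7: ('-y',)"]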
coeditor.encoding/change_to_tokens
Modified
temp-1
10c5e6f0d8a5440b63a9a41bac37d5eca50b6d6b
Update c3ProblemChangeDropout to v1.1.
<0>:<add> return join_list(([tk] + line for line in lines), Newline_id)
# module: coeditor.encoding def change_to_tokens(change: Change[str]) -> TokenSeq: match change: case Modified(before=before, after=after, unchanged=unchanged): if unchanged or before == after: return encode_basic(before) else: diffs = change_to_line_diffs(change) return encode_diffs(diffs) case Added() | Deleted(): + lines = split_list(encode_basic(change.earlier), Newline_id) - lines = split_list(encode_basic(change.earlier()), Newline_id) tk = Add_id if isinstance(change, Added) else Del_id - return join_list([tk] + line for line in lines) <0> case _: raise AssertionError(f"Not a change type: {change}")
===========unchanged ref 0=========== at: coeditor.change Added(after: E1) Deleted(before: E1) Modified(before: E1, after: E1, unchanged: bool=False) Change = Added[E1] | Deleted[E1] | Modified[E1] at: coeditor.change.Added after: E1 at: coeditor.change.Deleted before: E1 at: coeditor.change.Modified before: E1 after: E1 unchanged: bool = False at: coeditor.common TokenSeq = list[Token] split_list(lst: list[T1], sep: T1) -> list[list[T1]] join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1] at: coeditor.encoding Add_id = get_tk_id(Add) Del_id = get_tk_id(Del) Newline_id = get_tk_id("\n") encode_basic(text: str, add_special_tokens=False) -> TokenSeq change_to_line_diffs(change: Change[str]) -> list[str] encode_diffs(diffs: Sequence[str]) -> TokenSeq ===========changed ref 0=========== # module: coeditor.common def split_list( lst: list[T1], sep: T1, ) -> list[list[T1]]: """ Split a list into segments by a separator, always ends with an empty list. """ + if not lst: + return [] result = list[list[T1]]() ptr = 0 for i, item in enumerate(lst): if item == sep: result.append(lst[ptr:i]) ptr = i + 1 result.append(lst[ptr:]) return result ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in self._deltas.items()) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): + lines = splitlines(input) - lines = input.split("\n") new_lines = list[str]() for i, line in enumerate(lines): deleted = False if delta := self._deltas.get(i): for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines) ===========changed ref 3=========== # module: coeditor.common def count_lines(text: str) -> int: + if not text: + return 0 return text.count("\n") + 1 ===========changed ref 4=========== # module: coeditor.common def splitlines(text: str) -> list[str]: """Split a text into lines and apalways ends with an empty line.""" + if not text: + return [] return text.split("\n")
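The change_to_tokens fix passes Newline_id as the separator when joining the per-line token lists of an Added or Deleted change, so the reconstructed sequence keeps its line breaks. With strings standing in for token ids (join_with_sep is an illustrative stand-in for join_list):

def join_with_sep(segs, sep=None):
    # concatenate segments, optionally inserting sep between consecutive segments
    out = []
    for i, seg in enumerate(segs):
        if i > 0 and sep is not None:
            out.append(sep)
        out.extend(seg)
    return out

lines = [["<add>", "a"], ["<add>", "b"]]
assert join_with_sep(lines) == ["<add>", "a", "<add>", "b"]              # lines run together
assert join_with_sep(lines, "\n") == ["<add>", "a", "\n", "<add>", "b"]  # newline between lines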
coeditor.encoding/TokenizedEdit.show
Modified
temp-1
10c5e6f0d8a5440b63a9a41bac37d5eca50b6d6b
Update c3ProblemChangeDropout to v1.1.
<0>:<add> return f" <{i}>" if i <= 9 else f"<{i}>"
# module: coeditor.encoding class TokenizedEdit(ABC): + def show(self, pred_tks: TokenSeq | None = None) -> str: - def show(self) -> str: + def show_label(i: int): - return self.show_prediction(None) <0>
===========unchanged ref 0=========== at: coeditor.encoding.TokenizedEdit input_tks: TokenSeq output_tks: TokenSeq main_tks: TokenSeq path: ProjectPath change_type: Change[None] ===========changed ref 0=========== # module: coeditor.encoding def change_to_tokens(change: Change[str]) -> TokenSeq: match change: case Modified(before=before, after=after, unchanged=unchanged): if unchanged or before == after: return encode_basic(before) else: diffs = change_to_line_diffs(change) return encode_diffs(diffs) case Added() | Deleted(): + lines = split_list(encode_basic(change.earlier), Newline_id) - lines = split_list(encode_basic(change.earlier()), Newline_id) tk = Add_id if isinstance(change, Added) else Del_id + return join_list(([tk] + line for line in lines), Newline_id) - return join_list([tk] + line for line in lines) case _: raise AssertionError(f"Not a change type: {change}") ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in self._deltas.items()) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): + lines = splitlines(input) - lines = input.split("\n") new_lines = list[str]() for i, line in enumerate(lines): deleted = False if delta := self._deltas.get(i): for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines) ===========changed ref 3=========== # module: coeditor.common def count_lines(text: str) -> int: + if not text: + return 0 return text.count("\n") + 1 ===========changed ref 4=========== # module: coeditor.common def splitlines(text: str) -> list[str]: """Split a text into lines and apalways ends with an empty line.""" + if not text: + return [] return text.split("\n") ===========changed ref 5=========== # module: coeditor.common def split_list( lst: list[T1], sep: T1, ) -> list[list[T1]]: """ Split a list into segments by a separator, always ends with an empty list. """ + if not lst: + return [] result = list[list[T1]]() ptr = 0 for i, item in enumerate(lst): if item == sep: result.append(lst[ptr:i]) ptr = i + 1 result.append(lst[ptr:]) return result
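show_label, introduced by this edit, only pads single-digit labels with a leading space so the rendered line markers stay column-aligned; the record's own definition plus a usage check:

def show_label(i: int) -> str:
    # pad one-digit indices so " <3>" and "<12>" occupy the same width
    return f" <{i}>" if i <= 9 else f"<{i}>"

assert show_label(3) == " <3>"
assert show_label(12) == "<12>"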
coeditor.scoped_changes/ChangedSpan.header_line_range
Modified
temp-1
10c5e6f0d8a5440b63a9a41bac37d5eca50b6d6b
Update c3ProblemChangeDropout to v1.1.
<0>:<add> parent_scope = self.parent_scopes[-1].earlier
# module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: @property def header_line_range(self) -> LineRange: - parent_scope = self.parent_scopes[-1].earlier() <0> hrange = parent_scope.header_line_range return hrange
===========unchanged ref 0=========== at: coeditor.change.Added after: E1 at: coeditor.change.Deleted before: E1 at: coeditor.change.Modified before: E1 after: E1 unchanged: bool = False at: coeditor.scoped_changes LineRange = NewType("LineRange", tuple[int, int]) at: coeditor.scoped_changes.ChangeScope.__post_init__ self.header_line_range: LineRange = header_line_range at: coeditor.scoped_changes.ChangedSpan change: Change[str] parent_scopes: Sequence[Change[ChangeScope]] line_range: LineRange ===========changed ref 0=========== # module: coeditor.common def count_lines(text: str) -> int: + if not text: + return 0 return text.count("\n") + 1 ===========changed ref 1=========== # module: coeditor.common def splitlines(text: str) -> list[str]: """Split a text into lines and apalways ends with an empty line.""" + if not text: + return [] return text.split("\n") ===========changed ref 2=========== # module: coeditor.encoding class TokenizedEdit(ABC): + def show(self, pred_tks: TokenSeq | None = None) -> str: - def show(self) -> str: + def show_label(i: int): + return f" <{i}>" if i <= 9 else f"<{i}>" - return self.show_prediction(None) ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in self._deltas.items()) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 4=========== # module: coeditor.common def split_list( lst: list[T1], sep: T1, ) -> list[list[T1]]: """ Split a list into segments by a separator, always ends with an empty list. """ + if not lst: + return [] result = list[list[T1]]() ptr = 0 for i, item in enumerate(lst): if item == sep: result.append(lst[ptr:i]) ptr = i + 1 result.append(lst[ptr:]) return result ===========changed ref 5=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): + lines = splitlines(input) - lines = input.split("\n") new_lines = list[str]() for i, line in enumerate(lines): deleted = False if delta := self._deltas.get(i): for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines) ===========changed ref 6=========== # module: coeditor.encoding def change_to_tokens(change: Change[str]) -> TokenSeq: match change: case Modified(before=before, after=after, unchanged=unchanged): if unchanged or before == after: return encode_basic(before) else: diffs = change_to_line_diffs(change) return encode_diffs(diffs) case Added() | Deleted(): + lines = split_list(encode_basic(change.earlier), Newline_id) - lines = split_list(encode_basic(change.earlier()), Newline_id) tk = Add_id if isinstance(change, Added) else Del_id + return join_list(([tk] + line for line in lines), Newline_id) - return join_list([tk] + line for line in lines) case _: raise AssertionError(f"Not a change type: {change}") ===========changed ref 7=========== # module: coeditor.encoding class TokenizedEdit(ABC): - def show_prediction(self, pred_tks: TokenSeq | None = None) -> str: - def show_label(i: int): - return f" <{i}>" if i <= 9 else f"<{i}>" - - def show_content(tks: TokenSeq): - if tks and tks[0] == Add_id: - return "+ " + decode_tokens(tks[1:]) - elif tks and tks[0] == Del_id: - return "- " + 
decode_tokens(tks[1:]) - else: - return " " + decode_tokens(tks) - - def show_extra_tokens(tks: TokenSeq, main_tk_lines: dict[Token, TokenSeq]): - segs = output_ids_as_seqs(tks) - lines = [] - for k, seg in segs.items(): - if not seg: - continue # skip empty lines - if seg[-1] == Del_id: - # show the deleted line - origin_line = split_list(main_tk_lines.get(k, []), Newline_id)[0] - origin_line.append(Newline_id) - seg = seg + origin_line - label = show_label(id_map.get(k, -1)) - lines.append(f"{label}:{indent(decode_tokens(seg), ' ' * 4).lstrip()}") - return "".join(lines) - - main_segs = output_ids_as_seqs(self.main_tks) - id_map = {k: i for i, k in enumerate(main_segs)} - main_lines = list[str]() - for line_tks in split_list(self.main_tks, Newline_id): - if line_tks and is_extra_id(line_tks[0]): - prefix = show_label(id</s> ===========changed ref 8=========== # module: coeditor.encoding class TokenizedEdit(ABC): - def show_prediction(self, pred_tks: TokenSeq | None = None) -> str: # offset: 1 <s> if line_tks and is_extra_id(line_tks[0]): - prefix = show_label(id_map.get(line_tks[0], -1)) - line = prefix + show_content(line_tks[1:]) - else: - line = " |" + show_content(line_tks) - main_lines.append(line) - - pred_lines = ( - ["========Prediction========", f"{show_extra_tokens(pred_tks, main_segs)}"] - if pred_tks - else [] - ) - outputs = [ - "-" * 80, - *self.meta_data_lines(), - "========Ground Truth========", - show_extra_tokens(self.output_tks, main_segs), - *pred_lines, - "========Main Code========", - "\n".join(main_lines), - ] + [ - f"==========={name}===========\n" + decode_tokens(tks) - for name, tks in self.all_ctxs().items() - ] - return "\n".join(outputs) -
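This record and the following ones in coeditor.scoped_changes all track one refactor: Change.earlier is now accessed as an attribute, so call sites drop the parentheses. One common way to keep such a rename cheap on the class side is a property; a hypothetical minimal version (the real Change classes carry far more than this):

from dataclasses import dataclass

@dataclass(frozen=True)
class ModifiedSketch:
    before: str
    after: str

    @property
    def earlier(self) -> str:
        # call sites can now write change.earlier instead of change.earlier()
        return self.before

assert ModifiedSketch("old body", "new body").earlier == "old body"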
coeditor.scoped_changes/ChangedSpan.path
Modified
temp-1
10c5e6f0d8a5440b63a9a41bac37d5eca50b6d6b
Update c3ProblemChangeDropout to v1.1.
<0>:<add> return self.parent_scopes[-1].earlier.path
# module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: @property def path(self) -> ProjectPath: - return self.parent_scopes[-1].earlier().path <0>
===========unchanged ref 0=========== at: coeditor.common ProjectPath(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any) at: coeditor.scoped_changes.ChangeScope path: ProjectPath tree: ScopeTree spans: Sequence["StatementSpan"] subscopes: Mapping[str, Self] parent_scope: "ChangeScope | None" at: coeditor.scoped_changes.ChangedSpan parent_scopes: Sequence[Change[ChangeScope]] ===========changed ref 0=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: @property def header_line_range(self) -> LineRange: + parent_scope = self.parent_scopes[-1].earlier - parent_scope = self.parent_scopes[-1].earlier() hrange = parent_scope.header_line_range return hrange ===========changed ref 1=========== # module: coeditor.common def count_lines(text: str) -> int: + if not text: + return 0 return text.count("\n") + 1 ===========changed ref 2=========== # module: coeditor.common def splitlines(text: str) -> list[str]: """Split a text into lines and apalways ends with an empty line.""" + if not text: + return [] return text.split("\n") ===========changed ref 3=========== # module: coeditor.encoding class TokenizedEdit(ABC): + def show(self, pred_tks: TokenSeq | None = None) -> str: - def show(self) -> str: + def show_label(i: int): + return f" <{i}>" if i <= 9 else f"<{i}>" - return self.show_prediction(None) ===========changed ref 4=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in self._deltas.items()) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 5=========== # module: coeditor.common def split_list( lst: list[T1], sep: T1, ) -> list[list[T1]]: """ Split a list into segments by a separator, always ends with an empty list. 
""" + if not lst: + return [] result = list[list[T1]]() ptr = 0 for i, item in enumerate(lst): if item == sep: result.append(lst[ptr:i]) ptr = i + 1 result.append(lst[ptr:]) return result ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): + lines = splitlines(input) - lines = input.split("\n") new_lines = list[str]() for i, line in enumerate(lines): deleted = False if delta := self._deltas.get(i): for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines) ===========changed ref 7=========== # module: coeditor.encoding def change_to_tokens(change: Change[str]) -> TokenSeq: match change: case Modified(before=before, after=after, unchanged=unchanged): if unchanged or before == after: return encode_basic(before) else: diffs = change_to_line_diffs(change) return encode_diffs(diffs) case Added() | Deleted(): + lines = split_list(encode_basic(change.earlier), Newline_id) - lines = split_list(encode_basic(change.earlier()), Newline_id) tk = Add_id if isinstance(change, Added) else Del_id + return join_list(([tk] + line for line in lines), Newline_id) - return join_list([tk] + line for line in lines) case _: raise AssertionError(f"Not a change type: {change}") ===========changed ref 8=========== # module: coeditor.encoding class TokenizedEdit(ABC): - def show_prediction(self, pred_tks: TokenSeq | None = None) -> str: - def show_label(i: int): - return f" <{i}>" if i <= 9 else f"<{i}>" - - def show_content(tks: TokenSeq): - if tks and tks[0] == Add_id: - return "+ " + decode_tokens(tks[1:]) - elif tks and tks[0] == Del_id: - return "- " + decode_tokens(tks[1:]) - else: - return " " + decode_tokens(tks) - - def show_extra_tokens(tks: TokenSeq, main_tk_lines: dict[Token, TokenSeq]): - segs = output_ids_as_seqs(tks) - lines = [] - for k, seg in segs.items(): - if not seg: - continue # skip empty lines - if seg[-1] == Del_id: - # show the deleted line - origin_line = split_list(main_tk_lines.get(k, []), Newline_id)[0] - origin_line.append(Newline_id) - seg = seg + origin_line - label = show_label(id_map.get(k, -1)) - lines.append(f"{label}:{indent(decode_tokens(seg), ' ' * 4).lstrip()}") - return "".join(lines) - - main_segs = output_ids_as_seqs(self.main_tks) - id_map = {k: i for i, k in enumerate(main_segs)} - main_lines = list[str]() - for line_tks in split_list(self.main_tks, Newline_id): - if line_tks and is_extra_id(line_tks[0]): - prefix = show_label(id</s> ===========changed ref 9=========== # module: coeditor.encoding class TokenizedEdit(ABC): - def show_prediction(self, pred_tks: TokenSeq | None = None) -> str: # offset: 1 <s> if line_tks and is_extra_id(line_tks[0]): - prefix = show_label(id_map.get(line_tks[0], -1)) - line = prefix + show_content(line_tks[1:]) - else: - line = " |" + show_content(line_tks) - main_lines.append(line) - - pred_lines = ( - ["========Prediction========", f"{show_extra_tokens(pred_tks, main_segs)}"] - if pred_tks - else [] - ) - outputs = [ - "-" * 80, - *self.meta_data_lines(), - "========Ground Truth========", - show_extra_tokens(self.output_tks, main_segs), - *pred_lines, - "========Main Code========", - "\n".join(main_lines), - ] + [ - f"==========={name}===========\n" + decode_tokens(tks) - for name, tks in self.all_ctxs().items() - ] - return 
"\n".join(outputs) -
coeditor.scoped_changes/ChangedSpan._is_func_body
Modified
temp-1
10c5e6f0d8a5440b63a9a41bac37d5eca50b6d6b
Update c3ProblemChangeDropout to v1.1.
<0>:<add> return self.parent_scopes[-1].earlier.tree.type == ptree.Function.type
# module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: def _is_func_body(self) -> bool: - return self.parent_scopes[-1].earlier().tree.type == ptree.Function.type <0>
===========unchanged ref 0=========== at: coeditor.scoped_changes.ChangeScope tree: ScopeTree at: coeditor.scoped_changes.ChangedSpan parent_scopes: Sequence[Change[ChangeScope]] at: parso.python.tree Function(children) at: parso.python.tree.Class type = 'classdef' __slots__ = () at: parso.python.tree.Function type = 'funcdef' at: parso.python.tree.Module __slots__ = ('_used_names',) type = 'file_input' ===========changed ref 0=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: @property def path(self) -> ProjectPath: + return self.parent_scopes[-1].earlier.path - return self.parent_scopes[-1].earlier().path ===========changed ref 1=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: @property def header_line_range(self) -> LineRange: + parent_scope = self.parent_scopes[-1].earlier - parent_scope = self.parent_scopes[-1].earlier() hrange = parent_scope.header_line_range return hrange ===========changed ref 2=========== # module: coeditor.common def count_lines(text: str) -> int: + if not text: + return 0 return text.count("\n") + 1 ===========changed ref 3=========== # module: coeditor.common def splitlines(text: str) -> list[str]: """Split a text into lines and apalways ends with an empty line.""" + if not text: + return [] return text.split("\n") ===========changed ref 4=========== # module: coeditor.encoding class TokenizedEdit(ABC): + def show(self, pred_tks: TokenSeq | None = None) -> str: - def show(self) -> str: + def show_label(i: int): + return f" <{i}>" if i <= 9 else f"<{i}>" - return self.show_prediction(None) ===========changed ref 5=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in self._deltas.items()) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 6=========== # module: coeditor.common def split_list( lst: list[T1], sep: T1, ) -> list[list[T1]]: """ Split a list into segments by a separator, always ends with an empty list. 
""" + if not lst: + return [] result = list[list[T1]]() ptr = 0 for i, item in enumerate(lst): if item == sep: result.append(lst[ptr:i]) ptr = i + 1 result.append(lst[ptr:]) return result ===========changed ref 7=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): + lines = splitlines(input) - lines = input.split("\n") new_lines = list[str]() for i, line in enumerate(lines): deleted = False if delta := self._deltas.get(i): for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines) ===========changed ref 8=========== # module: coeditor.encoding def change_to_tokens(change: Change[str]) -> TokenSeq: match change: case Modified(before=before, after=after, unchanged=unchanged): if unchanged or before == after: return encode_basic(before) else: diffs = change_to_line_diffs(change) return encode_diffs(diffs) case Added() | Deleted(): + lines = split_list(encode_basic(change.earlier), Newline_id) - lines = split_list(encode_basic(change.earlier()), Newline_id) tk = Add_id if isinstance(change, Added) else Del_id + return join_list(([tk] + line for line in lines), Newline_id) - return join_list([tk] + line for line in lines) case _: raise AssertionError(f"Not a change type: {change}") ===========changed ref 9=========== # module: coeditor.encoding class TokenizedEdit(ABC): - def show_prediction(self, pred_tks: TokenSeq | None = None) -> str: - def show_label(i: int): - return f" <{i}>" if i <= 9 else f"<{i}>" - - def show_content(tks: TokenSeq): - if tks and tks[0] == Add_id: - return "+ " + decode_tokens(tks[1:]) - elif tks and tks[0] == Del_id: - return "- " + decode_tokens(tks[1:]) - else: - return " " + decode_tokens(tks) - - def show_extra_tokens(tks: TokenSeq, main_tk_lines: dict[Token, TokenSeq]): - segs = output_ids_as_seqs(tks) - lines = [] - for k, seg in segs.items(): - if not seg: - continue # skip empty lines - if seg[-1] == Del_id: - # show the deleted line - origin_line = split_list(main_tk_lines.get(k, []), Newline_id)[0] - origin_line.append(Newline_id) - seg = seg + origin_line - label = show_label(id_map.get(k, -1)) - lines.append(f"{label}:{indent(decode_tokens(seg), ' ' * 4).lstrip()}") - return "".join(lines) - - main_segs = output_ids_as_seqs(self.main_tks) - id_map = {k: i for i, k in enumerate(main_segs)} - main_lines = list[str]() - for line_tks in split_list(self.main_tks, Newline_id): - if line_tks and is_extra_id(line_tks[0]): - prefix = show_label(id</s>
coeditor.scoped_changes/JModuleChange.from_modules
Modified
temp-1
10c5e6f0d8a5440b63a9a41bac37d5eca50b6d6b
Update c3ProblemChangeDropout to v1.1.
<0>:<add> path = cspan.parent_scopes[-1].earlier.path
# module: coeditor.scoped_changes @dataclass(frozen=True) class JModuleChange: @staticmethod def from_modules(module_change: Change[JModule]): "Compute the change spans from two versions of the same module." with _tlogger.timed("JModuleChange.from_modules"): changed = dict[ProjectPath, ChangedSpan]() for cspan in get_changed_spans( module_change.map(lambda m: m.as_scope), tuple() ): - path = cspan.parent_scopes[-1].earlier().path <0> changed[path] = cspan return JModuleChange(module_change, changed)
===========unchanged ref 0=========== at: coeditor._utils.TimeLogger times: dict[str, list[float]] = field(default_factory=dict) timed(self, name: str) at: coeditor.change Change = Added[E1] | Deleted[E1] | Modified[E1] at: coeditor.change.Added map(f: Callable[[E1], T2]) -> "Added[T2]" at: coeditor.change.Deleted map(f: Callable[[E1], T2]) -> "Deleted[T2]" at: coeditor.change.Modified map(f: Callable[[E1], T2]) -> "Modified[T2]" at: coeditor.common ProjectPath(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any) at: coeditor.scoped_changes _tlogger = TimeLogger() ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange) JModule(mname: ModuleName, tree: ptree.Module) JModuleChange(module_change: Change[JModule], changed: Mapping[ProjectPath, ChangedSpan]) get_changed_spans(scope_change: Change[ChangeScope], parent_changes: tuple[Change[ChangeScope], ...]=()) -> list[ChangedSpan] at: coeditor.scoped_changes.ChangeScope path: ProjectPath at: coeditor.scoped_changes.ChangedSpan parent_scopes: Sequence[Change[ChangeScope]] at: coeditor.scoped_changes.JModuleChange module_change: Change[JModule] changed: Mapping[ProjectPath, ChangedSpan] ===========changed ref 0=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: @property def path(self) -> ProjectPath: + return self.parent_scopes[-1].earlier.path - return self.parent_scopes[-1].earlier().path ===========changed ref 1=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: def _is_func_body(self) -> bool: + return self.parent_scopes[-1].earlier.tree.type == ptree.Function.type - return self.parent_scopes[-1].earlier().tree.type == ptree.Function.type ===========changed ref 2=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: @property def header_line_range(self) -> LineRange: + parent_scope = self.parent_scopes[-1].earlier - parent_scope = self.parent_scopes[-1].earlier() hrange = parent_scope.header_line_range return hrange ===========changed ref 3=========== # module: coeditor.common def count_lines(text: str) -> int: + if not text: + return 0 return text.count("\n") + 1 ===========changed ref 4=========== # module: coeditor.common def splitlines(text: str) -> list[str]: """Split a text into lines and apalways ends with an empty line.""" + if not text: + return [] return text.split("\n") ===========changed ref 5=========== # module: coeditor.encoding class TokenizedEdit(ABC): + def show(self, pred_tks: TokenSeq | None = None) -> str: - def show(self) -> str: + def show_label(i: int): + return f" <{i}>" if i <= 9 else f"<{i}>" - return self.show_prediction(None) ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in self._deltas.items()) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 7=========== # module: coeditor.common def split_list( lst: list[T1], sep: T1, ) -> list[list[T1]]: """ Split a list into segments by a separator, always ends with an empty list. 
""" + if not lst: + return [] result = list[list[T1]]() ptr = 0 for i, item in enumerate(lst): if item == sep: result.append(lst[ptr:i]) ptr = i + 1 result.append(lst[ptr:]) return result ===========changed ref 8=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): + lines = splitlines(input) - lines = input.split("\n") new_lines = list[str]() for i, line in enumerate(lines): deleted = False if delta := self._deltas.get(i): for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines) ===========changed ref 9=========== # module: coeditor.encoding def change_to_tokens(change: Change[str]) -> TokenSeq: match change: case Modified(before=before, after=after, unchanged=unchanged): if unchanged or before == after: return encode_basic(before) else: diffs = change_to_line_diffs(change) return encode_diffs(diffs) case Added() | Deleted(): + lines = split_list(encode_basic(change.earlier), Newline_id) - lines = split_list(encode_basic(change.earlier()), Newline_id) tk = Add_id if isinstance(change, Added) else Del_id + return join_list(([tk] + line for line in lines), Newline_id) - return join_list([tk] + line for line in lines) case _: raise AssertionError(f"Not a change type: {change}")
coeditor.scoped_changes/get_changed_spans
Modified
temp-1
10c5e6f0d8a5440b63a9a41bac37d5eca50b6d6b
Update c3ProblemChangeDropout to v1.1.
<0>:<add> line_range = (line, line + count_lines(code))
# module: coeditor.scoped_changes def get_changed_spans( scope_change: Change[ChangeScope], parent_changes: tuple[Change[ChangeScope], ...] = (), ) -> list[ChangedSpan]: <s> before and after the edit (and hiding all the sub spans such as class methods), then map the changes to each line back to the original regions. """ def get_modified_spans( old_scope: ChangeScope, new_scope: ChangeScope, parent_changes: Sequence[Change[ChangeScope]], ) -> Iterable[ChangedSpan]: if code_equal(old_scope.spans_code, new_scope.spans_code): return diffs = change_to_line_diffs( Modified(old_scope.spans_code, new_scope.spans_code) ) original, delta = line_diffs_to_original_delta(diffs) line = 0 for span in old_scope.spans: code = span.code - line_range = (line, line + len(code.split("\n"))) <0> if subdelta := delta.for_input_range(line_range).shifted(-line): new_code = subdelta.apply_to_input(code) change = Modified(code, new_code) yield ChangedSpan( change, parent_changes, span.line_range, ) line = line_range[1] def recurse( scope_change: Change[ChangeScope], parent_changes ) -> Iterable[ChangedSpan]: parent_changes = (*parent_changes, scope_change) match scope_change: case Modified(old_scope, new_scope): # compute statement differences yield from get_modified_spans(old_scope, new_scope, parent_changes) for sub_change in get_named_changes( old_scope.subscopes, new_scope.subscopes ).values(): yield from recurse(sub_change, parent_changes) case Added(scope) | Deleted</s>
===========above chunk 0=========== # module: coeditor.scoped_changes def get_changed_spans( scope_change: Change[ChangeScope], parent_changes: tuple[Change[ChangeScope], ...] = (), ) -> list[ChangedSpan]: # offset: -1 """ Extract the change spans from scope change. - We need a tree differencing algorithm that are robust to element movements. - To compute the changes to each statement region, we can compute the differences by concatenating all the regions before and after the edit (and hiding all the sub spans such as class methods), then map the changes to each line back</s> ===========below chunk 0=========== # module: coeditor.scoped_changes def get_changed_spans( scope_change: Change[ChangeScope], parent_changes: tuple[Change[ChangeScope], ...] = (), ) -> list[ChangedSpan]: # offset: 1 <s>scopes ).values(): yield from recurse(sub_change, parent_changes) case Added(scope) | Deleted(scope): for span in scope.spans: code_change = scope_change.new_value(span.code) yield ChangedSpan( code_change, parent_changes, span.line_range, ) for s in scope.subscopes.values(): s_change = scope_change.new_value(s) yield from recurse(s_change, parent_changes) spans = list(recurse(scope_change, parent_changes)) spans.sort(key=lambda s: s.line_range[0]) return spans ===========unchanged ref 0=========== at: coeditor.change Added(after: E1) Deleted(before: E1) Modified(before: E1, after: E1, unchanged: bool=False) Change = Added[E1] | Deleted[E1] | Modified[E1] get_named_changes(old_map: Mapping[T1, T2], new_map: Mapping[T1, T2]) -> Mapping[T1, Change[T2]] at: coeditor.change.Added new_value(v: T1) -> "Added[T1]" at: coeditor.change.Deleted new_value(v: T1) -> "Deleted[T1]" at: coeditor.common count_lines(text: str) -> int code_equal(code1: str, code2: str) -> bool at: coeditor.encoding change_to_line_diffs(change: Change[str]) -> list[str] line_diffs_to_original_delta(diffs: list[str]) -> tuple[str, StrDelta] at: coeditor.scoped_changes ChangeScope(path: ProjectPath, tree: ScopeTree, spans: Sequence["StatementSpan"], subscopes: Mapping[str, Self], parent_scope: "ChangeScope | None") ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange) at: coeditor.scoped_changes.ChangeScope spans: Sequence["StatementSpan"] at: coeditor.scoped_changes.StatementSpan.__post_init__ self.code: str = code + "\n" self.line_range: LineRange = line_range(start, end) at: typing Iterable = _alias(collections.abc.Iterable, 1) Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping values() -> ValuesView[_VT_co] ===========changed ref 0=========== # module: coeditor.common def count_lines(text: str) -> int: + if not text: + return 0 return text.count("\n") + 1 ===========changed ref 1=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: @property def path(self) -> ProjectPath: + return self.parent_scopes[-1].earlier.path - return self.parent_scopes[-1].earlier().path ===========changed ref 2=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: def _is_func_body(self) -> bool: + return self.parent_scopes[-1].earlier.tree.type == ptree.Function.type - return self.parent_scopes[-1].earlier().tree.type == ptree.Function.type ===========changed ref 3=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: @property def header_line_range(self) -> LineRange: + parent_scope = self.parent_scopes[-1].earlier - parent_scope = self.parent_scopes[-1].earlier() hrange = 
parent_scope.header_line_range return hrange ===========changed ref 4=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class JModuleChange: @staticmethod def from_modules(module_change: Change[JModule]): "Compute the change spans from two versions of the same module." with _tlogger.timed("JModuleChange.from_modules"): changed = dict[ProjectPath, ChangedSpan]() for cspan in get_changed_spans( module_change.map(lambda m: m.as_scope), tuple() ): + path = cspan.parent_scopes[-1].earlier.path - path = cspan.parent_scopes[-1].earlier().path changed[path] = cspan return JModuleChange(module_change, changed) ===========changed ref 5=========== # module: coeditor.common def splitlines(text: str) -> list[str]: """Split a text into lines and apalways ends with an empty line.""" + if not text: + return [] return text.split("\n") ===========changed ref 6=========== # module: coeditor.encoding class TokenizedEdit(ABC): + def show(self, pred_tks: TokenSeq | None = None) -> str: - def show(self) -> str: + def show_label(i: int): + return f" <{i}>" if i <= 9 else f"<{i}>" - return self.show_prediction(None) ===========changed ref 7=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" for l, a in self._deltas.items()) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 8=========== # module: coeditor.common def split_list( lst: list[T1], sep: T1, ) -> list[list[T1]]: """ Split a list into segments by a separator, always ends with an empty list. """ + if not lst: + return [] result = list[list[T1]]() ptr = 0 for i, item in enumerate(lst): if item == sep: result.append(lst[ptr:i]) ptr = i + 1 result.append(lst[ptr:]) return result ===========changed ref 9=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def apply_to_input(self, input: str): + lines = splitlines(input) - lines = input.split("\n") new_lines = list[str]() for i, line in enumerate(lines): deleted = False if delta := self._deltas.get(i): for action in delta: if action[0] == "+": new_lines.append(action[1:]) elif action[0] == "-": deleted = True if not deleted: new_lines.append(line) if delta := self._deltas.get(len(lines)): for action in delta: if action[0] == "+": new_lines.append(action[1:]) return "\n".join(new_lines)
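The switch above from `len(code.split("\n"))` to `count_lines(code)` in `get_changed_spans` only changes the result for empty text; a small standalone check of that arithmetic (illustrative, mirroring the patched helper):

def count_lines(text: str) -> int:
    # Patched helper: empty text counts as zero lines.
    if not text:
        return 0
    return text.count("\n") + 1

assert len("".split("\n")) == 1   # the old expression counts an empty span as one line
assert count_lines("") == 0       # the new helper does not
assert count_lines("a\nb") == 2 == len("a\nb".split("\n"))  # identical for non-empty code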
coeditor.c3problem/C3ProblemChangeDropout.transform
Modified
temp-1
10c5e6f0d8a5440b63a9a41bac37d5eca50b6d6b
Update c3ProblemChangeDropout to v1.1.
<0>:<add> return probs[: self.max_split_factor]
# module: coeditor.c3problem class C3ProblemChangeDropout(C3ProblemTransformer): def transform(self, prob: C3Problem) -> Sequence[C3Problem]: <s>to_drop = int( - len(grouped_keys) * random.random() * self.max_dropout_ratio ) - assert n_to_drop < len(grouped_keys) - keys_to_drop = join_list(random_subset(grouped_keys, n_to_drop)) - else: - keys_to_drop = [] - if keys_to_drop: - _, delta2 = sub_delta.decompose_for_change(keys_to_drop) - if delta2.num_changes() == 0: - continue - delta1, delta2 = delta.decompose_for_change(keys_to_drop) - new_original = TkArray.new(delta1.to_change_tks(original.tolist())) - new_trans = prob.transformations + ("split", "dropout") - new_span = dataclasses.replace( - prob.span, original=new_original, delta=delta2 - ) - edit_lines = delta1.get_new_target_lines(range(i, j)) - else: - new_trans = prob.transformations + ("split",) - new_span = prob.span - edit_lines = range(i, j) - sub_prob = dataclasses.replace( - prob, span=new_span, edit_lines=edit_lines, transformations=new_trans - ) - problems.append(sub_prob) - if len(problems) >= self.max_split_factor: - break - return problems + prob_and_n.append((sub_prob, n_groups)) + # return the problems with the most changes + prob_and_n.sort(key=lambda p: p[1], reverse=True) + probs = [p[0] for p in prob_and_n] <0>
===========above chunk 0=========== # module: coeditor.c3problem class C3ProblemChangeDropout(C3ProblemTransformer): def transform(self, prob: C3Problem) -> Sequence[C3Problem]: # offset: -1 <s>original.tolist())) + new_trans = prob.transformations + ("split", "dropout") + new_span = dataclasses.replace( + prob.span, original=new_original, delta=delta2 + ) + else: + new_trans = prob.transformations + ("split",) + new_span = prob.span + delta1 = None + delta2_groups = delta.change_groups() + + prob_and_n = list[tuple[C3Problem, int]]() - problems = list[C3Problem]() for i in range(start, stop, self.max_lines_to_edit): j = min(i + self.max_lines_to_edit, stop) + edit_lines = range(i, j) + if delta1 is not None: + edit_lines = delta1.get_new_target_lines(edit_lines) + line_set = set(edit_lines) + n_groups = sum(any(key[0] in line_set for key in g) for g in delta2_groups) + if n_groups > 0: + sub_prob = dataclasses.replace( + prob, + span=new_span, + edit_lines=edit_lines, + transformations=new_trans, - sub_delta = delta.for_input_range((i, j)) - if sub_delta.num_changes() == 0: - continue - grouped_keys = sub_delta.change_groups() - should_dropout = ( - len(grouped_keys) >= 2 and random.random() < self.dropout_prob - ) - if should_dropout: - n_to_drop = int( - len(grouped_keys) * random.random() * self.max_dropout_ratio </s> ===========above chunk 1=========== # module: coeditor.c3problem class C3ProblemChangeDropout(C3ProblemTransformer): def transform(self, prob: C3Problem) -> Sequence[C3Problem]: # offset: -2 original = prob.span.original delta = prob.span.delta l_range = prob.edit_lines assert isinstance(l_range, range) start, stop = l_range.start, l_range.stop + + grouped_keys = delta.change_groups() + should_dropout = len(grouped_keys) >= 2 + if should_dropout: + n_to_drop = int( + len(grouped_keys) * random.random() * self.max_dropout_ratio + ) + assert n_to_drop < len(grouped_keys) + keys_to_drop = join_list(random_subset(grouped_keys, n_to_drop)) + else: + keys_to_drop = [] + if keys_to_drop: + delta1, delta2 = delta.decompose_for_change(keys_to_drop) + delta2_groups = delta2.change_groups() + if not delta2_groups: + print_err(f"{delta=}, {keys_to_drop=}, {delta1=}") + raise AssertionError("Empty delta2_groups") + new_original = TkArray.new(delta1.to_change_tks(original.tolist())) + new_trans = prob.transformations + ("split", "dropout") + new_span</s> ===========unchanged ref 0=========== at: coeditor.c3problem C3Problem(span: ChangedCodeSpan, edit_lines: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=()) at: coeditor.c3problem.C3Problem span: ChangedCodeSpan edit_lines: Sequence[int] relevant_changes: Sequence[ChangedCodeSpan] relevant_unchanged: Sequence[ChangedCodeSpan] change_type: Change[None] src_info: SrcInfo transformations: tuple[str, ...] 
= () at: coeditor.c3problem.C3ProblemTransformer transform(self, prob: C3Problem) -> Iterable[C3Problem] at: coeditor.c3problem.ChangedCodeSpan headers: Sequence[ChangedHeader] original: TkArray delta: TkDelta line_range: LineRange module: ModuleName at: coeditor.common join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1] random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2] random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1] print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None at: coeditor.encoding.TkDelta _deltas: Mapping[int, tuple[TokenSeq, ...]] decompose_for_change(first_keys: Collection[DeltaKey]) -> tuple[Self, Self] change_groups() -> Sequence[tuple[DeltaKey, ...]] at: coeditor.tk_array TkArray() ===========unchanged ref 1=========== at: coeditor.tk_array.TkArray tolist() -> TokenSeq new(tks: Sequence[int]) -> "TkArray" at: dataclasses replace(obj: _T, **changes: Any) -> _T at: random random = _inst.random at: typing Sequence = _alias(collections.abc.Sequence, 1) ===========changed ref 0=========== # module: coeditor.c3problem class C3ProblemChangeDropout(C3ProblemTransformer): """Split the problem into fixed-sized editing ranges like `C3ProblemSimpleSplit`, + but also randomly keep some subset of changes in the input. - but also randomly keep some subset of changes in the input.""" + + ### Change log + - v1.1 + - Dropout changes using change groups instead of individual change actions. + - Perform dropout at entire problem level ratehr than chunk level. This way, + changes in later chunks will be visible as well. + - Removed `dropout_prob`. + """ + + VERSION = "1.1" max_lines_to_edit: int = 25 max_split_factor: int = 4 - # the probability of dropping out some changes into the input - dropout_prob: float = 0.5 # when dropping the changes into the input, the biggest ratio of changes to drop max_dropout_ratio: float = 0.5 ===========changed ref 1=========== # module: coeditor.c3problem class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]): """ ### Change log + - v2.4: fix buggy encoding of `Added` and `Deleted` changes. - v2.3: always generate problems with full editing range and move the problem splitting logic elsewhere. Also changed the data format of `ChangedCodeSpan`. """ + VERSION = "2.4" - VERSION = "2.3" # change spans with more than this many lines will be ignored max_span_lines: int = 500 ===========changed ref 2=========== # module: coeditor.common def count_lines(text: str) -> int: + if not text: + return 0 return text.count("\n") + 1 ===========changed ref 3=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: @property def path(self) -> ProjectPath: + return self.parent_scopes[-1].earlier.path - return self.parent_scopes[-1].earlier().path
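A simplified sketch of the group-level dropout described in the v1.1 change log above: whole change groups are dropped at the problem level rather than individual actions per chunk, and the probability gate is gone. Here `groups` stands in for `delta.change_groups()` and the return value plays the role of `keys_to_drop`; this is illustrative, not the library implementation.

import random

def pick_groups_to_drop(
    groups: list[tuple[int, ...]],
    max_dropout_ratio: float = 0.5,
    rng: random.Random | None = None,
) -> list[int]:
    rng = rng or random.Random(0)
    # Dropout only applies when there are at least two change groups.
    if len(groups) < 2:
        return []
    n_to_drop = int(len(groups) * rng.random() * max_dropout_ratio)
    chosen = rng.sample(groups, n_to_drop)
    # Flatten the chosen groups back into individual change keys.
    return [key for group in chosen for key in group]

print(pick_groups_to_drop([(0, 1), (3,), (5, 6)]))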
tests.test_edits/TestChangeIdentities.test_str_encodings
Modified
temp-1
10c5e6f0d8a5440b63a9a41bac37d5eca50b6d6b
Update c3ProblemChangeDropout to v1.1.
<0>:<add> assert_str_equal(after, get_after(c))
# module: tests.test_edits class TestChangeIdentities: def test_str_encodings(self): for name, c in self.cases.items(): try: line_diffs = change_to_line_diffs(c) print("line_diffs\n------\n" + "\n".join(line_diffs)) before, delta = line_diffs_to_original_delta(line_diffs) + print("before:") + print(before) print("delta:", delta) + assert_str_equal(before, get_before(c)) - assert_str_equal(before, c.before) after = delta.apply_to_input(before) - assert_str_equal(after, c.after) <0> except Exception: print_err(f"Failed for case: {name}") raise
===========unchanged ref 0=========== at: textwrap dedent(text: str) -> str ===========changed ref 0=========== # module: tests.test_edits + def get_after(change: Change[str]) -> str: + if isinstance(change, Modified): + return change.after + elif isinstance(change, Added): + return change.after + elif isinstance(change, Deleted): + return "" + else: + raise ValueError(f"Unknown change type: {change}") + ===========changed ref 1=========== # module: tests.test_edits + def get_before(change: Change[str]) -> str: + if isinstance(change, Modified): + return change.before + elif isinstance(change, Added): + return "" + elif isinstance(change, Deleted): + return change.before + else: + raise ValueError(f"Unknown change type: {change}") + ===========changed ref 2=========== # module: tests.test_edits def assert_tks_eq(actual: TokenSeq, expected: TokenSeq, name: str): if actual != expected: + print_sections( - print(f"Failed for case: {name}") + ("Expected", decode_tokens(expected)), - print("Expected:\n", decode_tokens(expected), "<EOF>") + ("Reconstructed", decode_tokens(actual)), + ) - print("Actual:\n", decode_tokens(actual), "<EOF>") raise ValueError(f"Failed for case: {name}") ===========changed ref 3=========== # module: tests.test_edits + def assert_change_eq(actual: Change[str], expected: Change[str], name: str): - def assert_change_eq(actual: Modified[str], expected: Modified[str], name: str): + if get_before(actual) != get_before(expected): + print_sections( + ("Expected before", get_before(expected)), + ("Reconstructed before", get_before(actual)), + ) - if actual.before != expected.before: - print(f"Failed for case: {name}") - print("Expected before:\n", expected.before, "<EOF>") - print("Reconstructed before:\n", actual.before, "<EOF>") raise ValueError(f"Failed for case: {name}") + if get_after(actual) != get_after(expected): + print_sections( + ("Expected after", get_after(expected)), + ("Reconstructed after", get_after(actual)), + ) - if actual.after != expected.after: - print(f"Failed for case: {name}") - print("Expected after:\n", expected.after, "<EOF>") - print("Reconstructed after:\n", actual.after, "<EOF>") raise ValueError(f"Failed for case: {name}") ===========changed ref 4=========== # module: tests.test_edits class TestChangeIdentities: + cases: dict[str, Change[str]] = { - cases = { "empty": Modified("", ""), "generation": Modified("", "123"), + "added": Added("a\nb\nc\n"), + "deleted": Deleted("a\nb\nc\n"), "no change": Modified( dedent( """\ def f1(): x = 1 """ ), dedent( """\ def f1(): x = 1 """ ), ), "unchanged=True": Modified.from_unchanged( dedent( """\ def f1(): x = 1 """ ), ), # this test case cannot pass for some reason. Tokenizer bug? 
# "leading_whitespace": Modified.from_unchanged(" ..."), "replace last": Modified( dedent( """\ def f1(): x = 1""" ), dedent( """\ def f1(): x = 2 return x * 2""" ), ), "no special tokens": Modified( dedent( """\ def f1(): x = 1 y = 2 z = x + y return z def f2(): f1()""" ), dedent( """\ # new comment def f_new(): x = 1 if x > 0: y = 2 * x y *= 2 z = x + y return z def f2(): f1() return f_new() + a new_var = 0 """ ), ), "with special tokens": Modified( dedent( """\ def f1(): x = "<add>" y = "<del>\tx" return x + y """ ), </s> ===========changed ref 5=========== # module: tests.test_edits class TestChangeIdentities: # offset: 1 <s> x = "<add>" y = "<del>\tx" return x + y """ ), dedent( """\ # new comment 1 # new comment 2 def f1(): if newcond: x = "<add>" new_var = 5 y = "<del>" return x + new_var + y """ ), ), "super long": Modified( "\n".join(f"x = {i}" for i in range(0, 200)), "\n".join(f"x = {2* (i // 2)}" for i in range(0, 200)), ), } ===========changed ref 6=========== # module: coeditor.c3problem @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): - def show(self) -> str: - return self.show_prediction(None) - ===========changed ref 7=========== # module: coeditor.common def count_lines(text: str) -> int: + if not text: + return 0 return text.count("\n") + 1 ===========changed ref 8=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: @property def path(self) -> ProjectPath: + return self.parent_scopes[-1].earlier.path - return self.parent_scopes[-1].earlier().path ===========changed ref 9=========== # module: coeditor.common def splitlines(text: str) -> list[str]: """Split a text into lines and apalways ends with an empty line.""" + if not text: + return [] return text.split("\n") ===========changed ref 10=========== # module: coeditor.encoding class TokenizedEdit(ABC): + def show(self, pred_tks: TokenSeq | None = None) -> str: - def show(self) -> str: + def show_label(i: int): + return f" <{i}>" if i <= 9 else f"<{i}>" - return self.show_prediction(None) ===========changed ref 11=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: def _is_func_body(self) -> bool: + return self.parent_scopes[-1].earlier.tree.type == ptree.Function.type - return self.parent_scopes[-1].earlier().tree.type == ptree.Function.type ===========changed ref 12=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: @property def header_line_range(self) -> LineRange: + parent_scope = self.parent_scopes[-1].earlier - parent_scope = self.parent_scopes[-1].earlier() hrange = parent_scope.header_line_range return hrange
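The property this test asserts, written out as a tiny round-trip example. It assumes the `coeditor` package is importable and uses only names that appear in the records (`Modified`, `change_to_line_diffs`, `line_diffs_to_original_delta`, `apply_to_input`):

from coeditor.change import Modified
from coeditor.encoding import change_to_line_diffs, line_diffs_to_original_delta

c = Modified("x = 1\ny = 2\n", "x = 1\ny = 3\n")
diffs = change_to_line_diffs(c)                      # "+/-/ "-prefixed diff lines
before, delta = line_diffs_to_original_delta(diffs)  # original text plus a StrDelta
assert before == c.before
assert delta.apply_to_input(before) == c.after       # applying the delta recovers the edit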
tests.test_edits/TestChangeIdentities.test_tk_encodings
Modified
temp-1
10c5e6f0d8a5440b63a9a41bac37d5eca50b6d6b
Update c3ProblemChangeDropout to v1.1.
<0>:<add> assert inlined[-1] == Newline_id
# module: tests.test_edits class TestChangeIdentities: def test_tk_encodings(self): for name, c in self.cases.items(): # print(show_change(c)) c_tokens = change_to_tokens(c) print("c_tokens\n------\n", decode_tokens(c_tokens)) c_rec = tokens_to_change(c_tokens) assert_change_eq( c_rec, c, "change_to_tokens |> tokens_to_change = identity: " + name ) in_seq, out_seq = change_to_input_output(c) print("in_seq\n------\n", decode_tokens(in_seq)) print("out_seq\n------\n", decode_tokens(out_seq)) assert_tks_eq( in_seq, code_to_input( + _BaseTokenizer.encode(get_before(c), add_special_tokens=False) - _BaseTokenizer.encode(c.before, add_special_tokens=False) ), "change_to_input_output mathese code_to_input: " + name, ) + if len(splitlines(get_before(c))) < N_Extra_Ids: - if len(c.before.split("\n")) < N_Extra_Ids: inlined = inline_output_tokens(in_seq, out_seq) + if inlined: - assert inlined[-1] == Newline_id <0> assert_tks_eq( inlined[:-1], change_to_tokens(c), "inline_output_tokens: " + name ) c_rec2 = tokens_to_change(inlined[:-1]) assert_change_eq(c_rec2, c, "tokens_to_change(inlined): " + name)
===========unchanged ref 0=========== at: coeditor.change Modified(before: E1, after: E1, unchanged: bool=False) at: coeditor.common print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None assert_str_equal(actual: str, expect: str, name: str | None=None) at: coeditor.encoding change_to_line_diffs(change: Change[str]) -> list[str] line_diffs_to_original_delta(diffs: list[str]) -> tuple[str, StrDelta] at: tests.test_edits get_before(change: Change[str]) -> str get_after(change: Change[str]) -> str ===========unchanged ref 1=========== at: tests.test_edits.TestChangeIdentities cases: dict[str, Change[str]] = { "empty": Modified("", ""), "generation": Modified("", "123"), "added": Added("a\nb\nc\n"), "deleted": Deleted("a\nb\nc\n"), "no change": Modified( dedent( """\ def f1(): x = 1 """ ), dedent( """\ def f1(): x = 1 """ ), ), "unchanged=True": Modified.from_unchanged( dedent( """\ def f1(): x = 1 """ ), ), # this test case cannot pass for some reason. Tokenizer bug? # "leading_whitespace": Modified.from_unchanged(" ..."), "replace last": Modified( dedent( """\ def f1(): x = 1""" ), dedent( """\ def f1(): x = 2 return x * 2""" ), ), "no special tokens": Modified( dedent( """\ def f1(): x = 1 y = 2 z = x + y return z def f2(): f1()""" ), dedent( """\ # new comment def f_new(): x = 1 if x > 0: y = 2 * x y *= 2 z = x + y return z def f2(): f1() return f_new() + a new_var = 0 """ ), ), "with special tokens": Modified( dedent( """\ def f1(): x = "<add>" y = "<del>\tx" return x + y """ ), dedent( """\ # new comment 1</s> ===========changed ref 0=========== # module: tests.test_edits + def get_after(change: Change[str]) -> str: + if isinstance(change, Modified): + return change.after + elif isinstance(change, Added): + return change.after + elif isinstance(change, Deleted): + return "" + else: + raise ValueError(f"Unknown change type: {change}") + ===========changed ref 1=========== # module: tests.test_edits + def get_before(change: Change[str]) -> str: + if isinstance(change, Modified): + return change.before + elif isinstance(change, Added): + return "" + elif isinstance(change, Deleted): + return change.before + else: + raise ValueError(f"Unknown change type: {change}") + ===========changed ref 2=========== # module: tests.test_edits class TestChangeIdentities: def test_str_encodings(self): for name, c in self.cases.items(): try: line_diffs = change_to_line_diffs(c) print("line_diffs\n------\n" + "\n".join(line_diffs)) before, delta = line_diffs_to_original_delta(line_diffs) + print("before:") + print(before) print("delta:", delta) + assert_str_equal(before, get_before(c)) - assert_str_equal(before, c.before) after = delta.apply_to_input(before) + assert_str_equal(after, get_after(c)) - assert_str_equal(after, c.after) except Exception: print_err(f"Failed for case: {name}") raise ===========changed ref 3=========== # module: tests.test_edits def assert_tks_eq(actual: TokenSeq, expected: TokenSeq, name: str): if actual != expected: + print_sections( - print(f"Failed for case: {name}") + ("Expected", decode_tokens(expected)), - print("Expected:\n", decode_tokens(expected), "<EOF>") + ("Reconstructed", decode_tokens(actual)), + ) - print("Actual:\n", decode_tokens(actual), "<EOF>") raise ValueError(f"Failed for case: {name}") ===========changed ref 4=========== # module: tests.test_edits + def assert_change_eq(actual: Change[str], expected: Change[str], name: str): - def assert_change_eq(actual: Modified[str], expected: Modified[str], name: str): + if 
get_before(actual) != get_before(expected): + print_sections( + ("Expected before", get_before(expected)), + ("Reconstructed before", get_before(actual)), + ) - if actual.before != expected.before: - print(f"Failed for case: {name}") - print("Expected before:\n", expected.before, "<EOF>") - print("Reconstructed before:\n", actual.before, "<EOF>") raise ValueError(f"Failed for case: {name}") + if get_after(actual) != get_after(expected): + print_sections( + ("Expected after", get_after(expected)), + ("Reconstructed after", get_after(actual)), + ) - if actual.after != expected.after: - print(f"Failed for case: {name}") - print("Expected after:\n", expected.after, "<EOF>") - print("Reconstructed after:\n", actual.after, "<EOF>") raise ValueError(f"Failed for case: {name}")
tests.test_edits/TestChangeIdentities.test_str_tk_conversion
Modified
temp-1
10c5e6f0d8a5440b63a9a41bac37d5eca50b6d6b
Update c3ProblemChangeDropout to v1.1.
<0>:<add> print(show_string_diff(get_after(c), decode_tokens(tk_after)))
# module: tests.test_edits class TestChangeIdentities: def test_str_tk_conversion(self): for name, c in self.cases.items(): line_diffs = change_to_line_diffs(c) print("line_diffs\n------\n" + "\n".join(line_diffs)) before, delta = line_diffs_to_original_delta(line_diffs) print("delta:", delta) tk_delta = delta.to_tk_delta() tk_before = encode_basic(before) tk_after = tk_delta.apply_to_input(tk_before) + if tk_after != encode_basic(get_after(c)): - if tk_after != encode_basic(c.after): print("after diff:\n") - print(show_string_diff(c.after, decode_tokens(tk_after))) <0> c_tokens = tk_delta.to_change_tks(tk_before) if c_tokens != change_to_tokens(c): print("c_tokens diff:\n") print( show_string_diff( decode_tokens(c_tokens), decode_tokens(change_to_tokens(c)) ) ) origin1, tk_delta1 = change_tks_to_original_delta(c_tokens) if origin1 != tk_before: print("origin diff:\n") print( show_string_diff(decode_tokens(origin1), decode_tokens(tk_before)) ) assert tk_delta1.apply_to_input(origin1) == tk_after
===========unchanged ref 0=========== at: coeditor.common splitlines(text: str) -> list[str] at: coeditor.encoding _BaseTokenizer = cast( TokenizerType, TokenizerType.from_pretrained("Salesforce/codet5-base") ) Newline_id = get_tk_id("\n") N_Extra_Ids = 100 decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str change_to_tokens(change: Change[str]) -> TokenSeq tokens_to_change(tokens: TokenSeq) -> Modified[str] code_to_input(code_tks: TokenSeq) -> TokenSeq change_to_input_output(change: Change[str]) -> tuple[TokenSeq, TokenSeq] inline_output_tokens(input: TokenSeq, output: TokenSeq, leave_unpredicted=False) -> TokenSeq at: tests.test_edits get_before(change: Change[str]) -> str assert_change_eq(actual: Change[str], expected: Change[str], name: str) assert_tks_eq(actual: TokenSeq, expected: TokenSeq, name: str) at: transformers.tokenization_utils_base.PreTrainedTokenizerBase vocab_files_names: Dict[str, str] = {} pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {} pretrained_init_configuration: Dict[str, Dict[str, Any]] = {} max_model_input_sizes: Dict[str, Optional[int]] = {} _auto_class: Optional[str] = None model_input_names: List[str] = ["input_ids", "token_type_ids", "attention_mask"] padding_side: str = "right" truncation_side: str = "right" slow_tokenizer_class = None ===========unchanged ref 1=========== encode(text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, return_tensors: Optional[Union[str, TensorType]]=None, *, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> List[int] ===========changed ref 0=========== # module: tests.test_edits + def get_before(change: Change[str]) -> str: + if isinstance(change, Modified): + return change.before + elif isinstance(change, Added): + return "" + elif isinstance(change, Deleted): + return change.before + else: + raise ValueError(f"Unknown change type: {change}") + ===========changed ref 1=========== # module: tests.test_edits def assert_tks_eq(actual: TokenSeq, expected: TokenSeq, name: str): if actual != expected: + print_sections( - print(f"Failed for case: {name}") + ("Expected", decode_tokens(expected)), - print("Expected:\n", decode_tokens(expected), "<EOF>") + ("Reconstructed", decode_tokens(actual)), + ) - print("Actual:\n", decode_tokens(actual), "<EOF>") raise ValueError(f"Failed for case: {name}") ===========changed ref 2=========== # module: tests.test_edits + def assert_change_eq(actual: Change[str], expected: Change[str], name: str): - def assert_change_eq(actual: Modified[str], expected: Modified[str], name: str): + if get_before(actual) != get_before(expected): + print_sections( + ("Expected before", get_before(expected)), + ("Reconstructed before", get_before(actual)), + ) - if actual.before != expected.before: - print(f"Failed for case: {name}") - print("Expected before:\n", expected.before, "<EOF>") - print("Reconstructed before:\n", actual.before, "<EOF>") raise ValueError(f"Failed for case: {name}") + if get_after(actual) != get_after(expected): + print_sections( + 
("Expected after", get_after(expected)), + ("Reconstructed after", get_after(actual)), + ) - if actual.after != expected.after: - print(f"Failed for case: {name}") - print("Expected after:\n", expected.after, "<EOF>") - print("Reconstructed after:\n", actual.after, "<EOF>") raise ValueError(f"Failed for case: {name}") ===========changed ref 3=========== # module: coeditor.common def splitlines(text: str) -> list[str]: """Split a text into lines and apalways ends with an empty line.""" + if not text: + return [] return text.split("\n") ===========changed ref 4=========== # module: coeditor.encoding def change_to_tokens(change: Change[str]) -> TokenSeq: match change: case Modified(before=before, after=after, unchanged=unchanged): if unchanged or before == after: return encode_basic(before) else: diffs = change_to_line_diffs(change) return encode_diffs(diffs) case Added() | Deleted(): + lines = split_list(encode_basic(change.earlier), Newline_id) - lines = split_list(encode_basic(change.earlier()), Newline_id) tk = Add_id if isinstance(change, Added) else Del_id + return join_list(([tk] + line for line in lines), Newline_id) - return join_list([tk] + line for line in lines) case _: raise AssertionError(f"Not a change type: {change}") ===========changed ref 5=========== # module: tests.test_edits + def get_after(change: Change[str]) -> str: + if isinstance(change, Modified): + return change.after + elif isinstance(change, Added): + return change.after + elif isinstance(change, Deleted): + return "" + else: + raise ValueError(f"Unknown change type: {change}") + ===========changed ref 6=========== # module: tests.test_edits class TestChangeIdentities: def test_str_encodings(self): for name, c in self.cases.items(): try: line_diffs = change_to_line_diffs(c) print("line_diffs\n------\n" + "\n".join(line_diffs)) before, delta = line_diffs_to_original_delta(line_diffs) + print("before:") + print(before) print("delta:", delta) + assert_str_equal(before, get_before(c)) - assert_str_equal(before, c.before) after = delta.apply_to_input(before) + assert_str_equal(after, get_after(c)) - assert_str_equal(after, c.after) except Exception: print_err(f"Failed for case: {name}") raise
tests.test_scoped_change/TestChangedSpan.check_changed_spans
Modified
temp-1
10c5e6f0d8a5440b63a9a41bac37d5eca50b6d6b
Update c3ProblemChangeDropout to v1.1.
<0>:<add> line_change = nl_change.later - nl_change.earlier
# module: tests.test_scoped_change class TestChangedSpan: @staticmethod def check_changed_spans( changed_spans: Sequence[ChangedSpan], *expects: tuple[type, int] ): print(f"{changed_spans=}\nchanges={[cs.change for cs in changed_spans]}") assert_eq( len(changed_spans), len(expects), ) for i, (change_type, n) in enumerate(expects): span = changed_spans[i] assert_eq(type(span.change), change_type) nl_change = span.change.map(count_lines) - line_change = nl_change.later() - nl_change.earlier() <0> assert_eq(line_change, n, extra_message=lambda: f"{i=}, {span.change=}")
===========unchanged ref 0=========== at: coeditor._utils assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None at: coeditor.change.Added after: E1 map(f: Callable[[E1], T2]) -> "Added[T2]" at: coeditor.change.Deleted before: E1 map(f: Callable[[E1], T2]) -> "Deleted[T2]" at: coeditor.change.Modified before: E1 after: E1 unchanged: bool = False map(f: Callable[[E1], T2]) -> "Modified[T2]" at: coeditor.common count_lines(text: str) -> int at: coeditor.scoped_changes ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange) at: coeditor.scoped_changes.ChangedSpan change: Change[str] parent_scopes: Sequence[Change[ChangeScope]] line_range: LineRange at: tests.test_scoped_change.TestChangedSpan code1 = dedent( """\ import os x = 1 y = x + 1 def f1(): global x x *= 5 return x if __name__ == "__main__": print(f1() + x) """ ) scope1 = ChangeScope.from_tree(ProjectPath("code1", ""), code_to_module(code1)) at: typing Sequence = _alias(collections.abc.Sequence, 1) ===========changed ref 0=========== # module: coeditor.common def count_lines(text: str) -> int: + if not text: + return 0 return text.count("\n") + 1 ===========changed ref 1=========== # module: coeditor.c3problem @dataclass(frozen=True) class TkC3Problem(TokenizedEdit): - def show(self) -> str: - return self.show_prediction(None) - ===========changed ref 2=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: @property def path(self) -> ProjectPath: + return self.parent_scopes[-1].earlier.path - return self.parent_scopes[-1].earlier().path ===========changed ref 3=========== # module: coeditor.common def splitlines(text: str) -> list[str]: """Split a text into lines and apalways ends with an empty line.""" + if not text: + return [] return text.split("\n") ===========changed ref 4=========== # module: coeditor.encoding class TokenizedEdit(ABC): + def show(self, pred_tks: TokenSeq | None = None) -> str: - def show(self) -> str: + def show_label(i: int): + return f" <{i}>" if i <= 9 else f"<{i}>" - return self.show_prediction(None) ===========changed ref 5=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: def _is_func_body(self) -> bool: + return self.parent_scopes[-1].earlier.tree.type == ptree.Function.type - return self.parent_scopes[-1].earlier().tree.type == ptree.Function.type ===========changed ref 6=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class ChangedSpan: @property def header_line_range(self) -> LineRange: + parent_scope = self.parent_scopes[-1].earlier - parent_scope = self.parent_scopes[-1].earlier() hrange = parent_scope.header_line_range return hrange ===========changed ref 7=========== # module: tests.test_edits + def get_after(change: Change[str]) -> str: + if isinstance(change, Modified): + return change.after + elif isinstance(change, Added): + return change.after + elif isinstance(change, Deleted): + return "" + else: + raise ValueError(f"Unknown change type: {change}") + ===========changed ref 8=========== # module: tests.test_edits + def get_before(change: Change[str]) -> str: + if isinstance(change, Modified): + return change.before + elif isinstance(change, Added): + return "" + elif isinstance(change, Deleted): + return change.before + else: + raise ValueError(f"Unknown change type: {change}") + ===========changed ref 9=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def __repr__(self): + line_diffs = "\n".join(f" {l}: {a}" 
for l, a in self._deltas.items()) - line_diffs = "\n".join(f" {l}: {a}" for l, a in enumerate(self._deltas) if a) return f"StrDelta(\n{line_diffs}\n)" ===========changed ref 10=========== # module: coeditor.common def split_list( lst: list[T1], sep: T1, ) -> list[list[T1]]: """ Split a list into segments by a separator, always ends with an empty list. """ + if not lst: + return [] result = list[list[T1]]() ptr = 0 for i, item in enumerate(lst): if item == sep: result.append(lst[ptr:i]) ptr = i + 1 result.append(lst[ptr:]) return result ===========changed ref 11=========== # module: tests.test_edits def assert_tks_eq(actual: TokenSeq, expected: TokenSeq, name: str): if actual != expected: + print_sections( - print(f"Failed for case: {name}") + ("Expected", decode_tokens(expected)), - print("Expected:\n", decode_tokens(expected), "<EOF>") + ("Reconstructed", decode_tokens(actual)), + ) - print("Actual:\n", decode_tokens(actual), "<EOF>") raise ValueError(f"Failed for case: {name}") ===========changed ref 12=========== # module: coeditor.c3problem class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]): """ ### Change log + - v2.4: fix buggy encoding of `Added` and `Deleted` changes. - v2.3: always generate problems with full editing range and move the problem splitting logic elsewhere. Also changed the data format of `ChangedCodeSpan`. """ + VERSION = "2.4" - VERSION = "2.3" # change spans with more than this many lines will be ignored max_span_lines: int = 500 ===========changed ref 13=========== # module: coeditor.scoped_changes @dataclass(frozen=True) class JModuleChange: @staticmethod def from_modules(module_change: Change[JModule]): "Compute the change spans from two versions of the same module." with _tlogger.timed("JModuleChange.from_modules"): changed = dict[ProjectPath, ChangedSpan]() for cspan in get_changed_spans( module_change.map(lambda m: m.as_scope), tuple() ): + path = cspan.parent_scopes[-1].earlier.path - path = cspan.parent_scopes[-1].earlier().path changed[path] = cspan return JModuleChange(module_change, changed)
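The assertion `nl_change.later - nl_change.earlier` in the record above relies on `earlier`/`later` now being properties rather than methods; a self-contained mock of just the pieces involved (a stand-in for `coeditor.change.Modified`, not the real class):

from dataclasses import dataclass
from typing import Callable, Generic, TypeVar

E = TypeVar("E")
T = TypeVar("T")

@dataclass(frozen=True)
class Modified(Generic[E]):
    before: E
    after: E

    @property
    def earlier(self) -> E:
        return self.before

    @property
    def later(self) -> E:
        return self.after

    def map(self, f: Callable[[E], T]) -> "Modified[T]":
        return Modified(f(self.before), f(self.after))

def count_lines(text: str) -> int:
    return 0 if not text else text.count("\n") + 1

change = Modified("x = 1\n", "x = 1\ny = 2\n")
nl_change = change.map(count_lines)
assert nl_change.later - nl_change.earlier == 1  # one line was added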
coeditor.encoding/TkDelta.get_new_target_lines
Modified
temp-1
c0c02f6eb043d817203cf5e558c2cebc27992cb1
Make `get_new_target_lines` consider early changes.
<0>:<add> if not deleted and l in line_set:
# module: coeditor.encoding @dataclass(frozen=True) class TkDelta: def get_new_target_lines(self, lines: Sequence[int]) -> Sequence[int]: """Given a list of lines to edit, return the corresponding new lines to edit after applying this delta.""" + if not lines: + return tuple() + last_line = lines[-1] + line_set = set(lines) new_edit_lines = list[int]() offset = 0 + for l in range(last_line + 1): - for l in lines: deleted = False for act in self.get_line_change(l): if act[0] == Add_id: + if l in line_set: + new_edit_lines.append(l + offset) - new_edit_lines.append(l + offset) offset += 1 elif act[0] == Del_id: deleted = True - if not deleted: <0> new_edit_lines.append(l + offset) return tuple(new_edit_lines)
===========unchanged ref 0=========== at: coeditor.encoding Add_id = get_tk_id(Add) at: coeditor.encoding.TkDelta _deltas: Mapping[int, tuple[TokenSeq, ...]] get_line_change(line: int) -> tuple[TokenSeq, ...] at: typing Sequence = _alias(collections.abc.Sequence, 1)
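A simplified, string-based re-implementation of the patched control flow above, to make the offset bookkeeping concrete. `deltas` maps an original line to its actions ("+" inserts a line, "-" deletes it), standing in for the token-level `TkDelta`; illustrative only.

def get_new_target_lines(deltas: dict[int, list[str]], lines: list[int]) -> tuple[int, ...]:
    # Walk every line up to the last requested one, so changes on earlier,
    # non-requested lines still shift the requested targets (the point of the patch).
    if not lines:
        return tuple()
    last_line = lines[-1]
    line_set = set(lines)
    new_edit_lines: list[int] = []
    offset = 0
    for l in range(last_line + 1):
        deleted = False
        for act in deltas.get(l, []):
            if act == "+":
                if l in line_set:
                    new_edit_lines.append(l + offset)
                offset += 1
            elif act == "-":
                deleted = True
        if not deleted and l in line_set:
            new_edit_lines.append(l + offset)
    return tuple(new_edit_lines)

assert get_new_target_lines({1: ["+"]}, [2, 3]) == (3, 4)     # an early insertion shifts later targets
assert get_new_target_lines({2: ["-"]}, [2, 3]) == (3,)       # a deleted target drops out
assert get_new_target_lines({2: ["+"]}, [2, 3]) == (2, 3, 4)  # an insertion at a requested line targets both it and the original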
coeditor._utils/pmap
Modified
temp-1
5cf0e571591acdb4fb8928f8267c6e46181cae47
Add dynamic prob transform to training pipeline.
<0>:<add> chunksize = max(1, n // (20 * max_workers))
# module: coeditor._utils def pmap( f: Callable[..., T1], *f_args: Any, desc: str | None = None, key_args: Mapping[str, Any] | None = None, max_workers: int | None = None, chunksize: int | None = None, tqdm_args: Mapping[str, Any] | None = None, ) -> list[T1]: """ Parallel map with progress displaying. """ n = len(f_args[0]) assert_eq(n, *(len(xs) for xs in f_args)) tqdm_args = dict(tqdm_args) if tqdm_args else {} tqdm_args.setdefault("smoothing", 0.0) if desc is None: desc = "pmap: " + f.__name__ if key_args is None: key_args = {} if max_workers is None: max_workers = DefaultWorkers if max_workers <= 1: outs = list[T1]() for i in tqdm(range(n), desc=desc, **tqdm_args): outs.append(f(*(a[i] for a in f_args), **key_args)) return outs if chunksize is None: - chunksize = max(1, n // (50 * max_workers)) <0> tag_f = _TaggedFunc(f, key_args) arg_tuples = zip(range(n), *f_args) with ( multiprocessing.Pool(max_workers) as pool, tqdm(total=n, desc=desc, **tqdm_args) as pbar, ): results = dict[int, T1]() for i, r in pool.imap_unordered(tag_f, arg_tuples, chunksize=chunksize): results[i] = r pbar.update() return [results[i] for i in range(n)]
===========unchanged ref 0=========== at: coeditor._utils T1 = TypeVar("T1") DefaultWorkers: int = multiprocessing.cpu_count() // 2 global DefaultWorkers _TaggedFunc(f: Callable[..., T1], key_args: Mapping[str, Any]) assert_eq(x: T1, *xs: T1, extra_message: Callable[[], str]=lambda: "") -> None at: multiprocessing Pool(processes: Optional[int]=..., initializer: Optional[Callable[..., Any]]=..., initargs: Iterable[Any]=..., maxtasksperchild: Optional[int]=...) -> pool.Pool at: tqdm.std tqdm(iterable=None, desc=None, total=None, leave=True, file=None, ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None, ascii=None, disable=False, unit='it', unit_scale=False, dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0, position=None, postfix=None, unit_divisor=1000, write_bytes=False, lock_args=None, nrows=None, colour=None, delay=0, gui=False, **kwargs) at: typing Callable = _CallableType(collections.abc.Callable, 2) Mapping = _alias(collections.abc.Mapping, 2)
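The only behavioural change in `pmap` above is the default chunk-size heuristic (divisor 20 per worker instead of 50, i.e. roughly 20 chunks per worker); restated as a standalone helper with a quick numeric check:

def default_chunksize(n: int, max_workers: int) -> int:
    # Patched heuristic: aim for about 20 chunks per worker, never below 1.
    return max(1, n // (20 * max_workers))

assert default_chunksize(10_000, 8) == 62  # 10_000 // (20 * 8)
assert default_chunksize(10, 8) == 1       # tiny inputs still get chunksize 1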
coeditor._utils/PickleCache.cached
Modified
temp-1
5cf0e571591acdb4fb8928f8267c6e46181cae47
Add dynamic prob transform to training pipeline.
<0>:<add> if remake or not path.exists():
# module: coeditor._utils class PickleCache: + def cached( + self, rel_path: Path | str, func: Callable[[], T1], remake: bool = False + ) -> T1: - def cached(self, rel_path: Path | str, func: Callable[[], T1]) -> T1: path = self.cache_dir / rel_path - if not path.exists(): <0> value = func() path.parent.mkdir(parents=True, exist_ok=True) logging.info(f"[PickleCache] Saving to cache: '{path}'") with path.open("wb") as f: pickle.dump(value, f) return value else: logging.info(f"[PickleCache] Loading from cache: '{path}'") with path.open("rb") as f: return pickle.load(f)
===========unchanged ref 0=========== at: _pickle dump(obj, file, protocol=None, *, fix_imports=True, buffer_callback=None) at: coeditor._utils T1 = TypeVar("T1") at: coeditor._utils.PickleCache.__init__ self.cache_dir = cache_dir at: logging info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: pathlib Path() at: pathlib.Path __slots__ = () open(mode: OpenBinaryMode, buffering: Literal[0], encoding: None=..., errors: None=..., newline: None=...) -> FileIO open(mode: OpenBinaryModeReading, buffering: Literal[-1, 1]=..., encoding: None=..., errors: None=..., newline: None=...) -> BufferedReader open(mode: OpenBinaryModeWriting, buffering: Literal[-1, 1]=..., encoding: None=..., errors: None=..., newline: None=...) -> BufferedWriter open(mode: OpenBinaryModeUpdating, buffering: Literal[-1, 1]=..., encoding: None=..., errors: None=..., newline: None=...) -> BufferedRandom open(mode: str, buffering: int=..., encoding: Optional[str]=..., errors: Optional[str]=..., newline: Optional[str]=...) -> IO[Any] open(mode: OpenTextMode=..., buffering: int=..., encoding: Optional[str]=..., errors: Optional[str]=..., newline: Optional[str]=...) -> TextIOWrapper open(mode: OpenBinaryMode, buffering: int, encoding: None=..., errors: None=..., newline: None=...) -> BinaryIO mkdir(mode: int=..., parents: bool=..., exist_ok: bool=...) -> None exists() -> bool ===========unchanged ref 1=========== at: pathlib.PurePath __slots__ = ( '_drv', '_root', '_parts', '_str', '_hash', '_pparts', '_cached_cparts', ) drive = property(attrgetter('_drv'), doc="""The drive prefix (letter or UNC path), if any.""") root = property(attrgetter('_root'), doc="""The root of the path, if any.""") at: pickle dump, dumps, load, loads = _dump, _dumps, _load, _loads at: typing Callable = _CallableType(collections.abc.Callable, 2) ===========changed ref 0=========== # module: coeditor._utils def pmap( f: Callable[..., T1], *f_args: Any, desc: str | None = None, key_args: Mapping[str, Any] | None = None, max_workers: int | None = None, chunksize: int | None = None, tqdm_args: Mapping[str, Any] | None = None, ) -> list[T1]: """ Parallel map with progress displaying. """ n = len(f_args[0]) assert_eq(n, *(len(xs) for xs in f_args)) tqdm_args = dict(tqdm_args) if tqdm_args else {} tqdm_args.setdefault("smoothing", 0.0) if desc is None: desc = "pmap: " + f.__name__ if key_args is None: key_args = {} if max_workers is None: max_workers = DefaultWorkers if max_workers <= 1: outs = list[T1]() for i in tqdm(range(n), desc=desc, **tqdm_args): outs.append(f(*(a[i] for a in f_args), **key_args)) return outs if chunksize is None: + chunksize = max(1, n // (20 * max_workers)) - chunksize = max(1, n // (50 * max_workers)) tag_f = _TaggedFunc(f, key_args) arg_tuples = zip(range(n), *f_args) with ( multiprocessing.Pool(max_workers) as pool, tqdm(total=n, desc=desc, **tqdm_args) as pbar, ): results = dict[int, T1]() for i, r in pool.imap_unordered(tag_f, arg_tuples, chunksize=chunksize): results[i] = r pbar.update() return [results[i] for i in range(n)]
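A self-contained sketch of the patched caching logic, with the new `remake` flag forcing recomputation. The free function below is a stand-in for the `PickleCache.cached` method, not the repository API:

import pickle
import tempfile
from pathlib import Path
from typing import Callable, TypeVar

T1 = TypeVar("T1")

def cached(cache_dir: Path, rel_path: str, func: Callable[[], T1], remake: bool = False) -> T1:
    # Same control flow as the patched method: recompute when asked to or when no cache file exists.
    path = cache_dir / rel_path
    if remake or not path.exists():
        value = func()
        path.parent.mkdir(parents=True, exist_ok=True)
        with path.open("wb") as f:
            pickle.dump(value, f)
        return value
    with path.open("rb") as f:
        return pickle.load(f)

tmp = Path(tempfile.mkdtemp())
assert cached(tmp, "x.pkl", lambda: 42) == 42              # computed and saved
assert cached(tmp, "x.pkl", lambda: 0) == 42               # served from the cache file
assert cached(tmp, "x.pkl", lambda: 0, remake=True) == 0   # remake bypasses the cache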
coeditor.model/RetrievalEditorModel.predict_on_data
Modified
temp-1
5cf0e571591acdb4fb8928f8267c6e46181cae47
Add dynamic prob transform to training pipeline.
<0>:<add> desc="predict_on_data",
# module: coeditor.model class RetrievalEditorModel(T5PreTrainedModel): @torch.no_grad() @torch.autocast("cuda") def predict_on_data( self, eval_problems: Sequence[C3Problem], tokenizer: C3ProblemTokenizer, batch_args: "BatchArgs", dec_args: DecodingArgs, ): if batch_args.shuffle_extra_ids: warnings.warn( "Shuffling extra ids during eval can lead to incorrect results." ) eval_loader = C3DataLoader( + eval_problems, + None, + tokenizer, + batch_args, + shuffle=False, - eval_problems, tokenizer, batch_args, shuffle=False, desc="predict_on_data" <0> ) gen_args = dec_args.to_model_args() batch_elems = list[RetrievalModelPrediction]() for batch in eval_loader: # type: ignore out_tks = self.generate( batch["input_ids"].to(self.device), references=batch["references"], query_ref_list=batch["query_ref_list"], **gen_args, ).tolist() # type: ignore input_ids = batch["input_ids"].tolist() labels = batch["labels"].tolist() query_ref_list = batch["query_ref_list"] for i in range(len(input_ids)): all_refs = batch["references"] references = [all_refs[j] for j in query_ref_list[i]] e = RetrievalModelPrediction( input_ids=remove_pad_ids(input_ids[i]), output_ids=remove_pad_ids(out_tks[i]), labels=labels[i], references=references, ) batch_elems.append(e) return RetrievalDecodingResult( eval_args={"</s>
===========below chunk 0=========== # module: coeditor.model class RetrievalEditorModel(T5PreTrainedModel): @torch.no_grad() @torch.autocast("cuda") def predict_on_data( self, eval_problems: Sequence[C3Problem], tokenizer: C3ProblemTokenizer, batch_args: "BatchArgs", dec_args: DecodingArgs, ): # offset: 1 <s> ) batch_elems.append(e) return RetrievalDecodingResult( eval_args={"batch_args": batch_args, "dec_args": dec_args}, problems=eval_problems, predictions=batch_elems, ) ===========unchanged ref 0=========== at: _warnings warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None at: coeditor.c3problem C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=()) C3ProblemTokenizer(max_ref_tks: int=512, max_query_tks: int=512, max_output_tks: int=256, max_scope_tks: int=128, max_ref_tks_sum: int=512 * 16, ref_chunk_overlap: int=32, disable_builtin_defs: bool=True, disable_unchanged_refs: bool=False, current_code_only: bool=False) at: coeditor.model DecodingArgs(max_output_tks: int=512, do_sample: bool=False, top_p: float=0.9, num_beams: Optional[int]=1, length_penalty: float=0.0, marginalize_samples: int=1) remove_pad_ids(ids: TokenSeq) -> TokenSeq RetrievalModelPrediction(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) RetrievalModelPrediction(**kwargs: _VT) RetrievalModelPrediction(map: Mapping[_KT, _VT], **kwargs: _VT) C3DataLoader(all_probs: Sequence[C3Problem], transform: C3ProblemTransform | None, tokenizer: C3ProblemTokenizer, batch_args: BatchArgs, shuffle: bool, desc: str, tqdm_args: dict | None=None, chunk_size: int=1000, workers: int=10) ===========unchanged ref 1=========== at: coeditor.model.BatchArgs min_queries: int = 1 max_queries: int = 8 shuffle_extra_ids: bool = True at: coeditor.model.C3DataLoader all_probs: Sequence[C3Problem] transform: C3ProblemTransform | None tokenizer: C3ProblemTokenizer batch_args: BatchArgs shuffle: bool desc: str tqdm_args: dict | None = None chunk_size: int = 1000 workers: int = 10 at: coeditor.model.DecodingArgs max_output_tks: int = 512 do_sample: bool = False top_p: float = 0.9 num_beams: Optional[int] = 1 length_penalty: float = 0.0 marginalize_samples: int = 1 to_model_args() -> dict at: coeditor.model.RetrievalEditorModel is_parallelizable = False supports_gradient_checkpointing = False at: coeditor.model.RetrievalEditorModel.eval_loss_on_loader core = self previous = core.training v = v + metrics.get(k, WeightedSum(0.0, 0)) at: torch.amp.autocast_mode autocast(device_type: str, dtype: Optional[_dtype]=None, enabled: bool=True, cache_enabled: Optional[bool]=None) at: torch.autograd.grad_mode no_grad() at: torch.nn.modules.module.Module dump_patches: bool = False _version: int = 1 training: bool _parameters: Dict[str, Optional[Parameter]] _buffers: Dict[str, Optional[Tensor]] _non_persistent_buffers_set: Set[str] _backward_pre_hooks: Dict[int, Callable] _backward_hooks: Dict[int, Callable] ===========unchanged ref 2=========== _is_full_backward_hook: Optional[bool] _forward_hooks: Dict[int, Callable] _forward_hooks_with_kwargs: Dict[int, bool] _forward_hooks_always_called: Dict[int, bool] _forward_pre_hooks: Dict[int, Callable] _forward_pre_hooks_with_kwargs: Dict[int, bool] _state_dict_hooks: 
Dict[int, Callable] _load_state_dict_pre_hooks: Dict[int, Callable] _state_dict_pre_hooks: Dict[int, Callable] _load_state_dict_post_hooks: Dict[int, Callable] _modules: Dict[str, Optional['Module']] call_super_init: bool = False _compiled_call_impl : Optional[Callable] = None forward: Callable[..., Any] = _forward_unimplemented __call__ : Callable[..., Any] = _wrapped_call_impl T_destination = TypeVar('T_destination', bound=Dict[str, Any]) train(mode: bool=True) -> T at: transformers.generation.utils.GenerationMixin generate(inputs: Optional[torch.Tensor]=None, generation_config: Optional[GenerationConfig]=None, logits_processor: Optional[LogitsProcessorList]=None, stopping_criteria: Optional[StoppingCriteriaList]=None, prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]]=None, synced_gpus: Optional[bool]=None, assistant_model: Optional["PreTrainedModel"]=None, streamer: Optional["BaseStreamer"]=None, **kwargs) -> Union[GenerateOutput, torch.LongTensor] at: typing Sequence = _alias(collections.abc.Sequence, 1) ===========changed ref 0=========== # module: coeditor.c3problem - class C3ProblemTransformer(ABC): - @abstractmethod - def transform(self, prob: C3Problem) -> Iterable[C3Problem]: - ... - ===========changed ref 1=========== # module: coeditor.c3problem - class C3ProblemTransformer(ABC): - @abstractmethod - def transform(self, prob: C3Problem) -> Iterable[C3Problem]: - ... - ===========changed ref 2=========== # module: coeditor.c3problem - class C3ProblemTransformer(ABC): - "A strategy to generate new C3 problems from the orginal ones." - ===========changed ref 3=========== # module: coeditor.c3problem - class C3ProblemTransformer(ABC): - "A strategy to generate new C3 problems from the orginal ones." -
coeditor.model/C3DataLoader.estimate_batch_stats
Modified
temp-1
5cf0e571591acdb4fb8928f8267c6e46181cae47
Add dynamic prob transform to training pipeline.
<0>:<add> batches = self._problems_to_batches(self._to_tokenized(self.all_probs))
# module: coeditor.model @dataclass class C3DataLoader: def estimate_batch_stats(self): - probs = list(self.all_probs) - if self.shuffle: - random.shuffle(probs) - batches = self._problems_to_batches(self._to_tokenized(probs)) <0> bsizes = list[int]() for b in batches: bsizes.append(len(b["input_ids"])) batch_stats = {k: f"{v:.1f}" for k, v in scalar_stats(bsizes).items()} return len(bsizes), batch_stats
===========unchanged ref 0=========== at: coeditor.c3problem C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=()) TkC3Problem(main_input: TkArray, header: TkArray, output: TkArray, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TkArray]], project: str, commit: CommitInfo | None, truncated: bool) at: coeditor.model.C3DataLoader shuffle: bool __len__(self) -> int chunk_size: int = 1000 at: coeditor.model.C3DataLoader.__post_init__ self._len_est = n_batches at: random shuffle = _inst.shuffle at: typing Iterable = _alias(collections.abc.Iterable, 1) Sequence = _alias(collections.abc.Sequence, 1) ===========changed ref 0=========== # module: coeditor.model @dataclass class C3DataLoader: all_probs: Sequence[C3Problem] + transform: C3ProblemTransform | None tokenizer: C3ProblemTokenizer batch_args: BatchArgs shuffle: bool desc: str tqdm_args: dict | None = None chunk_size: int = 1000 workers: int = 10 ===========changed ref 1=========== # module: coeditor.model @dataclass class C3DataLoader: def _to_tokenized(self, probs: Sequence[C3Problem]) -> Iterable[TkC3Problem]: + probs = list(probs) + if self.shuffle: + random.shuffle(probs) for i in range(0, len(probs), self.chunk_size): group = probs[i : i + self.chunk_size] + if self.transform is not None: + group = join_list( + pmap(self.transform.transform, group, tqdm_args={"disable": True}) + ) yield from pmap( self.tokenizer.tokenize_problem, group, tqdm_args={"disable": True} ) ===========changed ref 2=========== # module: coeditor.c3problem - class C3ProblemTransformer(ABC): - @abstractmethod - def transform(self, prob: C3Problem) -> Iterable[C3Problem]: - ... - ===========changed ref 3=========== # module: coeditor.c3problem - class C3ProblemTransformer(ABC): - @abstractmethod - def transform(self, prob: C3Problem) -> Iterable[C3Problem]: - ... - ===========changed ref 4=========== # module: coeditor.c3problem - class C3ProblemTransformer(ABC): - "A strategy to generate new C3 problems from the orginal ones." - ===========changed ref 5=========== # module: coeditor.c3problem - class C3ProblemTransformer(ABC): - "A strategy to generate new C3 problems from the orginal ones." - ===========changed ref 6=========== # module: coeditor._utils class PickleCache: + def cached( + self, rel_path: Path | str, func: Callable[[], T1], remake: bool = False + ) -> T1: - def cached(self, rel_path: Path | str, func: Callable[[], T1]) -> T1: path = self.cache_dir / rel_path + if remake or not path.exists(): - if not path.exists(): value = func() path.parent.mkdir(parents=True, exist_ok=True) logging.info(f"[PickleCache] Saving to cache: '{path}'") with path.open("wb") as f: pickle.dump(value, f) return value else: logging.info(f"[PickleCache] Loading from cache: '{path}'") with path.open("rb") as f: return pickle.load(f) ===========changed ref 7=========== # module: coeditor._utils def pmap( f: Callable[..., T1], *f_args: Any, desc: str | None = None, key_args: Mapping[str, Any] | None = None, max_workers: int | None = None, chunksize: int | None = None, tqdm_args: Mapping[str, Any] | None = None, ) -> list[T1]: """ Parallel map with progress displaying. 
""" n = len(f_args[0]) assert_eq(n, *(len(xs) for xs in f_args)) tqdm_args = dict(tqdm_args) if tqdm_args else {} tqdm_args.setdefault("smoothing", 0.0) if desc is None: desc = "pmap: " + f.__name__ if key_args is None: key_args = {} if max_workers is None: max_workers = DefaultWorkers if max_workers <= 1: outs = list[T1]() for i in tqdm(range(n), desc=desc, **tqdm_args): outs.append(f(*(a[i] for a in f_args), **key_args)) return outs if chunksize is None: + chunksize = max(1, n // (20 * max_workers)) - chunksize = max(1, n // (50 * max_workers)) tag_f = _TaggedFunc(f, key_args) arg_tuples = zip(range(n), *f_args) with ( multiprocessing.Pool(max_workers) as pool, tqdm(total=n, desc=desc, **tqdm_args) as pbar, ): results = dict[int, T1]() for i, r in pool.imap_unordered(tag_f, arg_tuples, chunksize=chunksize): results[i] = r pbar.update() return [results[i] for i in range(n)] ===========changed ref 8=========== # module: coeditor.model class RetrievalEditorModel(T5PreTrainedModel): @torch.no_grad() @torch.autocast("cuda") def predict_on_data( self, eval_problems: Sequence[C3Problem], tokenizer: C3ProblemTokenizer, batch_args: "BatchArgs", dec_args: DecodingArgs, ): if batch_args.shuffle_extra_ids: warnings.warn( "Shuffling extra ids during eval can lead to incorrect results." ) eval_loader = C3DataLoader( + eval_problems, + None, + tokenizer, + batch_args, + shuffle=False, + desc="predict_on_data", - eval_problems, tokenizer, batch_args, shuffle=False, desc="predict_on_data" ) gen_args = dec_args.to_model_args() batch_elems = list[RetrievalModelPrediction]() for batch in eval_loader: # type: ignore out_tks = self.generate( batch["input_ids"].to(self.device), references=batch["references"], query_ref_list=batch["query_ref_list"], **gen_args, ).tolist() # type: ignore input_ids = batch["input_ids"].tolist() labels = batch["labels"].tolist() query_ref_list = batch["query_ref_list"] for i in range(len(input_ids)): all_refs = batch["references"] references = [all_refs[j] for j in query_ref_list[i]] e = RetrievalModelPrediction( input_ids=remove_pad_ids(input_ids[i]), output_ids=remove_pad_ids(out_tks[i]), labels=labels[i], references=references, ) batch_elems.append(e) return RetrievalDec</s>
scripts.train_model/train_model
Modified
temp-1
5cf0e571591acdb4fb8928f8267c6e46181cae47
Add dynamic prob transform to training pipeline.
<0>:<add> encoder.problem_tranform,
# module: scripts.train_model def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), eval_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): <s>n, + batch_args, + shuffle=True, + desc="training", - datasets["train"], train_tkn, batch_args, shuffle=True, desc="training" ) with timed_action("Warm-up Training"): warmup_bargs = copy.deepcopy(batch_args) warmup_bargs.min_queries *= 4 warmup_bargs.max_queries *= 2 warm_up_data = random_subset(datasets["train"], len(datasets["train"]) // 4) warmup_tkn = copy.deepcopy(train_tkn) warmup_tkn.max_ref_tks_sum //= 3 warmup_loader = C3DataLoader( warm_up_data, <0> warmup_tkn, warmup_bargs, shuffle=True, desc="warm-up training", ) warmup_targs = copy.deepcopy(train_args) warmup_targs.learning_rate *= 4 warmup_targs.max_train_epochs = 1 model.train_on_data(model_name, warmup_loader, eval_loader, warmup_targs) with timed_action("Fine-tune Training"): model.train_on_data(model_name, train_loader, eval_loader, train_args) model.to("cuda") with timed_action("Loss Evaluation"): eval_result = model.eval_loss_on_loader(eval_loader) eval_dict = {f"test/{k</s>
===========above chunk 0=========== # module: scripts.train_model def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), eval_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): # offset: -1 <s>model_dir() / model_name) if os.getenv("CUDA_VISIBLE_DEVICES") is None: warnings.warn( "CUDA_VISIBLE_DEVICES not set, using 0. Note that " "the Huggingface Trainer will use all visible GPUs for training." ) os.environ["CUDA_VISIBLE_DEVICES"] = "0" - def transform_data(data: Sequence[C3Problem]) -> list[C3Problem]: - transformed = pmap(encoder.problem_tranformer.transform, data, chunksize=1000) - return join_list(transformed) - - datasets = {split: transform_data(data) for split, data in datasets.items()} - train_tkn = encoder.edit_tokenizer eval_tkn = copy.deepcopy(train_tkn) eval_tkn.max_ref_tks_sum *= 2 eval_loader = C3DataLoader( + datasets["valid"], None, eval_tkn, eval_batch_args, shuffle=False, desc="eval" - datasets["valid"], eval_tkn, eval_batch_args, shuffle=False, desc="eval" ) if not eval_only: + # we attach the problem transform to the dataloader to generate data on-the-fly train_loader = C3DataLoader( + datasets["train"], + encoder.problem_tranform, + train_tkn, + batch_args, + shuffle=True, + desc="training", - datasets["train"</s> ===========above chunk 1=========== # module: scripts.train_model def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), eval_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): # offset: -2 <s>, v in { "edit_tokenizer": encoder.edit_tokenizer.get_args(), "batch_args": batch_args, "train_args": train_args, "dec_args": dec_args, }.items() } project = "Coeditor" if not train_args.quicktest else "Coeditor-quicktest" if eval_only: project = "eval-" + project wandb.init(dir="..", project=project, name=model_name, config=config_dict) if train_args.quicktest: print("Using fewer data for quick test.") n_quick_exs = 20 + datasets = C3ProblemDataset( + train=datasets["train"][:n_quick_exs], + valid=datasets["valid"][:n_quick_exs], + test=datasets["test"][:n_quick_exs], + ) - datasets = {name: data[:n_quick_exs] for name, data in datasets.items()} if not eval_only: model = RetrievalEditorModel.from_code_t5( "base", reuse_embed=True, reinit_weights=train_args.reinit_weights ) else: model = RetrievalEditorModel.load(get_model_dir() / model_name) if os.getenv("CUDA_VISIBLE_DEVICES") is None: </s> ===========above chunk 2=========== # module: scripts.train_model def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), eval_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): # offset: -3 # model_variant = "-file" model_name = f"coeditor-{dataset_name}" model_name += model_variant dec_args = DecodingArgs() if train_args.quicktest: model_name = "quicktest-" + model_name if not eval_only: check_save_dir(model_name) + # problems will be transformed and saved for valid and test but not train. 
+ datasets = make_or_load_dataset( - datasets = make_or_load_datasets( + dataset_name, + encoder.change_processor, + encoder.problem_tranform, + remake_problems=recreate_data, - dataset_name, encoder.change_processor, recreate_data=recreate_data ) config_dict = { k: get_modified_args(v) </s> ===========below chunk 0=========== # module: scripts.train_model def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), eval_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): # offset: 1 <s> eval_result = model.eval_loss_on_loader(eval_loader) eval_dict = {f"test/{k}": v.average() for k, v in eval_result.items()} wandb.log(eval_dict) max_saved_samples = 300 with timed_action("Accuracy Evaluation"): dec_result = model.predict_on_data( datasets["test"], eval_tkn, eval_batch_args, dec_args ) pickle_dump(get_model_dir() / model_name / "dec_result.pkl", dec_result) exact_acc, exact_correct_map = dec_result.exact_match_accuracy() wandb.log({"test/exact-acc": exact_acc.average()}) out_dir = get_model_dir() / model_name / "exact_match_samples" dec_result.save_examples_to_dir( out_dir, random_subset(exact_correct_map, max_saved_samples) ) cprint("blue", "Exact-match samples saved to:", out_dir) return model
coeditor.encoding/tokens_to_change
Modified
temp-1
76c8cd1ea3625e687e9109c30b69dc7e42c69f95
Improve prediction visualization. - rename `to_change_tks`.
<0>:<add> return Modified(before_code, after_code)
# module: coeditor.encoding def tokens_to_change(tokens: TokenSeq) -> Modified[str]: "Decode a token sequence into a change." tk_lines = split_list(tokens, Newline_id) + before_lines = list[TokenSeq]() - before_lines = list[str]() + after_lines = list[TokenSeq]() - after_lines = list[str]() for tk_line in tk_lines: if tk_line and tk_line[0] == Add_id: + after_lines.append(tk_line[1:]) - after_lines.append(_Tokenizer.decode(tk_line[1:])) elif tk_line and tk_line[0] == Del_id: + before_lines.append(tk_line[1:]) - before_lines.append(_Tokenizer.decode(tk_line[1:])) else: - line = _Tokenizer.decode(tk_line) + before_lines.append(tk_line) - before_lines.append(line) + after_lines.append(tk_line) - after_lines.append(line) + before_code = decode_tokens(join_list(before_lines, Newline_id)) + after_code = decode_tokens(join_list(after_lines, Newline_id)) - return Modified(before="\n".join(before_lines), after="\n".join(after_lines)) <0>
===========unchanged ref 0=========== at: coeditor.change Modified(before: E1, after: E1, unchanged: bool=False) at: coeditor.common TokenSeq = list[Token] split_list(lst: list[T1], sep: T1) -> list[list[T1]] join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1] at: coeditor.encoding Add_id = get_tk_id(Add) Del_id = get_tk_id(Del) Newline_id = get_tk_id("\n") decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: - def to_change_tks(self, input: TokenSeq) -> TokenSeq: - lines = split_list(input, Newline_id) - - new_lines = list[TokenSeq]() - for i, line in enumerate(lines): - deleted = False - if delta := self._deltas.get(i): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - elif action[0] == Del_id: - deleted = True - if deleted: - new_lines.append([Del_id] + line) - else: - new_lines.append(line) - if delta := self._deltas.get(len(lines)): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - return join_list(new_lines, Newline_id) - ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: - def to_change_tks(self, input: TokenSeq) -> TokenSeq: - lines = split_list(input, Newline_id) - - new_lines = list[TokenSeq]() - for i, line in enumerate(lines): - deleted = False - if delta := self._deltas.get(i): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - elif action[0] == Del_id: - deleted = True - if deleted: - new_lines.append([Del_id] + line) - else: - new_lines.append(line) - if delta := self._deltas.get(len(lines)): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - return join_list(new_lines, Newline_id) -
coeditor.c3problem/C3ProblemChangeDropout.transform
Modified
temp-1
76c8cd1ea3625e687e9109c30b69dc7e42c69f95
Improve prediction visualization. - rename `to_change_tks`.
<0>:<add> new_original = TkArray.new(delta1.apply_to_change(original.tolist()))
# module: coeditor.c3problem class C3ProblemChangeDropout(C3ProblemTransform): def transform(self, prob: C3Problem) -> Sequence[C3Problem]: <s> len(grouped_keys) >= 2 if should_dropout: n_to_drop = int( len(grouped_keys) * random.random() * self.max_dropout_ratio ) assert n_to_drop < len(grouped_keys) keys_to_drop = join_list(random_subset(grouped_keys, n_to_drop)) else: keys_to_drop = [] if keys_to_drop: delta1, delta2 = delta.decompose_for_change(keys_to_drop) delta2_groups = delta2.change_groups() if not delta2_groups: print_err(f"{delta=}, {keys_to_drop=}, {delta1=}") raise AssertionError("Empty delta2_groups") - new_original = TkArray.new(delta1.to_change_tks(original.tolist())) <0> new_trans = prob.transformations + ("split", "dropout") new_span = dataclasses.replace( prob.span, original=new_original, delta=delta2 ) else: new_trans = prob.transformations + ("split",) new_span = prob.span delta1 = None delta2_groups = delta.change_groups() prob_and_n = list[tuple[C3Problem, int]]() for i in range(start, stop, self.max_lines_to_edit): j = min(i + self.max_lines_to_edit, stop) edit_lines = range(i, j) if delta1 is not None: edit_lines = delta1.get_new_target_lines(edit_lines) line_set = set(edit_lines) n_groups = sum(any(key[0] in line_set for key in g) for g in delta2_groups) </s>
===========above chunk 0=========== # module: coeditor.c3problem class C3ProblemChangeDropout(C3ProblemTransform): def transform(self, prob: C3Problem) -> Sequence[C3Problem]: # offset: -1 original = prob.span.original delta = prob.span.delta l_range = prob.edit_lines assert isinstance(l_range, range) start, stop = l_range.start, l_range.stop grouped_keys = delta.change_groups() should_dropout = len(grouped_keys) >= 2 if should_dropout: n_to_drop = int( len(grouped</s> ===========below chunk 0=========== # module: coeditor.c3problem class C3ProblemChangeDropout(C3ProblemTransform): def transform(self, prob: C3Problem) -> Sequence[C3Problem]: # offset: 1 <s>_groups = sum(any(key[0] in line_set for key in g) for g in delta2_groups) if n_groups > 0: sub_prob = dataclasses.replace( prob, span=new_span, edit_lines=edit_lines, transformations=new_trans, ) prob_and_n.append((sub_prob, n_groups)) # return the problems with the most changes prob_and_n.sort(key=lambda p: p[1], reverse=True) probs = [p[0] for p in prob_and_n] return probs[: self.max_split_factor] ===========unchanged ref 0=========== at: coeditor.c3problem C3Problem(span: ChangedCodeSpan, edit_lines: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=()) at: coeditor.c3problem.C3Problem span: ChangedCodeSpan edit_lines: Sequence[int] relevant_changes: Sequence[ChangedCodeSpan] relevant_unchanged: Sequence[ChangedCodeSpan] change_type: Change[None] src_info: SrcInfo transformations: tuple[str, ...] = () at: coeditor.c3problem.C3ProblemChangeDropout VERSION = "1.1" max_lines_to_edit: int = 25 max_split_factor: int = 4 at: coeditor.c3problem.C3ProblemTransform transform(self, prob: C3Problem) -> Sequence[C3Problem] at: coeditor.c3problem.ChangedCodeSpan headers: Sequence[ChangedHeader] original: TkArray delta: TkDelta line_range: LineRange module: ModuleName at: coeditor.common join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1] random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2] random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1] print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) 
-> None at: coeditor.encoding.TkDelta _deltas: Mapping[int, tuple[TokenSeq, ...]] ===========unchanged ref 1=========== decompose_for_change(first_keys: Collection[DeltaKey]) -> tuple[Self, Self] change_groups() -> Sequence[tuple[DeltaKey, ...]] at: coeditor.tk_array TkArray() at: coeditor.tk_array.TkArray tolist() -> TokenSeq new(tks: Sequence[int]) -> "TkArray" at: dataclasses replace(obj: _T, **changes: Any) -> _T at: random random = _inst.random at: typing Sequence = _alias(collections.abc.Sequence, 1) ===========changed ref 0=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + def summarize(self) -> str: + return "\n".join(self.meta_data_lines()) + ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: - def to_change_tks(self, input: TokenSeq) -> TokenSeq: - lines = split_list(input, Newline_id) - - new_lines = list[TokenSeq]() - for i, line in enumerate(lines): - deleted = False - if delta := self._deltas.get(i): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - elif action[0] == Del_id: - deleted = True - if deleted: - new_lines.append([Del_id] + line) - else: - new_lines.append(line) - if delta := self._deltas.get(len(lines)): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - return join_list(new_lines, Newline_id) - ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: - def to_change_tks(self, input: TokenSeq) -> TokenSeq: - lines = split_list(input, Newline_id) - - new_lines = list[TokenSeq]() - for i, line in enumerate(lines): - deleted = False - if delta := self._deltas.get(i): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - elif action[0] == Del_id: - deleted = True - if deleted: - new_lines.append([Del_id] + line) - else: - new_lines.append(line) - if delta := self._deltas.get(len(lines)): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - return join_list(new_lines, Newline_id) - ===========changed ref 3=========== # module: coeditor.encoding def tokens_to_change(tokens: TokenSeq) -> Modified[str]: "Decode a token sequence into a change." tk_lines = split_list(tokens, Newline_id) + before_lines = list[TokenSeq]() - before_lines = list[str]() + after_lines = list[TokenSeq]() - after_lines = list[str]() for tk_line in tk_lines: if tk_line and tk_line[0] == Add_id: + after_lines.append(tk_line[1:]) - after_lines.append(_Tokenizer.decode(tk_line[1:])) elif tk_line and tk_line[0] == Del_id: + before_lines.append(tk_line[1:]) - before_lines.append(_Tokenizer.decode(tk_line[1:])) else: - line = _Tokenizer.decode(tk_line) + before_lines.append(tk_line) - before_lines.append(line) + after_lines.append(tk_line) - after_lines.append(line) + before_code = decode_tokens(join_list(before_lines, Newline_id)) + after_code = decode_tokens(join_list(after_lines, Newline_id)) + return Modified(before_code, after_code) - return Modified(before="\n".join(before_lines), after="\n".join(after_lines))
coeditor.c3problem/C3ProblemTokenizer.tokenize_problem
Modified
temp-1
76c8cd1ea3625e687e9109c30b69dc7e42c69f95
Improve prediction visualization. - rename `to_change_tks`.
<0>:<add> above_tks = tk_delta.for_input_range((0, edit_start)).apply_to_change(above_tks)
# module: coeditor.c3problem @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> TkC3Problem: <s>output.extend(line_change) if line_change and line_change[-1] != Del_id: chunk_output.append(Newline_id) if len(chunk_input) > input_limit: break edit_stop = last_line + 1 # limit the input size if it's too long chunk_input = truncate_section( chunk_input, TruncateAt.Right, input_limit, inplace=True ) chunk_output = truncate_output_tks(chunk_input, chunk_output) # try move some prev_change_tks into the input above_tks = join_list(origin_lines[:edit_start] + [TokenSeq()], Newline_id) - above_tks = tk_delta.for_input_range((0, edit_start)).to_change_tks(above_tks) <0> below_tks = join_list(origin_lines[edit_stop:] + [TokenSeq()], Newline_id) chunk_input, above_tks, below_tks = self._inline_some_context( chunk_input, above_tks, below_tks, input_limit ) chunk_output = truncate_section( chunk_output, TruncateAt.Right, self.max_output_tks, add_bos=False, inplace=True, ) above_chunks = break_into_chunks( above_tks, lambda i: self._encode_headers(span.headers, -1 - i), chunk_size=self.max_ref_tks, overlap=self.ref_chunk_overlap, right_to_left=True, ) if not below_tks: below_chunks = [] else: below_chunks = break_into_chunks( below_</s>
===========above chunk 0=========== # module: coeditor.c3problem @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> TkC3Problem: # offset: -1 span = problem.span original: TokenSeq = span.original.tolist() tk_delta: TkDelta = span.delta origin_lines = split_list(original, Newline_id) edit_start = problem.edit_lines[0] scope_tks = self._encode_headers(span.headers, 0) input_limit = self.max_query_tks - len(scope_tks) chunk_input = TokenSeq() chunk_output = TokenSeq() last_line = edit_start for i, l in enumerate(problem.edit_lines): for line in origin_lines[last_line + 1 : l]: chunk_input.extend(line) chunk_input.append(Newline_id) chunk_input.append(get_extra_id(i)) if l < len(origin_lines): chunk_input.extend(origin_lines[l]) chunk_input.append(Newline_id) last_line = l line_change = join_list(tk_delta.get_line_change(l), Newline_id) chunk_output.append(get_extra_id(i)) chunk_output.extend(line_change) if line_change and line_change[-1] != Del_id: chunk_</s> ===========below chunk 0=========== # module: coeditor.c3problem @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> TkC3Problem: # offset: 1 <s>tks: below_chunks = [] else: below_chunks = break_into_chunks( below_tks, lambda i: self._encode_headers(span.headers, i + 1), chunk_size=self.max_ref_tks, overlap=self.ref_chunk_overlap, ) above_chunks = [ (f"above chunk {i}", TkArray.new(chunk)) for i, chunk in enumerate(above_chunks) ] below_chunks = [ (f"below chunk {i}", TkArray.new(chunk)) for i, chunk in enumerate(below_chunks) ] all_refs = above_chunks + below_chunks ref_size_sum = sum(len(ref) for _, ref in all_refs) # compute the references that are relevant to this span if ref_size_sum < self.max_ref_tks_sum: changed = self._group_encode_changed_refs(problem.relevant_changes) for i, chunk in enumerate(changed): all_refs.append((f"changed ref {i}", TkArray.new(chunk))) ref_size_sum += sum(len(x) for x in changed) if ref_size_sum < self.max_ref_tks_sum: unchanged = self._group_encode_unchanged_refs(problem.relevant_unchanged) for i, chunk in enumerate(unchanged): all_refs.append((f"unchanged ref {i}", TkArray.new(chunk))) # take until we hit the limit ref_size_sum = 0 kept_refs = list[tuple[str, TkArray]]() for (name, ref) in all_refs: if ref_size_sum + len(ref) > self.max_</s> ===========below chunk 1=========== # module: coeditor.c3problem @dataclass class C3ProblemTokenizer: def tokenize_problem( self, problem: C3Problem, ) -> TkC3Problem: # offset: 2 <s> for (name, ref) in all_refs: if ref_size_sum + len(ref) > self.max_ref_tks_sum: continue ref_size_sum += len(ref) kept_refs.append((name, ref)) return TkC3Problem( TkArray.new(scope_tks + chunk_input), TkArray.new(chunk_output), path=span.headers[-1].path, change_type=problem.change_type, named_references=kept_refs, project=problem.src_info["project"], commit=problem.src_info["commit"], ) ===========unchanged ref 0=========== at: cachetools.lru LRUCache(maxsize: int, getsizeof: Optional[Callable[[_VT], int]]=...) 
at: coeditor.c3problem C3Problem(span: ChangedCodeSpan, edit_lines: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=()) TkC3Problem(input: TkArray, output: TkArray, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TkArray]], project: str, commit: CommitInfo | None) at: coeditor.c3problem.C3Problem span: ChangedCodeSpan edit_lines: Sequence[int] relevant_changes: Sequence[ChangedCodeSpan] relevant_unchanged: Sequence[ChangedCodeSpan] at: coeditor.c3problem.C3ProblemTokenizer VERSION = "2.3" max_ref_tks: int = 512 max_query_tks: int = 512 max_output_tks: int = 256 max_scope_tks: int = 128 max_ref_tks_sum: int = 512 * 12 ref_chunk_overlap: int = 32 _encode_headers(scope_changes: Sequence[ChangedHeader], offset: int) -> TokenSeq _inline_some_context(input: TokenSeq, above_ctx: TokenSeq, below_ctx: TokenSeq, size_limit: int) -> tuple[TokenSeq, TokenSeq, TokenSeq] _group_encode_unchanged_refs(elems: Sequence[ChangedCodeSpan]) -> Sequence[TokenSeq] _group_encode_changed_refs(changes: Sequence[ChangedCodeSpan]) -> Sequence[TokenSeq] _group_encode_changed_refs(self, changes: Sequence[ChangedCodeSpan]) -> Sequence[TokenSeq] at: coeditor.c3problem.ChangedCodeSpan headers: Sequence[ChangedHeader]
coeditor.c3problem/C3ProblemTokenizer._group_encode_changed_refs
Modified
temp-1
76c8cd1ea3625e687e9109c30b69dc7e42c69f95
Improve prediction visualization. - rename `to_change_tks`.
<0>:<add> c_tks = c.delta.apply_to_change(c.original.tolist())
# module: coeditor.c3problem @dataclass class C3ProblemTokenizer: def _group_encode_changed_refs( self, changes: Sequence[ChangedCodeSpan] ) -> Sequence[TokenSeq]: module2changes = groupby(changes, lambda c: c.module) all_chunks = list[TokenSeq]() for change_group in module2changes.values(): change_group.sort(key=lambda c: c.line_range[0]) segs = list[TokenSeq]() # we'll add module as the chunk header, so we start within the module last_scope = change_group[0].headers[:1] for c in change_group: header_diff = list[ChangedHeader]() for i, h in enumerate(c.headers): if i >= len(last_scope) or h.path != last_scope[i].path: header_diff.append(h) if header_diff: header_tks = self._encode_headers(header_diff, 0) segs.append(header_tks) - c_tks = c.delta.to_change_tks(c.original.tolist()) <0> segs.append(c_tks) segs.append([Newline_id, Newline_id]) last_scope = c.headers segs.append([Newline_id]) mod_change = change_group[0].headers[:1] mod_chunks = break_into_chunks( join_list(segs), lambda i: self._encode_headers(mod_change, i), self.max_ref_tks, overlap=self.ref_chunk_overlap, ) all_chunks.extend(mod_chunks) return all_chunks
===========unchanged ref 0=========== at: coeditor._utils groupby(iterable: Iterable[T1], keyfunc: Callable[[T1], T2]) -> dict[T2, list[T1]] at: coeditor.c3problem ChangedHeader(change_tks: TkArray, type: str, line_range: LineRange, path: ProjectPath) ChangedCodeSpan(headers: Sequence[ChangedHeader], original: TkArray, delta: TkDelta, line_range: LineRange, module: ModuleName) at: coeditor.c3problem.C3ProblemTokenizer max_ref_tks: int = 512 ref_chunk_overlap: int = 32 _encode_headers(scope_changes: Sequence[ChangedHeader], offset: int) -> TokenSeq at: coeditor.c3problem.ChangedCodeSpan headers: Sequence[ChangedHeader] at: coeditor.c3problem.ChangedHeader path: ProjectPath at: coeditor.common TokenSeq = list[Token] join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1] at: coeditor.encoding Newline_id = get_tk_id("\n") break_into_chunks(tks: TokenSeq, header_f: Callable[[int], TokenSeq], chunk_size: int, overlap: int, right_to_left: bool=False, add_bos: bool=True, max_return_chunks: int | None=None) -> list[TokenSeq] at: typing Sequence = _alias(collections.abc.Sequence, 1) ===========changed ref 0=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + def summarize(self) -> str: + return "\n".join(self.meta_data_lines()) + ===========changed ref 1=========== # module: coeditor.c3problem class C3ProblemChangeDropout(C3ProblemTransform): def transform(self, prob: C3Problem) -> Sequence[C3Problem]: original = prob.span.original delta = prob.span.delta l_range = prob.edit_lines assert isinstance(l_range, range) start, stop = l_range.start, l_range.stop grouped_keys = delta.change_groups() should_dropout = len(grouped_keys) >= 2 if should_dropout: n_to_drop = int( len(grouped_keys) * random.random() * self.max_dropout_ratio ) assert n_to_drop < len(grouped_keys) keys_to_drop = join_list(random_subset(grouped_keys, n_to_drop)) else: keys_to_drop = [] if keys_to_drop: delta1, delta2 = delta.decompose_for_change(keys_to_drop) delta2_groups = delta2.change_groups() if not delta2_groups: print_err(f"{delta=}, {keys_to_drop=}, {delta1=}") raise AssertionError("Empty delta2_groups") + new_original = TkArray.new(delta1.apply_to_change(original.tolist())) - new_original = TkArray.new(delta1.to_change_tks(original.tolist())) new_trans = prob.transformations + ("split", "dropout") new_span = dataclasses.replace( prob.span, original=new_original, delta=delta2 ) else: new_trans = prob.transformations + ("split",) new_span = prob.span delta1 = None delta2_groups = delta.change_groups() prob_and_n = list[tuple[C3Problem, int]]() for i in range(start, stop, self.max_lines_to_edit): j = min(</s> ===========changed ref 2=========== # module: coeditor.c3problem class C3ProblemChangeDropout(C3ProblemTransform): def transform(self, prob: C3Problem) -> Sequence[C3Problem]: # offset: 1 <s> int]]() for i in range(start, stop, self.max_lines_to_edit): j = min(i + self.max_lines_to_edit, stop) edit_lines = range(i, j) if delta1 is not None: edit_lines = delta1.get_new_target_lines(edit_lines) line_set = set(edit_lines) n_groups = sum(any(key[0] in line_set for key in g) for g in delta2_groups) if n_groups > 0: sub_prob = dataclasses.replace( prob, span=new_span, edit_lines=edit_lines, transformations=new_trans, ) prob_and_n.append((sub_prob, n_groups)) # return the problems with the most changes prob_and_n.sort(key=lambda p: p[1], reverse=True) probs = [p[0] for p in prob_and_n] return probs[: self.max_split_factor] ===========changed ref 3=========== # module: 
coeditor.encoding @dataclass(frozen=True) class TkDelta: - def to_change_tks(self, input: TokenSeq) -> TokenSeq: - lines = split_list(input, Newline_id) - - new_lines = list[TokenSeq]() - for i, line in enumerate(lines): - deleted = False - if delta := self._deltas.get(i): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - elif action[0] == Del_id: - deleted = True - if deleted: - new_lines.append([Del_id] + line) - else: - new_lines.append(line) - if delta := self._deltas.get(len(lines)): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - return join_list(new_lines, Newline_id) - ===========changed ref 4=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: - def to_change_tks(self, input: TokenSeq) -> TokenSeq: - lines = split_list(input, Newline_id) - - new_lines = list[TokenSeq]() - for i, line in enumerate(lines): - deleted = False - if delta := self._deltas.get(i): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - elif action[0] == Del_id: - deleted = True - if deleted: - new_lines.append([Del_id] + line) - else: - new_lines.append(line) - if delta := self._deltas.get(len(lines)): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - return join_list(new_lines, Newline_id) -
coeditor.model/RetrievalDecodingResult.exact_match_accuracy
Modified
temp-1
76c8cd1ea3625e687e9109c30b69dc7e42c69f95
Improve prediction visualization. - rename `to_change_tks`.
<0>:<add> print(prob.summarize())
# module: coeditor.model @dataclass class RetrievalDecodingResult: def exact_match_accuracy(self) -> tuple[CountedSum, dict[int, bool]]: ex2correct = dict[int, bool]() + bad_probs = list[C3Problem]() for i, mp in enumerate(self.predictions): prob = self.problems[i] original = prob.span.original.tolist() pred_delta = TkDelta.from_output_tks(mp["output_ids"]) label_delta = TkDelta.from_output_tks(mp["labels"]) + if not prob.edit_lines: + bad_probs.append(prob) + continue - assert isinstance(prob.edit_lines, range) + line_shift = prob.edit_lines[0] - line_shift = prob.edit_lines.start + pred_change = pred_delta.shifted(line_shift).apply_to_change(original) - pred_code = pred_delta.shifted(line_shift).apply_to_input(original) + label_change = label_delta.shifted(line_shift).apply_to_change(original) - label_code = label_delta.shifted(line_shift).apply_to_input(original) + pred_code = tokens_to_change(pred_change).after + label_code = tokens_to_change(label_change).after + ex2correct[i] = code_equal(pred_code, label_code) - is_correct = code_equal(decode_tokens(pred_code), decode_tokens(label_code)) - ex2correct[i] = is_correct correct_count = CountedSum(sum(ex2correct.values()), len(ex2correct)) + if bad_probs: + cprint("yellow", "Number of problems with no edits:", len(bad_probs)) + for prob in bad_probs[:5]: <0> return correct_count, ex2correct
===========unchanged ref 0=========== at: coeditor._utils assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None at: coeditor.c3problem C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=()) at: coeditor.c3problem.C3Problem span: ChangedCodeSpan edit_line_ids: Sequence[int] relevant_changes: Sequence[ChangedCodeSpan] relevant_unchanged: Mapping["PyFullName", "PyDefinition"] change_type: Change[None] src_info: SrcInfo transformations: tuple[str, ...] = () at: coeditor.c3problem.ChangedCodeSpan headers: Sequence[ChangedHeader] original: TkArray delta: TkDelta line_range: LineRange module: ModuleName at: coeditor.common CountedSum = WeightedSum[int, int] at: coeditor.encoding TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]]) at: coeditor.encoding.TkDelta _deltas: Mapping[int, tuple[TokenSeq, ...]] apply_to_change(change: TokenSeq) -> TokenSeq shifted(shift_lines: int) -> Self from_output_tks(edit_line_ids: Sequence[int], tks: TokenSeq, allow_truncated_tks: bool=True) -> "TkDelta" at: coeditor.model.RetrievalDecodingResult eval_args: dict problems: Sequence[C3Problem] predictions: Sequence[RetrievalModelPrediction] at: coeditor.tk_array.TkArray tolist() -> TokenSeq ===========changed ref 0=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + def summarize(self) -> str: + return "\n".join(self.meta_data_lines()) + ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: - def to_change_tks(self, input: TokenSeq) -> TokenSeq: - lines = split_list(input, Newline_id) - - new_lines = list[TokenSeq]() - for i, line in enumerate(lines): - deleted = False - if delta := self._deltas.get(i): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - elif action[0] == Del_id: - deleted = True - if deleted: - new_lines.append([Del_id] + line) - else: - new_lines.append(line) - if delta := self._deltas.get(len(lines)): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - return join_list(new_lines, Newline_id) - ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: - def to_change_tks(self, input: TokenSeq) -> TokenSeq: - lines = split_list(input, Newline_id) - - new_lines = list[TokenSeq]() - for i, line in enumerate(lines): - deleted = False - if delta := self._deltas.get(i): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - elif action[0] == Del_id: - deleted = True - if deleted: - new_lines.append([Del_id] + line) - else: - new_lines.append(line) - if delta := self._deltas.get(len(lines)): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - return join_list(new_lines, Newline_id) - ===========changed ref 3=========== # module: coeditor.encoding def tokens_to_change(tokens: TokenSeq) -> Modified[str]: "Decode a token sequence into a change." 
tk_lines = split_list(tokens, Newline_id) + before_lines = list[TokenSeq]() - before_lines = list[str]() + after_lines = list[TokenSeq]() - after_lines = list[str]() for tk_line in tk_lines: if tk_line and tk_line[0] == Add_id: + after_lines.append(tk_line[1:]) - after_lines.append(_Tokenizer.decode(tk_line[1:])) elif tk_line and tk_line[0] == Del_id: + before_lines.append(tk_line[1:]) - before_lines.append(_Tokenizer.decode(tk_line[1:])) else: - line = _Tokenizer.decode(tk_line) + before_lines.append(tk_line) - before_lines.append(line) + after_lines.append(tk_line) - after_lines.append(line) + before_code = decode_tokens(join_list(before_lines, Newline_id)) + after_code = decode_tokens(join_list(after_lines, Newline_id)) + return Modified(before_code, after_code) - return Modified(before="\n".join(before_lines), after="\n".join(after_lines)) ===========changed ref 4=========== # module: coeditor.c3problem @dataclass class C3ProblemTokenizer: def _group_encode_changed_refs( self, changes: Sequence[ChangedCodeSpan] ) -> Sequence[TokenSeq]: module2changes = groupby(changes, lambda c: c.module) all_chunks = list[TokenSeq]() for change_group in module2changes.values(): change_group.sort(key=lambda c: c.line_range[0]) segs = list[TokenSeq]() # we'll add module as the chunk header, so we start within the module last_scope = change_group[0].headers[:1] for c in change_group: header_diff = list[ChangedHeader]() for i, h in enumerate(c.headers): if i >= len(last_scope) or h.path != last_scope[i].path: header_diff.append(h) if header_diff: header_tks = self._encode_headers(header_diff, 0) segs.append(header_tks) + c_tks = c.delta.apply_to_change(c.original.tolist()) - c_tks = c.delta.to_change_tks(c.original.tolist()) segs.append(c_tks) segs.append([Newline_id, Newline_id]) last_scope = c.headers segs.append([Newline_id]) mod_change = change_group[0].headers[:1] mod_chunks = break_into_chunks( join_list(segs), lambda i: self._encode_headers(mod_change, i), self.max_ref_tks, overlap=self.ref_chunk_overlap, ) all_chunks.extend(mod_chunks) return all_chunks
tests.test_edits/TestChangeIdentities.test_str_tk_conversion
Modified
temp-1
76c8cd1ea3625e687e9109c30b69dc7e42c69f95
Improve prediction visualization. - rename `to_change_tks`.
<0>:<add> c_tokens = tk_delta.apply_to_change(tk_before)
# module: tests.test_edits class TestChangeIdentities: def test_str_tk_conversion(self): for name, c in self.cases.items(): line_diffs = change_to_line_diffs(c) print("line_diffs\n------\n" + "\n".join(line_diffs)) before, delta = line_diffs_to_original_delta(line_diffs) print("delta:", delta) tk_delta = delta.to_tk_delta() tk_before = encode_basic(before) tk_after = tk_delta.apply_to_input(tk_before) if tk_after != encode_basic(get_after(c)): print("after diff:\n") print(show_string_diff(get_after(c), decode_tokens(tk_after))) - c_tokens = tk_delta.to_change_tks(tk_before) <0> if c_tokens != change_to_tokens(c): print("c_tokens diff:\n") print( show_string_diff( decode_tokens(c_tokens), decode_tokens(change_to_tokens(c)) ) ) origin1, tk_delta1 = change_tks_to_original_delta(c_tokens) if origin1 != tk_before: print("origin diff:\n") print( show_string_diff(decode_tokens(origin1), decode_tokens(tk_before)) ) assert tk_delta1.apply_to_input(origin1) == tk_after
===========unchanged ref 0=========== at: coeditor._utils show_string_diff(str1: str, str2: str, max_ctx: int | None=6) -> str at: coeditor.encoding decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str change_to_line_diffs(change: Change[str]) -> list[str] line_diffs_to_original_delta(diffs: list[str]) -> tuple[str, StrDelta] change_tks_to_original_delta(change: TokenSeq) -> tuple[TokenSeq, TkDelta] change_to_tokens(change: Change[str]) -> TokenSeq at: tests.test_edits get_after(change: Change[str]) -> str ===========unchanged ref 1=========== at: tests.test_edits.TestChangeIdentities cases: dict[str, Change[str]] = { "empty": Modified("", ""), "generation": Modified("", "123"), "added": Added("a\nb\nc\n"), "deleted": Deleted("a\nb\nc\n"), "no change": Modified( dedent( """\ def f1(): x = 1 """ ), dedent( """\ def f1(): x = 1 """ ), ), "unchanged=True": Modified.from_unchanged( dedent( """\ def f1(): x = 1 """ ), ), # this test case cannot pass for some reason. Tokenizer bug? # "leading_whitespace": Modified.from_unchanged(" ..."), "replace last": Modified( dedent( """\ def f1(): x = 1""" ), dedent( """\ def f1(): x = 2 return x * 2""" ), ), "no special tokens": Modified( dedent( """\ def f1(): x = 1 y = 2 z = x + y return z def f2(): f1()""" ), dedent( """\ # new comment def f_new(): x = 1 if x > 0: y = 2 * x y *= 2 z = x + y return z def f2(): f1() return f_new() + a new_var = 0 """ ), ), "with special tokens": Modified( dedent( """\ def f1(): x = "<add>" y = "<del>\tx" return x + y """ ), dedent( """\ # new comment 1</s> ===========changed ref 0=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + def summarize(self) -> str: + return "\n".join(self.meta_data_lines()) + ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: - def to_change_tks(self, input: TokenSeq) -> TokenSeq: - lines = split_list(input, Newline_id) - - new_lines = list[TokenSeq]() - for i, line in enumerate(lines): - deleted = False - if delta := self._deltas.get(i): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - elif action[0] == Del_id: - deleted = True - if deleted: - new_lines.append([Del_id] + line) - else: - new_lines.append(line) - if delta := self._deltas.get(len(lines)): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - return join_list(new_lines, Newline_id) - ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: - def to_change_tks(self, input: TokenSeq) -> TokenSeq: - lines = split_list(input, Newline_id) - - new_lines = list[TokenSeq]() - for i, line in enumerate(lines): - deleted = False - if delta := self._deltas.get(i): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - elif action[0] == Del_id: - deleted = True - if deleted: - new_lines.append([Del_id] + line) - else: - new_lines.append(line) - if delta := self._deltas.get(len(lines)): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - return join_list(new_lines, Newline_id) - ===========changed ref 3=========== # module: coeditor.encoding def tokens_to_change(tokens: TokenSeq) -> Modified[str]: "Decode a token sequence into a change." 
tk_lines = split_list(tokens, Newline_id) + before_lines = list[TokenSeq]() - before_lines = list[str]() + after_lines = list[TokenSeq]() - after_lines = list[str]() for tk_line in tk_lines: if tk_line and tk_line[0] == Add_id: + after_lines.append(tk_line[1:]) - after_lines.append(_Tokenizer.decode(tk_line[1:])) elif tk_line and tk_line[0] == Del_id: + before_lines.append(tk_line[1:]) - before_lines.append(_Tokenizer.decode(tk_line[1:])) else: - line = _Tokenizer.decode(tk_line) + before_lines.append(tk_line) - before_lines.append(line) + after_lines.append(tk_line) - after_lines.append(line) + before_code = decode_tokens(join_list(before_lines, Newline_id)) + after_code = decode_tokens(join_list(after_lines, Newline_id)) + return Modified(before_code, after_code) - return Modified(before="\n".join(before_lines), after="\n".join(after_lines))
tests.test_edits/TestChangeIdentities.test_get_new_target_lines
Modified
temp-1
76c8cd1ea3625e687e9109c30b69dc7e42c69f95
Improve prediction visualization. - rename `to_change_tks`.
<0>:<add> step1 = delta1.apply_to_change(original)
# module: tests.test_edits class TestChangeIdentities: def test_get_new_target_lines(self): for name, c in self.cases.items(): original, delta = TkDelta.from_change_tks(change_to_tokens(c)) n_origin_lines = len(split_list(original, Newline_id)) edit_lines = range(n_origin_lines + 1) keys = tuple(delta.keys()) for _ in range(10): n_keys = int(len(keys) * random.random()) sub_keys = random_subset(keys, n_keys) sub_keys.sort() delta1, delta2 = delta.decompose_for_change(sub_keys) new_edit_lines = delta1.get_new_target_lines(edit_lines) new_edit_set = set(new_edit_lines) for l in delta2.changed_lines(): if l not in new_edit_set and l != n_origin_lines: print_err(f"{edit_lines=}") print_err("original", SEP) print_err(add_line_numbers(decode_tokens(original), start=0)) print_err(SEP) print_err(f"{delta=}") print_err(f"{sub_keys=}") print_err(f"{delta1=}") print_err("step1", SEP) - step1 = delta1.to_change_tks(original) <0> print_err(add_line_numbers(decode_tokens(step1), start=0)) print_err(SEP) print_err(f"{new_edit_lines=}") print_err(f"{delta2=}") raise AssertionError(f"{l=} not in {new_edit_lines=}")
===========unchanged ref 0=========== at: coeditor._utils add_line_numbers(code: str, start: int=1) at: coeditor.common split_list(lst: list[T1], sep: T1) -> list[list[T1]] SEP = "-" * 80 random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2] random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1] print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None at: coeditor.encoding Newline_id = get_tk_id("\n") decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]]) change_to_tokens(change: Change[str]) -> TokenSeq at: coeditor.encoding.TkDelta _deltas: Mapping[int, tuple[TokenSeq, ...]] from_change_tks(change_tks: TokenSeq) -> tuple[TokenSeq, "TkDelta"] at: random random = _inst.random ===========unchanged ref 1=========== at: tests.test_edits.TestChangeIdentities cases: dict[str, Change[str]] = { "empty": Modified("", ""), "generation": Modified("", "123"), "added": Added("a\nb\nc\n"), "deleted": Deleted("a\nb\nc\n"), "no change": Modified( dedent( """\ def f1(): x = 1 """ ), dedent( """\ def f1(): x = 1 """ ), ), "unchanged=True": Modified.from_unchanged( dedent( """\ def f1(): x = 1 """ ), ), # this test case cannot pass for some reason. Tokenizer bug? # "leading_whitespace": Modified.from_unchanged(" ..."), "replace last": Modified( dedent( """\ def f1(): x = 1""" ), dedent( """\ def f1(): x = 2 return x * 2""" ), ), "no special tokens": Modified( dedent( """\ def f1(): x = 1 y = 2 z = x + y return z def f2(): f1()""" ), dedent( """\ # new comment def f_new(): x = 1 if x > 0: y = 2 * x y *= 2 z = x + y return z def f2(): f1() return f_new() + a new_var = 0 """ ), ), "with special tokens": Modified( dedent( """\ def f1(): x = "<add>" y = "<del>\tx" return x + y """ ), dedent( """\ # new comment 1</s> ===========changed ref 0=========== # module: tests.test_edits class TestChangeIdentities: def test_str_tk_conversion(self): for name, c in self.cases.items(): line_diffs = change_to_line_diffs(c) print("line_diffs\n------\n" + "\n".join(line_diffs)) before, delta = line_diffs_to_original_delta(line_diffs) print("delta:", delta) tk_delta = delta.to_tk_delta() tk_before = encode_basic(before) tk_after = tk_delta.apply_to_input(tk_before) if tk_after != encode_basic(get_after(c)): print("after diff:\n") print(show_string_diff(get_after(c), decode_tokens(tk_after))) + c_tokens = tk_delta.apply_to_change(tk_before) - c_tokens = tk_delta.to_change_tks(tk_before) if c_tokens != change_to_tokens(c): print("c_tokens diff:\n") print( show_string_diff( decode_tokens(c_tokens), decode_tokens(change_to_tokens(c)) ) ) origin1, tk_delta1 = change_tks_to_original_delta(c_tokens) if origin1 != tk_before: print("origin diff:\n") print( show_string_diff(decode_tokens(origin1), decode_tokens(tk_before)) ) assert tk_delta1.apply_to_input(origin1) == tk_after ===========changed ref 1=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + def summarize(self) -> str: + return "\n".join(self.meta_data_lines()) + ===========changed ref 2=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: - def to_change_tks(self, input: TokenSeq) -> TokenSeq: - lines = split_list(input, Newline_id) - - new_lines = list[TokenSeq]() - for i, line in enumerate(lines): - deleted = False - if delta := self._deltas.get(i): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - elif action[0] == Del_id: - 
deleted = True - if deleted: - new_lines.append([Del_id] + line) - else: - new_lines.append(line) - if delta := self._deltas.get(len(lines)): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - return join_list(new_lines, Newline_id) - ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class TkDelta: - def to_change_tks(self, input: TokenSeq) -> TokenSeq: - lines = split_list(input, Newline_id) - - new_lines = list[TokenSeq]() - for i, line in enumerate(lines): - deleted = False - if delta := self._deltas.get(i): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - elif action[0] == Del_id: - deleted = True - if deleted: - new_lines.append([Del_id] + line) - else: - new_lines.append(line) - if delta := self._deltas.get(len(lines)): - for action in delta: - if action[0] == Add_id: - new_lines.append(action) - return join_list(new_lines, Newline_id) -
coeditor.encoding/StrDelta.to_tk_delta
Modified
temp-1
703cdaf8bc6456ee94a5afffc6c9f902bae79b03
Fix inconsistent encoding of newlines.
<0>:<add> line_tk_delta.append([Add_id] + encode_single_line(action[1:]))
# module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def to_tk_delta(self) -> "TkDelta": deltas = dict[int, tuple[TokenSeq, ...]]() for k, line_delta in self._deltas.items(): line_tk_delta = list[TokenSeq]() for action in line_delta: if action[0] == "+": - line_tk_delta.append([Add_id] + encode_basic(action[1:])) <0> elif action[0] == "-": line_tk_delta.append([Del_id]) else: raise ValueError(f"Invalid action: {action}") deltas[k] = tuple(line_tk_delta) return TkDelta(deltas)
===========unchanged ref 0=========== at: coeditor.encoding StrDelta(_deltas: Mapping[int, tuple[str, ...]]) at: coeditor.encoding.StrDelta _deltas: Mapping[int, tuple[str, ...]] num_changes() -> int at: typing.Mapping items() -> AbstractSet[Tuple[_KT, _VT_co]] ===========changed ref 0=========== # module: coeditor.encoding + def encode_lines(text: str) -> Iterable[TokenSeq]: + return (encode_single_line(l) for l in splitlines(text)) + ===========changed ref 1=========== # module: coeditor.encoding + def encode_lines_join(text: str) -> TokenSeq: + """Encode a mutliple line str such that the line breaks are consistently encoded.""" + return join_list(encode_lines(text), sep=Newline_id) + ===========changed ref 2=========== # module: coeditor.encoding - def encode_basic(text: str, add_special_tokens=False) -> TokenSeq: - "Encode a string into a token sequence using the base tokenizer." - return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) - ===========changed ref 3=========== # module: coeditor.encoding + def encode_single_line(text: str, add_special_tokens=False) -> TokenSeq: + """Encode a string into a token sequence using the base tokenizer. + Note that you should use `encode_lines_join` when `text` contains multiple lines + to ensure that newline characters are consistently encoded as the same token. + """ + return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) +
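A minimal toy sketch of the newline-consistent encoding idea behind this record — the per-character toy_encode_line and the NEWLINE_ID sentinel are made-up stand-ins for illustration, not the project's real tokenizer or token ids:

# Toy sketch (hypothetical ids, not the repo's API): encode each line separately
# and join with a dedicated newline token so "\n" can never be merged into a
# neighboring subword piece.
from typing import List

NEWLINE_ID = 0  # hypothetical sentinel id for "\n"

def toy_encode_line(line: str) -> List[int]:
    # Stand-in for a real subword tokenizer; here each character becomes one id.
    return [ord(c) for c in line]

def toy_encode_lines_join(text: str) -> List[int]:
    lines = text.split("\n")
    out: List[int] = []
    for i, line in enumerate(lines):
        out.extend(toy_encode_line(line))
        if i < len(lines) - 1:
            out.append(NEWLINE_ID)  # the newline is always its own token
    return out

if __name__ == "__main__":
    tks = toy_encode_lines_join("x = 1\ny = 2")
    assert tks.count(NEWLINE_ID) == 1  # exactly one newline token between the two lines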
coeditor.encoding/change_to_tokens
Modified
temp-1
703cdaf8bc6456ee94a5afffc6c9f902bae79b03
Fix inconsistent encoding of newlines.
<0>:<add> lines = encode_lines(change.earlier)
# module: coeditor.encoding def change_to_tokens(change: Change[str]) -> TokenSeq: match change: case Modified(before=before, after=after, unchanged=unchanged): if unchanged or before == after: + return encode_lines_join(before) - return encode_basic(before) else: diffs = change_to_line_diffs(change) return encode_diffs(diffs) case Added() | Deleted(): - lines = split_list(encode_basic(change.earlier), Newline_id) <0> tk = Add_id if isinstance(change, Added) else Del_id return join_list(([tk] + line for line in lines), Newline_id) case _: raise AssertionError(f"Not a change type: {change}")
===========unchanged ref 0=========== at: coeditor.common join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1] at: coeditor.encoding Newline_id = get_tk_id("\n") TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]]) at: coeditor.encoding.change_tks_to_original_delta input_lines: list[TokenSeq] = [] ===========changed ref 0=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + @staticmethod + def from_change(change: Change[str]) -> tuple[str, "StrDelta"]: + line_diffs = change_to_line_diffs(change) + return line_diffs_to_original_delta(line_diffs) + ===========changed ref 1=========== # module: coeditor.encoding + def encode_lines(text: str) -> Iterable[TokenSeq]: + return (encode_single_line(l) for l in splitlines(text)) + ===========changed ref 2=========== # module: coeditor.encoding + def encode_lines_join(text: str) -> TokenSeq: + """Encode a mutliple line str such that the line breaks are consistently encoded.""" + return join_list(encode_lines(text), sep=Newline_id) + ===========changed ref 3=========== # module: coeditor.encoding - def encode_basic(text: str, add_special_tokens=False) -> TokenSeq: - "Encode a string into a token sequence using the base tokenizer." - return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) - ===========changed ref 4=========== # module: coeditor.encoding + def encode_single_line(text: str, add_special_tokens=False) -> TokenSeq: + """Encode a string into a token sequence using the base tokenizer. + Note that you should use `encode_lines_join` when `text` contains multiple lines + to ensure that newline characters are consistently encoded as the same token. + """ + return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) + ===========changed ref 5=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def to_tk_delta(self) -> "TkDelta": deltas = dict[int, tuple[TokenSeq, ...]]() for k, line_delta in self._deltas.items(): line_tk_delta = list[TokenSeq]() for action in line_delta: if action[0] == "+": + line_tk_delta.append([Add_id] + encode_single_line(action[1:])) - line_tk_delta.append([Add_id] + encode_basic(action[1:])) elif action[0] == "-": line_tk_delta.append([Del_id]) else: raise ValueError(f"Invalid action: {action}") deltas[k] = tuple(line_tk_delta) return TkDelta(deltas)
coeditor.encoding/encode_diffs
Modified
temp-1
703cdaf8bc6456ee94a5afffc6c9f902bae79b03
Fix inconsistent encoding of newlines.
<0>:<add> result.append(Newline_id)
# module: coeditor.encoding def encode_diffs(diffs: Sequence[str]) -> TokenSeq: """ A helper function to encode the diff lines (with '+', '-', or ' ' prefixes) into a token sequence with the special <add> and <del> tokens. """ + result = TokenSeq() + max_i = len(diffs) - 1 - prefixes = list[TokenSeq]() - code_lines = list[str]() for i, diff in enumerate(diffs): if diff.startswith("+"): + result.append(Add_id) - prefixes.append([Add_id]) elif diff.startswith("-"): + result.append(Del_id) - prefixes.append([Del_id]) else: assert diff.startswith(" ") - prefixes.append([]) - code_lines.append(diff[1:]) - code_tks = _BaseTokenizer.encode("\n".join(code_lines), add_special_tokens=False) - code_lines = split_list(code_tks, Newline_id) - for i, line in enumerate(code_lines): - if prefixes[i]: - code_lines[i] = prefixes[i] + line - return join_list(code_lines, Newline_id) + result.extend(encode_single_line(diff[1:])) + if i < max_i: <0>
===========unchanged ref 0=========== at: coeditor.common TokenSeq = list[Token] at: coeditor.encoding.rearrange_diffs_ del_start = i del_end = i at: typing Sequence = _alias(collections.abc.Sequence, 1) ===========changed ref 0=========== # module: coeditor.encoding def change_to_tokens(change: Change[str]) -> TokenSeq: match change: case Modified(before=before, after=after, unchanged=unchanged): if unchanged or before == after: + return encode_lines_join(before) - return encode_basic(before) else: diffs = change_to_line_diffs(change) return encode_diffs(diffs) case Added() | Deleted(): + lines = encode_lines(change.earlier) - lines = split_list(encode_basic(change.earlier), Newline_id) tk = Add_id if isinstance(change, Added) else Del_id return join_list(([tk] + line for line in lines), Newline_id) case _: raise AssertionError(f"Not a change type: {change}") ===========changed ref 1=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + @staticmethod + def from_change(change: Change[str]) -> tuple[str, "StrDelta"]: + line_diffs = change_to_line_diffs(change) + return line_diffs_to_original_delta(line_diffs) + ===========changed ref 2=========== # module: coeditor.encoding + def encode_lines(text: str) -> Iterable[TokenSeq]: + return (encode_single_line(l) for l in splitlines(text)) + ===========changed ref 3=========== # module: coeditor.encoding + def encode_lines_join(text: str) -> TokenSeq: + """Encode a mutliple line str such that the line breaks are consistently encoded.""" + return join_list(encode_lines(text), sep=Newline_id) + ===========changed ref 4=========== # module: coeditor.encoding - def encode_basic(text: str, add_special_tokens=False) -> TokenSeq: - "Encode a string into a token sequence using the base tokenizer." - return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) - ===========changed ref 5=========== # module: coeditor.encoding + def encode_single_line(text: str, add_special_tokens=False) -> TokenSeq: + """Encode a string into a token sequence using the base tokenizer. + Note that you should use `encode_lines_join` when `text` contains multiple lines + to ensure that newline characters are consistently encoded as the same token. + """ + return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) + ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def to_tk_delta(self) -> "TkDelta": deltas = dict[int, tuple[TokenSeq, ...]]() for k, line_delta in self._deltas.items(): line_tk_delta = list[TokenSeq]() for action in line_delta: if action[0] == "+": + line_tk_delta.append([Add_id] + encode_single_line(action[1:])) - line_tk_delta.append([Add_id] + encode_basic(action[1:])) elif action[0] == "-": line_tk_delta.append([Del_id]) else: raise ValueError(f"Invalid action: {action}") deltas[k] = tuple(line_tk_delta) return TkDelta(deltas)
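A simplified, self-contained sketch of the diff-encoding scheme used in the record above — the sentinel ids and the character-level toy_encode are assumptions for illustration only:

# Each diff line keeps its "+"/"-" marker as a sentinel token; unchanged lines
# (prefixed with a space) get no marker, and line breaks stay a single token.
from typing import List, Sequence

ADD_ID, DEL_ID, NL_ID = -1, -2, -3  # hypothetical sentinel ids

def toy_encode(text: str) -> List[int]:
    return [ord(c) for c in text]  # stand-in for a subword tokenizer

def toy_encode_diffs(diffs: Sequence[str]) -> List[int]:
    result: List[int] = []
    for i, line in enumerate(diffs):
        if line.startswith("+"):
            result.append(ADD_ID)
        elif line.startswith("-"):
            result.append(DEL_ID)
        else:
            assert line.startswith(" ")
        result.extend(toy_encode(line[1:]))
        if i < len(diffs) - 1:
            result.append(NL_ID)
    return result

if __name__ == "__main__":
    tks = toy_encode_diffs([" x = 1", "-y = 2", "+y = 3"])
    assert tks.count(NL_ID) == 2 and DEL_ID in tks and ADD_ID in tks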
coeditor.encoding/TokenizedEdit.is_repetitive_edit
Modified
temp-1
703cdaf8bc6456ee94a5afffc6c9f902bae79b03
Fix inconsistent encoding of newlines.
<0>:<add> return encode_single_line(s)
# module: coeditor.encoding class TokenizedEdit(ABC): def is_repetitive_edit(self, blue_threshold=0.8) -> bool: """Check if all additions in the output_tokens can be matched to an addition in the input_tokens with a BLEU score above the threshold.""" def get_changes(tks, key_tk: Token): if tks and tks[0] == key_tk: s = decode_tokens(tks[1:]) s.strip() - return encode_basic(s) <0> else: return [] ctx_lines = split_list(self.input_tks, Newline_id) main_lines = output_ids_as_seqs(self.input_tks) ctx_addtions = [tks for l in ctx_lines if (tks := get_changes(l, Add_id))] ctx_deletions = [tks for l in ctx_lines if (tks := get_changes(l, Del_id))] def has_match(line, line_key: Token): if line: if line[0] == Add_id: added = line[1:] return any( as_any(sentence_bleu([ref], added)) > blue_threshold for ref in ctx_addtions ) elif line == [Del_id]: if line_key not in main_lines: print(f"Key {decode_tokens([line_key])} not found.") print("Main tokens:") print(decode_tokens(self.main_tks)) deleted = main_lines[line_key] return any( as_any(sentence_bleu([ref], deleted)) > blue_threshold for ref in ctx_deletions ) else: raise ValueError(f"Unexpected line: {decode_tokens(line)}") else: return True out_segs = output_ids_as_seqs(self.output_tks) if all(not s for s in out</s>
===========below chunk 0=========== # module: coeditor.encoding class TokenizedEdit(ABC): def is_repetitive_edit(self, blue_threshold=0.8) -> bool: # offset: 1 <s>_segs = output_ids_as_seqs(self.output_tks) if all(not s for s in out_segs.values()): return False for k, seg in out_segs.items(): for line in split_list(seg, Newline_id): if not has_match(line, k): return False return True ===========unchanged ref 0=========== at: coeditor._utils as_any(x) -> Any at: coeditor.common Token = int split_list(lst: list[T1], sep: T1) -> list[list[T1]] at: coeditor.encoding Add_id = get_tk_id(Add) Del_id = get_tk_id(Del) Newline_id = get_tk_id("\n") decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str encode_single_line(text: str, add_special_tokens=False) -> TokenSeq output_ids_as_seqs(output_ids: Iterable[Token]) -> dict[Token, TokenSeq] at: coeditor.encoding.TokenizedEdit input_tks: TokenSeq output_tks: TokenSeq main_tks: TokenSeq path: ProjectPath change_type: Change[None] all_ctxs() -> dict[str, TokenSeq] at: coeditor.encoding.TokenizedEdit.show show_ctx(ctx_tks: TokenSeq) outputs = [ "-" * 80, *self.meta_data_lines(), "========Ground Truth========", show_extra_tokens(self.output_tks, main_segs), *pred_lines, "========Main Code========", "\n".join(main_lines), ] + [ f"==========={name}===========\n" + show_ctx(tks) for name, tks in self.all_ctxs().items() ] at: nltk.translate.bleu_score sentence_bleu(references, hypothesis, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False) at: warnings simplefilter(action: str, category: Type[Warning]=..., lineno: int=..., append: bool=...) -> None ===========changed ref 0=========== # module: coeditor.encoding + def encode_single_line(text: str, add_special_tokens=False) -> TokenSeq: + """Encode a string into a token sequence using the base tokenizer. + Note that you should use `encode_lines_join` when `text` contains multiple lines + to ensure that newline characters are consistently encoded as the same token. + """ + return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) + ===========changed ref 1=========== # module: coeditor.encoding def encode_diffs(diffs: Sequence[str]) -> TokenSeq: """ A helper function to encode the diff lines (with '+', '-', or ' ' prefixes) into a token sequence with the special <add> and <del> tokens. 
""" + result = TokenSeq() + max_i = len(diffs) - 1 - prefixes = list[TokenSeq]() - code_lines = list[str]() for i, diff in enumerate(diffs): if diff.startswith("+"): + result.append(Add_id) - prefixes.append([Add_id]) elif diff.startswith("-"): + result.append(Del_id) - prefixes.append([Del_id]) else: assert diff.startswith(" ") - prefixes.append([]) - code_lines.append(diff[1:]) - code_tks = _BaseTokenizer.encode("\n".join(code_lines), add_special_tokens=False) - code_lines = split_list(code_tks, Newline_id) - for i, line in enumerate(code_lines): - if prefixes[i]: - code_lines[i] = prefixes[i] + line - return join_list(code_lines, Newline_id) + result.extend(encode_single_line(diff[1:])) + if i < max_i: + result.append(Newline_id) ===========changed ref 2=========== # module: coeditor.encoding def change_to_tokens(change: Change[str]) -> TokenSeq: match change: case Modified(before=before, after=after, unchanged=unchanged): if unchanged or before == after: + return encode_lines_join(before) - return encode_basic(before) else: diffs = change_to_line_diffs(change) return encode_diffs(diffs) case Added() | Deleted(): + lines = encode_lines(change.earlier) - lines = split_list(encode_basic(change.earlier), Newline_id) tk = Add_id if isinstance(change, Added) else Del_id return join_list(([tk] + line for line in lines), Newline_id) case _: raise AssertionError(f"Not a change type: {change}") ===========changed ref 3=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + @staticmethod + def from_change(change: Change[str]) -> tuple[str, "StrDelta"]: + line_diffs = change_to_line_diffs(change) + return line_diffs_to_original_delta(line_diffs) + ===========changed ref 4=========== # module: coeditor.encoding + def encode_lines(text: str) -> Iterable[TokenSeq]: + return (encode_single_line(l) for l in splitlines(text)) + ===========changed ref 5=========== # module: coeditor.encoding + def encode_lines_join(text: str) -> TokenSeq: + """Encode a mutliple line str such that the line breaks are consistently encoded.""" + return join_list(encode_lines(text), sep=Newline_id) + ===========changed ref 6=========== # module: coeditor.encoding - def encode_basic(text: str, add_special_tokens=False) -> TokenSeq: - "Encode a string into a token sequence using the base tokenizer." - return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) - ===========changed ref 7=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def to_tk_delta(self) -> "TkDelta": deltas = dict[int, tuple[TokenSeq, ...]]() for k, line_delta in self._deltas.items(): line_tk_delta = list[TokenSeq]() for action in line_delta: if action[0] == "+": + line_tk_delta.append([Add_id] + encode_single_line(action[1:])) - line_tk_delta.append([Add_id] + encode_basic(action[1:])) elif action[0] == "-": line_tk_delta.append([Del_id]) else: raise ValueError(f"Invalid action: {action}") deltas[k] = tuple(line_tk_delta) return TkDelta(deltas)
coeditor.encoding/compress_change_tks
Modified
temp-1
703cdaf8bc6456ee94a5afffc6c9f902bae79b03
Fix inconsistent encoding of newlines.
<0>:<add> new_lines.append(_OMIT)
# module: coeditor.encoding def compress_change_tks(tks: TokenSeq, max_ctx: int): lines = split_list(tks, sep=Newline_id) to_keep = [False for _ in lines] # mark which lines to keep for i, line in enumerate(lines): if line and (line[0] == Add_id or line[0] == Del_id): for j in range(max(0, i - max_ctx), min(len(lines), i + max_ctx + 1)): to_keep[j] = True new_lines = list[TokenSeq]() i = 0 - OMIT = encode_basic("...") while i < len(lines): if to_keep[i]: new_lines.append(lines[i]) i += 1 else: j = i + 1 while j < len(lines) and not to_keep[j]: j += 1 - new_lines.append(OMIT) <0> i = j return join_list(new_lines, sep=Newline_id)
===========unchanged ref 0=========== at: coeditor.common TokenSeq = list[Token] split_list(lst: list[T1], sep: T1) -> list[list[T1]] at: coeditor.encoding Add_id = get_tk_id(Add) Del_id = get_tk_id(Del) Newline_id = get_tk_id("\n") is_extra_id(tk: int) -> bool encode_single_line(text: str, add_special_tokens=False) -> TokenSeq TokenizedEdit() at: coeditor.encoding.TokenizedEdit output_tks: TokenSeq main_tks: TokenSeq at: typing TypeVar(name: str, *constraints: Type[Any], bound: Union[None, Type[Any], str]=..., covariant: bool=..., contravariant: bool=...) ===========changed ref 0=========== # module: coeditor.encoding + def encode_single_line(text: str, add_special_tokens=False) -> TokenSeq: + """Encode a string into a token sequence using the base tokenizer. + Note that you should use `encode_lines_join` when `text` contains multiple lines + to ensure that newline characters are consistently encoded as the same token. + """ + return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) + ===========changed ref 1=========== # module: coeditor.encoding TEdit = TypeVar("TEdit", bound=TokenizedEdit) + _OMIT = encode_single_line("...") ===========changed ref 2=========== # module: coeditor.encoding class TokenizedEdit(ABC): def is_repetitive_edit(self, blue_threshold=0.8) -> bool: """Check if all additions in the output_tokens can be matched to an addition in the input_tokens with a BLEU score above the threshold.""" def get_changes(tks, key_tk: Token): if tks and tks[0] == key_tk: s = decode_tokens(tks[1:]) s.strip() + return encode_single_line(s) - return encode_basic(s) else: return [] ctx_lines = split_list(self.input_tks, Newline_id) main_lines = output_ids_as_seqs(self.input_tks) ctx_addtions = [tks for l in ctx_lines if (tks := get_changes(l, Add_id))] ctx_deletions = [tks for l in ctx_lines if (tks := get_changes(l, Del_id))] def has_match(line, line_key: Token): if line: if line[0] == Add_id: added = line[1:] return any( as_any(sentence_bleu([ref], added)) > blue_threshold for ref in ctx_addtions ) elif line == [Del_id]: if line_key not in main_lines: print(f"Key {decode_tokens([line_key])} not found.") print("Main tokens:") print(decode_tokens(self.main_tks)) deleted = main_lines[line_key] return any( as_any(sentence_bleu([ref], deleted)) > blue_threshold for ref in ctx_deletions ) else: raise ValueError(f"Unexpected line: {decode_tokens(line)}") else: return True out_segs = output_ids_as_seqs(self.output_tks)</s> ===========changed ref 3=========== # module: coeditor.encoding class TokenizedEdit(ABC): def is_repetitive_edit(self, blue_threshold=0.8) -> bool: # offset: 1 <s> else: return True out_segs = output_ids_as_seqs(self.output_tks) if all(not s for s in out_segs.values()): return False for k, seg in out_segs.items(): for line in split_list(seg, Newline_id): if not has_match(line, k): return False return True ===========changed ref 4=========== # module: coeditor.encoding def encode_diffs(diffs: Sequence[str]) -> TokenSeq: """ A helper function to encode the diff lines (with '+', '-', or ' ' prefixes) into a token sequence with the special <add> and <del> tokens. 
""" + result = TokenSeq() + max_i = len(diffs) - 1 - prefixes = list[TokenSeq]() - code_lines = list[str]() for i, diff in enumerate(diffs): if diff.startswith("+"): + result.append(Add_id) - prefixes.append([Add_id]) elif diff.startswith("-"): + result.append(Del_id) - prefixes.append([Del_id]) else: assert diff.startswith(" ") - prefixes.append([]) - code_lines.append(diff[1:]) - code_tks = _BaseTokenizer.encode("\n".join(code_lines), add_special_tokens=False) - code_lines = split_list(code_tks, Newline_id) - for i, line in enumerate(code_lines): - if prefixes[i]: - code_lines[i] = prefixes[i] + line - return join_list(code_lines, Newline_id) + result.extend(encode_single_line(diff[1:])) + if i < max_i: + result.append(Newline_id) ===========changed ref 5=========== # module: coeditor.encoding def change_to_tokens(change: Change[str]) -> TokenSeq: match change: case Modified(before=before, after=after, unchanged=unchanged): if unchanged or before == after: + return encode_lines_join(before) - return encode_basic(before) else: diffs = change_to_line_diffs(change) return encode_diffs(diffs) case Added() | Deleted(): + lines = encode_lines(change.earlier) - lines = split_list(encode_basic(change.earlier), Newline_id) tk = Add_id if isinstance(change, Added) else Del_id return join_list(([tk] + line for line in lines), Newline_id) case _: raise AssertionError(f"Not a change type: {change}") ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + @staticmethod + def from_change(change: Change[str]) -> tuple[str, "StrDelta"]: + line_diffs = change_to_line_diffs(change) + return line_diffs_to_original_delta(line_diffs) + ===========changed ref 7=========== # module: coeditor.encoding + def encode_lines(text: str) -> Iterable[TokenSeq]: + return (encode_single_line(l) for l in splitlines(text)) + ===========changed ref 8=========== # module: coeditor.encoding + def encode_lines_join(text: str) -> TokenSeq: + """Encode a mutliple line str such that the line breaks are consistently encoded.""" + return join_list(encode_lines(text), sep=Newline_id) + ===========changed ref 9=========== # module: coeditor.encoding - def encode_basic(text: str, add_special_tokens=False) -> TokenSeq: - "Encode a string into a token sequence using the base tokenizer." - return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) -
coeditor.c3problem/C3ProblemChangeDropout.transform
Modified
temp-1
703cdaf8bc6456ee94a5afffc6c9f902bae79b03
Fix inconsistent encoding of newlines.
<0>:<add> raise AssertionError("decompose_for_change failed.")
# module: coeditor.c3problem + @dataclass class C3ProblemChangeDropout(C3ProblemTransform): def transform(self, prob: C3Problem) -> Sequence[C3Problem]: <s>decompose_for_change(keys_to_drop) + if random.random() < self._test_prob: + result1 = delta2.apply_to_change( + delta1.apply_to_change(original.tolist()) + ) + result2 = delta.apply_to_change(original.tolist()) + code1 = tokens_to_change(result1).after + code2 = tokens_to_change(result2).after + if code1 != code2: + print_sections( + ("result1", decode_tokens(result1)), + ("result2", decode_tokens(result2)), + ("delta", str(delta)), + ("keys_to_drop", str(keys_to_drop)), + ("delta1", str(delta1)), + ("delta2", str(delta2)), + ) <0> delta2_groups = delta2.change_groups() if not delta2_groups: print_err(f"{delta=}, {keys_to_drop=}, {delta1=}") raise AssertionError("Empty delta2_groups") new_original = TkArray.new(delta1.apply_to_change(original.tolist())) new_trans = prob.transformations + ("split", "dropout") new_span = dataclasses.replace( prob.span, original=new_original, delta=delta2 ) else: new_trans = prob.transformations + ("split",) new_span = prob.span delta1 = None delta2_groups = delta.change_groups() prob_and_n = list[tuple[C3Problem, int]]() for i in range(start, stop, self.max_lines_to_edit): j = min(i + self.max_lines_to_edit</s>
===========above chunk 0=========== # module: coeditor.c3problem + @dataclass class C3ProblemChangeDropout(C3ProblemTransform): def transform(self, prob: C3Problem) -> Sequence[C3Problem]: # offset: -1 original = prob.span.original delta = prob.span.delta l_range = prob.edit_lines assert isinstance(l_range, range) start, stop = l_range.start, l_range.stop grouped_keys = delta.change_groups() should_dropout = len(grouped_keys) >= 2 if should_dropout: n_to_drop = int( len(grouped_keys) * random.random() * self.max_dropout_ratio ) assert n_to_drop < len(grouped_keys) keys_to_drop = join_list(random_subset(grouped_keys, n_to_drop)) else: keys_to_drop = [] if keys_to_drop: delta1, delta2 = delta.decompose_for_change(keys_to_drop) + if random.random() < self._test_prob: + </s> ===========below chunk 0=========== # module: coeditor.c3problem + @dataclass class C3ProblemChangeDropout(C3ProblemTransform): def transform(self, prob: C3Problem) -> Sequence[C3Problem]: # offset: 1 <s>, stop, self.max_lines_to_edit): j = min(i + self.max_lines_to_edit, stop) edit_lines = range(i, j) if delta1 is not None: edit_lines = delta1.get_new_target_lines(edit_lines) line_set = set(edit_lines) n_groups = sum(any(key[0] in line_set for key in g) for g in delta2_groups) if n_groups > 0: sub_prob = dataclasses.replace( prob, span=new_span, edit_lines=edit_lines, transformations=new_trans, ) prob_and_n.append((sub_prob, n_groups)) # return the problems with the most changes prob_and_n.sort(key=lambda p: p[1], reverse=True) probs = [p[0] for p in prob_and_n] return probs[: self.max_split_factor] ===========unchanged ref 0=========== at: coeditor.c3problem C3Problem(span: ChangedCodeSpan, edit_lines: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=()) at: coeditor.c3problem.C3Problem span: ChangedCodeSpan edit_lines: Sequence[int] relevant_changes: Sequence[ChangedCodeSpan] relevant_unchanged: Sequence[ChangedCodeSpan] change_type: Change[None] src_info: SrcInfo transformations: tuple[str, ...] = () at: coeditor.c3problem.C3ProblemTransform transform(self, prob: C3Problem) -> Sequence[C3Problem] at: coeditor.c3problem.ChangedCodeSpan headers: Sequence[ChangedHeader] original: TkArray delta: TkDelta line_range: LineRange module: ModuleName at: coeditor.change.Modified before: E1 after: E1 unchanged: bool = False at: coeditor.common join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1] print_sections(*, sep: str=SEP, file: TextIO=sys.stdout) -> None random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2] random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1] print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) 
-> None at: coeditor.encoding decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str tokens_to_change(tokens: TokenSeq) -> Modified[str] ===========unchanged ref 1=========== at: coeditor.encoding.TkDelta _deltas: Mapping[int, tuple[TokenSeq, ...]] apply_to_change(change: TokenSeq) -> TokenSeq decompose_for_change(first_keys: Collection[DeltaKey]) -> tuple[Self, Self] change_groups() -> Sequence[tuple[DeltaKey, ...]] at: coeditor.tk_array.TkArray tolist() -> TokenSeq at: random random = _inst.random at: typing Sequence = _alias(collections.abc.Sequence, 1) ===========changed ref 0=========== # module: coeditor.c3problem + @dataclass class C3ProblemChangeDropout(C3ProblemTransform): """Split the problem into fixed-sized editing ranges like `C3ProblemSimpleSplit`, but also randomly keep some subset of changes in the input. ### Change log + - v1.2: fix newline encoding bug. - v1.1 - Dropout changes using change groups instead of individual change actions. - Perform dropout at entire problem level ratehr than chunk level. This way, changes in later chunks will be visible as well. - Removed `dropout_prob`. """ + VERSION = "1.2" - VERSION = "1.1" max_lines_to_edit: int = 25 max_split_factor: int = 4 # when dropping the changes into the input, the biggest ratio of changes to drop max_dropout_ratio: float = 0.5 + _test_prob: float = 0.01 ===========changed ref 1=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + def summary(self) -> str: + return "\n".join(self.meta_data_lines()) + ===========changed ref 2=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: - def summarize(self) -> str: - return "\n".join(self.meta_data_lines()) - ===========changed ref 3=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + @property + def path(self) -> ProjectPath: + return self.span.headers[-1].path + ===========changed ref 4=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + def print(self): + main_change = self.span.delta.apply_to_change(self.span.original.tolist()) + print_sections( + ("summary", self.summary()), + ("main change", decode_tokens(main_change)), + ("edit_lines", str(self.edit_lines)), + ) + ===========changed ref 5=========== # module: coeditor.c3problem class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]): """ ### Change log + - v2.5: fix newline encoding bug. - v2.4: fix buggy encoding of `Added` and `Deleted` changes. - v2.3: always generate problems with full editing range and move the problem splitting logic elsewhere. Also changed the data format of `ChangedCodeSpan`. """ + VERSION = "2.5" - VERSION = "2.4" # change spans with more than this many lines will be ignored max_span_lines: int = 500 ===========changed ref 6=========== # module: coeditor.encoding + def encode_lines(text: str) -> Iterable[TokenSeq]: + return (encode_single_line(l) for l in splitlines(text)) +
coeditor.c3problem/C3ProblemTokenizer._get_offset_tks
Modified
temp-1
703cdaf8bc6456ee94a5afffc6c9f902bae79b03
Fix inconsistent encoding of newlines.
<0>:<add> tks = TkArray.new(encode_single_line(f"# offset: {offset}"))
# module: coeditor.c3problem @dataclass class C3ProblemTokenizer: def _get_offset_tks(self, offset: int) -> TkArray: if (tks := self._offset_cache.get(offset)) is None: - tks = TkArray.new(encode_basic(f"# offset: {offset}")) <0> self._offset_cache[offset] = tks return tks
===========unchanged ref 0=========== at: coeditor.c3problem.C3ProblemTokenizer VERSION = "2.3" max_ref_tks: int = 512 max_query_tks: int = 512 max_output_tks: int = 256 max_scope_tks: int = 128 max_ref_tks_sum: int = 512 * 12 ref_chunk_overlap: int = 32 at: coeditor.c3problem.C3ProblemTokenizer.tokenize_problem all_refs = above_chunks + below_chunks at: coeditor.tk_array TkArray() ===========changed ref 0=========== # module: coeditor.c3problem + @dataclass class C3ProblemChangeDropout(C3ProblemTransform): """Split the problem into fixed-sized editing ranges like `C3ProblemSimpleSplit`, but also randomly keep some subset of changes in the input. ### Change log + - v1.2: fix newline encoding bug. - v1.1 - Dropout changes using change groups instead of individual change actions. - Perform dropout at entire problem level ratehr than chunk level. This way, changes in later chunks will be visible as well. - Removed `dropout_prob`. """ + VERSION = "1.2" - VERSION = "1.1" max_lines_to_edit: int = 25 max_split_factor: int = 4 # when dropping the changes into the input, the biggest ratio of changes to drop max_dropout_ratio: float = 0.5 + _test_prob: float = 0.01 ===========changed ref 1=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + def summary(self) -> str: + return "\n".join(self.meta_data_lines()) + ===========changed ref 2=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: - def summarize(self) -> str: - return "\n".join(self.meta_data_lines()) - ===========changed ref 3=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + @property + def path(self) -> ProjectPath: + return self.span.headers[-1].path + ===========changed ref 4=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + def print(self): + main_change = self.span.delta.apply_to_change(self.span.original.tolist()) + print_sections( + ("summary", self.summary()), + ("main change", decode_tokens(main_change)), + ("edit_lines", str(self.edit_lines)), + ) + ===========changed ref 5=========== # module: coeditor.c3problem class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]): """ ### Change log + - v2.5: fix newline encoding bug. - v2.4: fix buggy encoding of `Added` and `Deleted` changes. - v2.3: always generate problems with full editing range and move the problem splitting logic elsewhere. Also changed the data format of `ChangedCodeSpan`. 
""" + VERSION = "2.5" - VERSION = "2.4" # change spans with more than this many lines will be ignored max_span_lines: int = 500 ===========changed ref 6=========== # module: coeditor.encoding + def encode_lines(text: str) -> Iterable[TokenSeq]: + return (encode_single_line(l) for l in splitlines(text)) + ===========changed ref 7=========== # module: coeditor.encoding TEdit = TypeVar("TEdit", bound=TokenizedEdit) + _OMIT = encode_single_line("...") ===========changed ref 8=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + @staticmethod + def from_change(change: Change[str]) -> tuple[str, "StrDelta"]: + line_diffs = change_to_line_diffs(change) + return line_diffs_to_original_delta(line_diffs) + ===========changed ref 9=========== # module: coeditor.encoding + def encode_lines_join(text: str) -> TokenSeq: + """Encode a mutliple line str such that the line breaks are consistently encoded.""" + return join_list(encode_lines(text), sep=Newline_id) + ===========changed ref 10=========== # module: coeditor.encoding - def encode_basic(text: str, add_special_tokens=False) -> TokenSeq: - "Encode a string into a token sequence using the base tokenizer." - return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) - ===========changed ref 11=========== # module: coeditor.encoding + def encode_single_line(text: str, add_special_tokens=False) -> TokenSeq: + """Encode a string into a token sequence using the base tokenizer. + Note that you should use `encode_lines_join` when `text` contains multiple lines + to ensure that newline characters are consistently encoded as the same token. + """ + return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) + ===========changed ref 12=========== # module: coeditor.c3problem + @dataclass class C3ProblemChangeDropout(C3ProblemTransform): def transform(self, prob: C3Problem) -> Sequence[C3Problem]: original = prob.span.original delta = prob.span.delta l_range = prob.edit_lines assert isinstance(l_range, range) start, stop = l_range.start, l_range.stop grouped_keys = delta.change_groups() should_dropout = len(grouped_keys) >= 2 if should_dropout: n_to_drop = int( len(grouped_keys) * random.random() * self.max_dropout_ratio ) assert n_to_drop < len(grouped_keys) keys_to_drop = join_list(random_subset(grouped_keys, n_to_drop)) else: keys_to_drop = [] if keys_to_drop: delta1, delta2 = delta.decompose_for_change(keys_to_drop) + if random.random() < self._test_prob: + result1 = delta2.apply_to_change( + delta1.apply_to_change(original.tolist()) + ) + result2 = delta.apply_to_change(original.tolist()) + code1 = tokens_to_change(result1).after + code2 = tokens_to_change(result2).after + if code1 != code2: + print_sections( + ("result1", decode_tokens(result1)), + ("result2", decode_tokens(result2)), + ("delta", str(delta)), + ("keys_to_drop", str(keys_to_drop)), + ("delta1", str(delta1)), + ("delta2", str(delta2)), + ) + raise AssertionError("decompose_for_change failed.") delta2_groups = delta2.change_groups() if not delta2_groups: print_err</s>
coeditor.model/RetrievalDecodingResult.exact_match_accuracy
Modified
temp-1
703cdaf8bc6456ee94a5afffc6c9f902bae79b03
Fix inconsistent encoding of newlines.
<0>:<add> print(prob.summary())
# module: coeditor.model @dataclass class RetrievalDecodingResult: def exact_match_accuracy(self) -> tuple[CountedSum, dict[int, bool]]: ex2correct = dict[int, bool]() bad_probs = list[C3Problem]() for i, mp in enumerate(self.predictions): prob = self.problems[i] original = prob.span.original.tolist() pred_delta = TkDelta.from_output_tks(mp["output_ids"]) label_delta = TkDelta.from_output_tks(mp["labels"]) if not prob.edit_lines: bad_probs.append(prob) continue line_shift = prob.edit_lines[0] pred_change = pred_delta.shifted(line_shift).apply_to_change(original) label_change = label_delta.shifted(line_shift).apply_to_change(original) pred_code = tokens_to_change(pred_change).after label_code = tokens_to_change(label_change).after ex2correct[i] = code_equal(pred_code, label_code) correct_count = CountedSum(sum(ex2correct.values()), len(ex2correct)) if bad_probs: cprint("yellow", "Number of problems with no edits:", len(bad_probs)) for prob in bad_probs[:5]: - print(prob.summarize()) <0> return correct_count, ex2correct
===========unchanged ref 0=========== at: coeditor._utils cprint(color: str, *elems, sep: Optional[str]=..., end: Optional[str]=..., file: Optional[SupportsWrite[str]]=..., flush: bool=...) at: coeditor.c3problem C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=()) at: coeditor.c3problem.C3Problem span: ChangedCodeSpan edit_line_ids: Sequence[int] relevant_changes: Sequence[ChangedCodeSpan] relevant_unchanged: Mapping["PyFullName", "PyDefinition"] change_type: Change[None] src_info: SrcInfo transformations: tuple[str, ...] = () summary() -> str at: coeditor.c3problem.ChangedCodeSpan headers: Sequence[ChangedHeader] original: TkArray delta: TkDelta line_range: LineRange module: ModuleName at: coeditor.change.Modified before: E1 after: E1 unchanged: bool = False at: coeditor.common CountedSum = WeightedSum[int, int] code_equal(code1: str, code2: str) -> bool at: coeditor.encoding TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]]) tokens_to_change(tokens: TokenSeq) -> Modified[str] at: coeditor.encoding.TkDelta _deltas: Mapping[int, tuple[TokenSeq, ...]] apply_to_change(change: TokenSeq) -> TokenSeq shifted(shift_lines: int) -> Self ===========unchanged ref 1=========== from_output_tks(edit_line_ids: Sequence[int], tks: TokenSeq, allow_truncated_tks: bool=True) -> "TkDelta" at: coeditor.model.RetrievalDecodingResult eval_args: dict problems: Sequence[C3Problem] predictions: Sequence[RetrievalModelPrediction] at: coeditor.tk_array.TkArray tolist() -> TokenSeq ===========changed ref 0=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + def summary(self) -> str: + return "\n".join(self.meta_data_lines()) + ===========changed ref 1=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + @property + def path(self) -> ProjectPath: + return self.span.headers[-1].path + ===========changed ref 2=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: - def summarize(self) -> str: - return "\n".join(self.meta_data_lines()) - ===========changed ref 3=========== # module: coeditor.encoding + def encode_lines(text: str) -> Iterable[TokenSeq]: + return (encode_single_line(l) for l in splitlines(text)) + ===========changed ref 4=========== # module: coeditor.encoding TEdit = TypeVar("TEdit", bound=TokenizedEdit) + _OMIT = encode_single_line("...") ===========changed ref 5=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + @staticmethod + def from_change(change: Change[str]) -> tuple[str, "StrDelta"]: + line_diffs = change_to_line_diffs(change) + return line_diffs_to_original_delta(line_diffs) + ===========changed ref 6=========== # module: coeditor.encoding + def encode_lines_join(text: str) -> TokenSeq: + """Encode a mutliple line str such that the line breaks are consistently encoded.""" + return join_list(encode_lines(text), sep=Newline_id) + ===========changed ref 7=========== # module: coeditor.encoding - def encode_basic(text: str, add_special_tokens=False) -> TokenSeq: - "Encode a string into a token sequence using the base tokenizer." 
- return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) - ===========changed ref 8=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + def print(self): + main_change = self.span.delta.apply_to_change(self.span.original.tolist()) + print_sections( + ("summary", self.summary()), + ("main change", decode_tokens(main_change)), + ("edit_lines", str(self.edit_lines)), + ) + ===========changed ref 9=========== # module: coeditor.encoding + def encode_single_line(text: str, add_special_tokens=False) -> TokenSeq: + """Encode a string into a token sequence using the base tokenizer. + Note that you should use `encode_lines_join` when `text` contains multiple lines + to ensure that newline characters are consistently encoded as the same token. + """ + return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) + ===========changed ref 10=========== # module: coeditor.c3problem @dataclass class C3ProblemTokenizer: def _get_offset_tks(self, offset: int) -> TkArray: if (tks := self._offset_cache.get(offset)) is None: + tks = TkArray.new(encode_single_line(f"# offset: {offset}")) - tks = TkArray.new(encode_basic(f"# offset: {offset}")) self._offset_cache[offset] = tks return tks ===========changed ref 11=========== # module: coeditor.c3problem class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]): """ ### Change log + - v2.5: fix newline encoding bug. - v2.4: fix buggy encoding of `Added` and `Deleted` changes. - v2.3: always generate problems with full editing range and move the problem splitting logic elsewhere. Also changed the data format of `ChangedCodeSpan`. """ + VERSION = "2.5" - VERSION = "2.4" # change spans with more than this many lines will be ignored max_span_lines: int = 500 ===========changed ref 12=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: def to_tk_delta(self) -> "TkDelta": deltas = dict[int, tuple[TokenSeq, ...]]() for k, line_delta in self._deltas.items(): line_tk_delta = list[TokenSeq]() for action in line_delta: if action[0] == "+": + line_tk_delta.append([Add_id] + encode_single_line(action[1:])) - line_tk_delta.append([Add_id] + encode_basic(action[1:])) elif action[0] == "-": line_tk_delta.append([Del_id]) else: raise ValueError(f"Invalid action: {action}") deltas[k] = tuple(line_tk_delta) return TkDelta(deltas)
coeditor.model/RetrievalDecodingResult.save_examples_to_dir
Modified
temp-1
703cdaf8bc6456ee94a5afffc6c9f902bae79b03
Fix inconsistent encoding of newlines.
<0>:<add> pickle_dump(out_dir / "ex_probs.pkl", all_probs)
# module: coeditor.model @dataclass class RetrievalDecodingResult: def save_examples_to_dir(self, out_dir: Path, ex2correct: dict[int, bool]) -> None: shutil.rmtree(out_dir, ignore_errors=True) (out_dir / "correct").mkdir(parents=True, exist_ok=True) (out_dir / "incorrect").mkdir(parents=True, exist_ok=True) + all_probs = dict[int, C3Problem]() for ex_id, correct in tqdm(ex2correct.items(), desc="saving examples"): ex = self.predictions[ex_id] + prob = self.problems[ex_id] + compare_str = self.show_prediction(prob, ex) - compare_str = self.show_prediction(self.problems[ex_id], ex) out_file = ( out_dir / ("correct" if correct else "incorrect") / f"ex-{ex_id}.txt" ) out_file.write_text(compare_str) + all_probs[ex_id] = prob <0>
===========unchanged ref 0=========== at: coeditor.c3problem C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=()) at: coeditor.model.RetrievalDecodingResult problems: Sequence[C3Problem] predictions: Sequence[RetrievalModelPrediction] show_prediction(prob: C3Problem, pred: RetrievalModelPrediction) -> str at: pathlib Path() at: pathlib.Path __slots__ = () mkdir(mode: int=..., parents: bool=..., exist_ok: bool=...) -> None at: pathlib.PurePath __slots__ = ( '_drv', '_root', '_parts', '_str', '_hash', '_pparts', '_cached_cparts', ) drive = property(attrgetter('_drv'), doc="""The drive prefix (letter or UNC path), if any.""") root = property(attrgetter('_root'), doc="""The root of the path, if any.""") at: shutil rmtree(path: Union[bytes, StrPath], ignore_errors: bool=..., onerror: Optional[Callable[[Any, Any, Any], Any]]=...) -> None at: tqdm.std tqdm(iterable=None, desc=None, total=None, leave=True, file=None, ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None, ascii=None, disable=False, unit='it', unit_scale=False, dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0, position=None, postfix=None, unit_divisor=1000, write_bytes=False, lock_args=None, nrows=None, colour=None, delay=0, gui=False, **kwargs) ===========changed ref 0=========== # module: coeditor.model @dataclass class RetrievalDecodingResult: def exact_match_accuracy(self) -> tuple[CountedSum, dict[int, bool]]: ex2correct = dict[int, bool]() bad_probs = list[C3Problem]() for i, mp in enumerate(self.predictions): prob = self.problems[i] original = prob.span.original.tolist() pred_delta = TkDelta.from_output_tks(mp["output_ids"]) label_delta = TkDelta.from_output_tks(mp["labels"]) if not prob.edit_lines: bad_probs.append(prob) continue line_shift = prob.edit_lines[0] pred_change = pred_delta.shifted(line_shift).apply_to_change(original) label_change = label_delta.shifted(line_shift).apply_to_change(original) pred_code = tokens_to_change(pred_change).after label_code = tokens_to_change(label_change).after ex2correct[i] = code_equal(pred_code, label_code) correct_count = CountedSum(sum(ex2correct.values()), len(ex2correct)) if bad_probs: cprint("yellow", "Number of problems with no edits:", len(bad_probs)) for prob in bad_probs[:5]: + print(prob.summary()) - print(prob.summarize()) return correct_count, ex2correct ===========changed ref 1=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + @property + def path(self) -> ProjectPath: + return self.span.headers[-1].path + ===========changed ref 2=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + def summary(self) -> str: + return "\n".join(self.meta_data_lines()) + ===========changed ref 3=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: - def summarize(self) -> str: - return "\n".join(self.meta_data_lines()) - ===========changed ref 4=========== # module: coeditor.encoding + def encode_lines(text: str) -> Iterable[TokenSeq]: + return (encode_single_line(l) for l in splitlines(text)) + ===========changed ref 5=========== # module: coeditor.encoding TEdit = TypeVar("TEdit", bound=TokenizedEdit) + _OMIT = encode_single_line("...") ===========changed ref 6=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + @staticmethod + def from_change(change: 
Change[str]) -> tuple[str, "StrDelta"]: + line_diffs = change_to_line_diffs(change) + return line_diffs_to_original_delta(line_diffs) + ===========changed ref 7=========== # module: coeditor.encoding + def encode_lines_join(text: str) -> TokenSeq: + """Encode a mutliple line str such that the line breaks are consistently encoded.""" + return join_list(encode_lines(text), sep=Newline_id) + ===========changed ref 8=========== # module: coeditor.encoding - def encode_basic(text: str, add_special_tokens=False) -> TokenSeq: - "Encode a string into a token sequence using the base tokenizer." - return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) - ===========changed ref 9=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + def print(self): + main_change = self.span.delta.apply_to_change(self.span.original.tolist()) + print_sections( + ("summary", self.summary()), + ("main change", decode_tokens(main_change)), + ("edit_lines", str(self.edit_lines)), + ) + ===========changed ref 10=========== # module: coeditor.encoding + def encode_single_line(text: str, add_special_tokens=False) -> TokenSeq: + """Encode a string into a token sequence using the base tokenizer. + Note that you should use `encode_lines_join` when `text` contains multiple lines + to ensure that newline characters are consistently encoded as the same token. + """ + return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) + ===========changed ref 11=========== # module: coeditor.c3problem @dataclass class C3ProblemTokenizer: def _get_offset_tks(self, offset: int) -> TkArray: if (tks := self._offset_cache.get(offset)) is None: + tks = TkArray.new(encode_single_line(f"# offset: {offset}")) - tks = TkArray.new(encode_basic(f"# offset: {offset}")) self._offset_cache[offset] = tks return tks ===========changed ref 12=========== # module: coeditor.c3problem class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]): """ ### Change log + - v2.5: fix newline encoding bug. - v2.4: fix buggy encoding of `Added` and `Deleted` changes. - v2.3: always generate problems with full editing range and move the problem splitting logic elsewhere. Also changed the data format of `ChangedCodeSpan`. """ + VERSION = "2.5" - VERSION = "2.4" # change spans with more than this many lines will be ignored max_span_lines: int = 500
coeditor.model/RetrievalEditorModel.encode_token_seqs
Modified
temp-1
703cdaf8bc6456ee94a5afffc6c9f902bae79b03
Fix inconsistent encoding of newlines.
<0>:<add> for ref in references
# module: coeditor.model class RetrievalEditorModel(T5PreTrainedModel): def encode_token_seqs( self, references: Sequence[TokenSeq] | Sequence[str], pad_id=None ) -> LongTensor: references = [ + encode_lines_join(ref) if isinstance(ref, str) else ref - encode_basic(ref) if isinstance(ref, str) else ref for ref in references <0> ] out = pad_token_seqs(references, pad_id=pad_id) out = out.to(self.device) return cast(LongTensor, out)
===========unchanged ref 0=========== at: coeditor.common TokenSeq = list[Token] at: coeditor.encoding encode_lines_join(text: str) -> TokenSeq at: coeditor.model.RetrievalEditorModel is_parallelizable = False supports_gradient_checkpointing = False at: coeditor.model.RetrievalEditorModel.load model = RetrievalEditorModel.from_pretrained(save_dir) at: typing Sequence = _alias(collections.abc.Sequence, 1) ===========changed ref 0=========== # module: coeditor.encoding + def encode_lines_join(text: str) -> TokenSeq: + """Encode a mutliple line str such that the line breaks are consistently encoded.""" + return join_list(encode_lines(text), sep=Newline_id) + ===========changed ref 1=========== # module: coeditor.model @dataclass class RetrievalDecodingResult: @classmethod def show_prediction(cls, prob: C3Problem, pred: RetrievalModelPrediction) -> str: span = prob.span tk_prob = TkC3Problem( input=TkArray.new(pred["input_ids"]), output=TkArray.new(pred["labels"]), path=span.headers[-1].path, change_type=prob.change_type, named_references=[ (f"reference-{i}", TkArray.new(ref)) for i, ref in enumerate(pred["references"]) ], project=prob.src_info["project"], commit=prob.src_info["commit"], ) - meta_lines = prob.meta_data_lines() + return tk_prob.show(pred["output_ids"]) - all_secs = [*meta_lines, tk_prob.show(pred["output_ids"])] - return "\n".join(all_secs) ===========changed ref 2=========== # module: coeditor.model @dataclass class RetrievalDecodingResult: def save_examples_to_dir(self, out_dir: Path, ex2correct: dict[int, bool]) -> None: shutil.rmtree(out_dir, ignore_errors=True) (out_dir / "correct").mkdir(parents=True, exist_ok=True) (out_dir / "incorrect").mkdir(parents=True, exist_ok=True) + all_probs = dict[int, C3Problem]() for ex_id, correct in tqdm(ex2correct.items(), desc="saving examples"): ex = self.predictions[ex_id] + prob = self.problems[ex_id] + compare_str = self.show_prediction(prob, ex) - compare_str = self.show_prediction(self.problems[ex_id], ex) out_file = ( out_dir / ("correct" if correct else "incorrect") / f"ex-{ex_id}.txt" ) out_file.write_text(compare_str) + all_probs[ex_id] = prob + pickle_dump(out_dir / "ex_probs.pkl", all_probs) ===========changed ref 3=========== # module: coeditor.model @dataclass class RetrievalDecodingResult: def exact_match_accuracy(self) -> tuple[CountedSum, dict[int, bool]]: ex2correct = dict[int, bool]() bad_probs = list[C3Problem]() for i, mp in enumerate(self.predictions): prob = self.problems[i] original = prob.span.original.tolist() pred_delta = TkDelta.from_output_tks(mp["output_ids"]) label_delta = TkDelta.from_output_tks(mp["labels"]) if not prob.edit_lines: bad_probs.append(prob) continue line_shift = prob.edit_lines[0] pred_change = pred_delta.shifted(line_shift).apply_to_change(original) label_change = label_delta.shifted(line_shift).apply_to_change(original) pred_code = tokens_to_change(pred_change).after label_code = tokens_to_change(label_change).after ex2correct[i] = code_equal(pred_code, label_code) correct_count = CountedSum(sum(ex2correct.values()), len(ex2correct)) if bad_probs: cprint("yellow", "Number of problems with no edits:", len(bad_probs)) for prob in bad_probs[:5]: + print(prob.summary()) - print(prob.summarize()) return correct_count, ex2correct ===========changed ref 4=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + @property + def path(self) -> ProjectPath: + return self.span.headers[-1].path + ===========changed ref 5=========== # module: coeditor.c3problem 
@dataclass(frozen=True) class C3Problem: + def summary(self) -> str: + return "\n".join(self.meta_data_lines()) + ===========changed ref 6=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: - def summarize(self) -> str: - return "\n".join(self.meta_data_lines()) - ===========changed ref 7=========== # module: coeditor.encoding + def encode_lines(text: str) -> Iterable[TokenSeq]: + return (encode_single_line(l) for l in splitlines(text)) + ===========changed ref 8=========== # module: coeditor.encoding TEdit = TypeVar("TEdit", bound=TokenizedEdit) + _OMIT = encode_single_line("...") ===========changed ref 9=========== # module: coeditor.encoding @dataclass(frozen=True) class StrDelta: + @staticmethod + def from_change(change: Change[str]) -> tuple[str, "StrDelta"]: + line_diffs = change_to_line_diffs(change) + return line_diffs_to_original_delta(line_diffs) + ===========changed ref 10=========== # module: coeditor.encoding - def encode_basic(text: str, add_special_tokens=False) -> TokenSeq: - "Encode a string into a token sequence using the base tokenizer." - return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) - ===========changed ref 11=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + def print(self): + main_change = self.span.delta.apply_to_change(self.span.original.tolist()) + print_sections( + ("summary", self.summary()), + ("main change", decode_tokens(main_change)), + ("edit_lines", str(self.edit_lines)), + ) + ===========changed ref 12=========== # module: coeditor.encoding + def encode_single_line(text: str, add_special_tokens=False) -> TokenSeq: + """Encode a string into a token sequence using the base tokenizer. + Note that you should use `encode_lines_join` when `text` contains multiple lines + to ensure that newline characters are consistently encoded as the same token. + """ + return _BaseTokenizer.encode(text, add_special_tokens=add_special_tokens) + ===========changed ref 13=========== # module: coeditor.c3problem @dataclass class C3ProblemTokenizer: def _get_offset_tks(self, offset: int) -> TkArray: if (tks := self._offset_cache.get(offset)) is None: + tks = TkArray.new(encode_single_line(f"# offset: {offset}")) - tks = TkArray.new(encode_basic(f"# offset: {offset}")) self._offset_cache[offset] = tks return tks
tests.test_edits/TestChangeIdentities.test_tk_encodings
Modified
temp-1
703cdaf8bc6456ee94a5afffc6c9f902bae79b03
Fix inconsistent encoding of newlines.
<0>:<add> code_to_input(encode_lines_join(get_before(c))),
# module: tests.test_edits class TestChangeIdentities: def test_tk_encodings(self): for name, c in self.cases.items(): # print(show_change(c)) c_tokens = change_to_tokens(c) print("c_tokens\n------\n", decode_tokens(c_tokens)) c_rec = tokens_to_change(c_tokens) assert_change_eq( c_rec, c, "change_to_tokens |> tokens_to_change = identity: " + name ) in_seq, out_seq = change_to_input_output(c) print("in_seq\n------\n", decode_tokens(in_seq)) print("out_seq\n------\n", decode_tokens(out_seq)) assert_tks_eq( in_seq, - code_to_input( - _BaseTokenizer.encode(get_before(c), add_special_tokens=False) - ), <0> "change_to_input_output mathese code_to_input: " + name, ) if len(splitlines(get_before(c))) < N_Extra_Ids: inlined = inline_output_tokens(in_seq, out_seq) if inlined: assert inlined[-1] == Newline_id assert_tks_eq( inlined[:-1], change_to_tokens(c), "inline_output_tokens: " + name ) c_rec2 = tokens_to_change(inlined[:-1]) assert_change_eq(c_rec2, c, "tokens_to_change(inlined): " + name)
===========unchanged ref 0=========== at: coeditor.common print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None assert_str_equal(actual: str, expect: str, name: str | None=None) at: coeditor.encoding decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str change_to_line_diffs(change: Change[str]) -> list[str] line_diffs_to_original_delta(diffs: list[str]) -> tuple[str, StrDelta] change_to_tokens(change: Change[str]) -> TokenSeq tokens_to_change(tokens: TokenSeq) -> Modified[str] change_to_input_output(change: Change[str]) -> tuple[TokenSeq, TokenSeq] at: tests.test_edits get_before(change: Change[str]) -> str get_after(change: Change[str]) -> str assert_change_eq(actual: Change[str], expected: Change[str], name: str) assert_tks_eq(actual: TokenSeq, expected: TokenSeq, name: str) ===========unchanged ref 1=========== at: tests.test_edits.TestChangeIdentities cases: dict[str, Change[str]] = { "empty": Modified("", ""), "generation": Modified("", "123"), "added": Added("a\nb\nc\n"), "deleted": Deleted("a\nb\nc\n"), "no change": Modified( dedent( """\ def f1(): x = 1 """ ), dedent( """\ def f1(): x = 1 """ ), ), "unchanged=True": Modified.from_unchanged( dedent( """\ def f1(): x = 1 """ ), ), # this test case cannot pass for some reason. Tokenizer bug? # "leading_whitespace": Modified.from_unchanged(" ..."), "replace last": Modified( dedent( """\ def f1(): x = 1""" ), dedent( """\ def f1(): x = 2 return x * 2""" ), ), "no special tokens": Modified( dedent( """\ def f1(): x = 1 y = 2 z = x + y return z def f2(): f1()""" ), dedent( """\ # new comment def f_new(): x = 1 if x > 0: y = 2 * x y *= 2 z = x + y return z def f2(): f1() return f_new() + a new_var = 0 """ ), ), "with special tokens": Modified( dedent( """\ def f1(): x = "<add>" y = "<del>\tx" return x + y """ ), dedent( """\ # new comment 1</s> ===========changed ref 0=========== # module: coeditor.encoding def change_to_tokens(change: Change[str]) -> TokenSeq: match change: case Modified(before=before, after=after, unchanged=unchanged): if unchanged or before == after: + return encode_lines_join(before) - return encode_basic(before) else: diffs = change_to_line_diffs(change) return encode_diffs(diffs) case Added() | Deleted(): + lines = encode_lines(change.earlier) - lines = split_list(encode_basic(change.earlier), Newline_id) tk = Add_id if isinstance(change, Added) else Del_id return join_list(([tk] + line for line in lines), Newline_id) case _: raise AssertionError(f"Not a change type: {change}") ===========changed ref 1=========== # module: tests.test_edits class TestChangeIdentities: cases: dict[str, Change[str]] = { "empty": Modified("", ""), "generation": Modified("", "123"), "added": Added("a\nb\nc\n"), "deleted": Deleted("a\nb\nc\n"), "no change": Modified( dedent( """\ def f1(): x = 1 """ ), dedent( """\ def f1(): x = 1 """ ), ), "unchanged=True": Modified.from_unchanged( dedent( """\ def f1(): x = 1 """ ), ), # this test case cannot pass for some reason. Tokenizer bug? 
# "leading_whitespace": Modified.from_unchanged(" ..."), "replace last": Modified( dedent( """\ def f1(): x = 1""" ), dedent( """\ def f1(): x = 2 return x * 2""" ), ), "no special tokens": Modified( dedent( """\ def f1(): x = 1 y = 2 z = x + y return z def f2(): f1()""" ), dedent( """\ # new comment def f_new(): x = 1 if x > 0: y = 2 * x y *= 2 z = x + y return z def f2(): f1() return f_new() + a new_var = 0 """ ), ), "with special tokens": Modified( dedent( """\ def f1(): x = "<add>" y = "<del>\tx" return x + y """ ), dedent( """\ #</s> ===========changed ref 2=========== # module: tests.test_edits class TestChangeIdentities: # offset: 1 <s> "<del>\tx" return x + y """ ), dedent( """\ # new comment 1 # new comment 2 def f1(): if newcond: x = "<add>" new_var = 5 y = "<del>" return x + new_var + y """ ), ), "super long": Modified( "\n".join(f"x = {i}" for i in range(0, 200)), "\n".join(f"x = {2* (i // 2)}" for i in range(0, 200)), ), + "strings with newlines": Modified( + dedent( + """\ + If `True`, wraps the environments in an `AsyncVectorEnv` (which uses \n + `multiprocessing` to run the environments in parallel) \n + """ + ), + dedent( + """\ + If `True`, wraps the environments in an `AsyncVectorEnv` (which uses \n + `multiprocessing` to run the environments in parallel) \n + Added a line here. \n + and here. + """ + ), + ), } ===========changed ref 3=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + @property + def path(self) -> ProjectPath: + return self.span.headers[-1].path + ===========changed ref 4=========== # module: coeditor.c3problem @dataclass(frozen=True) class C3Problem: + def summary(self) -> str: + return "\n".join(self.meta_data_lines()) +
tests.test_edits/TestChangeIdentities.test_str_tk_conversion
Modified
temp-1
703cdaf8bc6456ee94a5afffc6c9f902bae79b03
Fix inconsistent encoding of newlines.
<0>:<add> if tk_after != encode_lines_join(get_after(c)):
# module: tests.test_edits class TestChangeIdentities: def test_str_tk_conversion(self): for name, c in self.cases.items(): line_diffs = change_to_line_diffs(c) print("line_diffs\n------\n" + "\n".join(line_diffs)) before, delta = line_diffs_to_original_delta(line_diffs) print("delta:", delta) tk_delta = delta.to_tk_delta() + tk_before = encode_lines_join(before) - tk_before = encode_basic(before) tk_after = tk_delta.apply_to_input(tk_before) - if tk_after != encode_basic(get_after(c)): <0> print("after diff:\n") print(show_string_diff(get_after(c), decode_tokens(tk_after))) c_tokens = tk_delta.apply_to_change(tk_before) if c_tokens != change_to_tokens(c): print("c_tokens diff:\n") print( show_string_diff( decode_tokens(c_tokens), decode_tokens(change_to_tokens(c)) ) ) origin1, tk_delta1 = change_tks_to_original_delta(c_tokens) if origin1 != tk_before: print("origin diff:\n") print( show_string_diff(decode_tokens(origin1), decode_tokens(tk_before)) ) assert tk_delta1.apply_to_input(origin1) == tk_after
===========unchanged ref 0=========== at: coeditor._utils show_string_diff(str1: str, str2: str, max_ctx: int | None=6) -> str at: coeditor.common splitlines(text: str) -> list[str] at: coeditor.encoding Newline_id = get_tk_id("\n") N_Extra_Ids = 100 decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str encode_lines_join(text: str) -> TokenSeq change_to_line_diffs(change: Change[str]) -> list[str] line_diffs_to_original_delta(diffs: list[str]) -> tuple[str, StrDelta] change_to_tokens(change: Change[str]) -> TokenSeq tokens_to_change(tokens: TokenSeq) -> Modified[str] code_to_input(code_tks: TokenSeq) -> TokenSeq inline_output_tokens(input: TokenSeq, output: TokenSeq, leave_unpredicted=False) -> TokenSeq at: tests.test_edits get_before(change: Change[str]) -> str get_after(change: Change[str]) -> str assert_change_eq(actual: Change[str], expected: Change[str], name: str) assert_tks_eq(actual: TokenSeq, expected: TokenSeq, name: str) ===========unchanged ref 1=========== at: tests.test_edits.TestChangeIdentities cases: dict[str, Change[str]] = { "empty": Modified("", ""), "generation": Modified("", "123"), "added": Added("a\nb\nc\n"), "deleted": Deleted("a\nb\nc\n"), "no change": Modified( dedent( """\ def f1(): x = 1 """ ), dedent( """\ def f1(): x = 1 """ ), ), "unchanged=True": Modified.from_unchanged( dedent( """\ def f1(): x = 1 """ ), ), # this test case cannot pass for some reason. Tokenizer bug? # "leading_whitespace": Modified.from_unchanged(" ..."), "replace last": Modified( dedent( """\ def f1(): x = 1""" ), dedent( """\ def f1(): x = 2 return x * 2""" ), ), "no special tokens": Modified( dedent( """\ def f1(): x = 1 y = 2 z = x + y return z def f2(): f1()""" ), dedent( """\ # new comment def f_new(): x = 1 if x > 0: y = 2 * x y *= 2 z = x + y return z def f2(): f1() return f_new() + a new_var = 0 """ ), ), "with special tokens": Modified( dedent( """\ def f1(): x = "<add>" y = "<del>\tx" return x + y """ ), dedent( """\ # new comment 1</s> ===========unchanged ref 2=========== at: tests.test_edits.TestChangeIdentities.test_tk_encodings in_seq, out_seq = change_to_input_output(c) in_seq, out_seq = change_to_input_output(c) ===========changed ref 0=========== # module: coeditor.encoding + def encode_lines_join(text: str) -> TokenSeq: + """Encode a mutliple line str such that the line breaks are consistently encoded.""" + return join_list(encode_lines(text), sep=Newline_id) + ===========changed ref 1=========== # module: coeditor.encoding def change_to_tokens(change: Change[str]) -> TokenSeq: match change: case Modified(before=before, after=after, unchanged=unchanged): if unchanged or before == after: + return encode_lines_join(before) - return encode_basic(before) else: diffs = change_to_line_diffs(change) return encode_diffs(diffs) case Added() | Deleted(): + lines = encode_lines(change.earlier) - lines = split_list(encode_basic(change.earlier), Newline_id) tk = Add_id if isinstance(change, Added) else Del_id return join_list(([tk] + line for line in lines), Newline_id) case _: raise AssertionError(f"Not a change type: {change}") ===========changed ref 2=========== # module: tests.test_edits class TestChangeIdentities: def test_tk_encodings(self): for name, c in self.cases.items(): # print(show_change(c)) c_tokens = change_to_tokens(c) print("c_tokens\n------\n", decode_tokens(c_tokens)) c_rec = tokens_to_change(c_tokens) assert_change_eq( c_rec, c, "change_to_tokens |> tokens_to_change = identity: " + name ) in_seq, out_seq = change_to_input_output(c) 
print("in_seq\n------\n", decode_tokens(in_seq)) print("out_seq\n------\n", decode_tokens(out_seq)) assert_tks_eq( in_seq, - code_to_input( - _BaseTokenizer.encode(get_before(c), add_special_tokens=False) - ), + code_to_input(encode_lines_join(get_before(c))), "change_to_input_output mathese code_to_input: " + name, ) if len(splitlines(get_before(c))) < N_Extra_Ids: inlined = inline_output_tokens(in_seq, out_seq) if inlined: assert inlined[-1] == Newline_id assert_tks_eq( inlined[:-1], change_to_tokens(c), "inline_output_tokens: " + name ) c_rec2 = tokens_to_change(inlined[:-1]) assert_change_eq(c_rec2, c, "tokens_to_change(inlined): " + name)
coeditor.dataset/_process_commits
Modified
temp-1
fedac8b34b8f35d0fc50414ef2c88c227f1464cf
Fix data processing temp paths.
<0>:<add> tempdir=workdir / "code" / root.name,
# module: coeditor.dataset def _process_commits( root: Path, workdir: Path, commits: Sequence[CommitInfo], is_training: bool, change_processor: ProjectChangeProcessor[C3Problem], ) -> _ProcessingResult: # use process-specific parso cache fix_jedi_cache(workdir) scoped_changes._tlogger.clear() change_processor.clear_stats() change_processor.set_training(is_training) try: # cannot return here since subprocess will be killed after returning edits = edits_from_commit_history( root, commits, - tempdir=workdir / "code", <0> change_processor=change_processor, silent=True, time_limit=time_limit_per_commit * (len(commits) + 10), ) except Exception as e: if isinstance(e, KeyboardInterrupt): raise warnings.warn(f"Failed to process project: {root}\nError: {e}") traceback.print_exception(e, limit=-6) edits = [] stats = dict() change_processor.append_stats(stats) rec_add_dict_to(stats, {"tlogger": scoped_changes._tlogger.times}) return _ProcessingResult(edits, stats)
===========unchanged ref 0=========== at: _warnings warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None at: coeditor._utils.TimeLogger times: dict[str, list[float]] = field(default_factory=dict) clear() at: coeditor.c3problem C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=()) fix_jedi_cache(cache_dir: Path) at: coeditor.common rec_add_dict_to(target: dict[str, Any], value: Mapping[str, Any], value_merger: Callable[[Any, Any], Any]=lambda x, y: x + y) at: coeditor.dataset _ProcessingResult(edits: Sequence[C3Problem], stats: dict[str, dict | Any]) time_limit_per_commit = 10.0 at: coeditor.git CommitInfo(hash: str, parents: tuple[str, ...], msg: str) at: coeditor.scoped_changes _tlogger = TimeLogger() ProjectChangeProcessor() edits_from_commit_history(project_dir: Path, history: Sequence[CommitInfo], tempdir: Path, change_processor: ProjectChangeProcessor[TProb]=NoProcessing(), ignore_dirs=DefaultIgnoreDirs, silent: bool=False, time_limit: float | None=None) -> Sequence[TProb] at: coeditor.scoped_changes.ProjectChangeProcessor clear_stats() append_stats(stats: dict[str, Any]) -> None set_training(is_training: bool) -> None at: pathlib Path() ===========unchanged ref 1=========== at: pathlib.PurePath __slots__ = ( '_drv', '_root', '_parts', '_str', '_hash', '_pparts', '_cached_cparts', ) drive = property(attrgetter('_drv'), doc="""The drive prefix (letter or UNC path), if any.""") root = property(attrgetter('_root'), doc="""The root of the path, if any.""") at: traceback print_exception(etype: Optional[Type[BaseException]], value: Optional[BaseException], tb: Optional[TracebackType], limit: Optional[int]=..., file: Optional[IO[str]]=..., chain: bool=...) -> None at: typing Sequence = _alias(collections.abc.Sequence, 1)
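===========path sketch: per-repository checkout directory===========
The one-line fix in this record nests the temporary checkout under a directory named
after the repository root, so checkouts for different repositories can never end up in
the same "code" directory. A small, runnable path sketch with illustrative names:

from pathlib import Path

workdir = Path("/tmp/dataset_from_projects/pid-1234/chunk-0")  # hypothetical worker dir
root = Path("/data/repos/example-project")                     # hypothetical repo root
tempdir = workdir / "code" / root.name
print(tempdir)  # /tmp/dataset_from_projects/pid-1234/chunk-0/code/example-project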
coeditor.dataset/dataset_from_projects
Modified
temp-1
fedac8b34b8f35d0fc50414ef2c88c227f1464cf
Fix data processing temp paths.
<0>:<add> workdir = Path(tempfile.gettempdir()) / "dataset_from_projects" / f"pid-{pid}"
# module: coeditor.dataset def dataset_from_projects( project_roots: Sequence[Path], change_processor: ProjectChangeProcessor[C3Problem], repo_training: Sequence[bool], max_history_per_repo: int = 1000, workers: int = DefaultWorkers, ) -> "Mapping[Path, Sequence[C3Problem]]": """ Create a TokenizedEditDataset from a list of project roots and a given encoder. Args: - max_history_per_repo (int, optional): When the repo history is longer than this value, only the oldest portion is going to be used. Defaults to 1000. """ + # get the process id + pid = os.getpid() - workdir = Path(tempfile.gettempdir()) / "dataset_from_projects" <0> histories = pmap( get_commit_history, project_roots, max_workers=workers, desc="Getting commit histories", tqdm_args={"unit": "repo"}, ) # keep the oldest portion of the history histories = [commits[-max_history_per_repo:] for commits in histories] # break long commit sequences into chunks for parallelization roots = list[Path]() chunk_training = list[bool]() chunked_histories = list[list[CommitInfo]]() for root, h, train in zip(project_roots, histories, repo_training): history_chunk_size = max(50, math.ceil(len(h) / 10)) for i in range(0, len(h), history_chunk_size): roots.append(root) chunk_training.append(train) # note that we need 1 extra overlapping commit to get all diffs chunked_histories.append(h[i : i + history_chunk_size + 1]) workdirs = [workdir / f"chunk-{i}" for i in range(len(roots))] try: presults = pmap( _process_commits, roots, workdirs, chunked_</s>
===========below chunk 0=========== # module: coeditor.dataset def dataset_from_projects( project_roots: Sequence[Path], change_processor: ProjectChangeProcessor[C3Problem], repo_training: Sequence[bool], max_history_per_repo: int = 1000, workers: int = DefaultWorkers, ) -> "Mapping[Path, Sequence[C3Problem]]": # offset: 1 <s> try: presults = pmap( _process_commits, roots, workdirs, chunked_histories, chunk_training, key_args={"change_processor": change_processor}, max_workers=workers, tqdm_args={"unit": "chunk"}, ) finally: if workdir.exists(): shutil.rmtree(workdir) print("Workdir removed:", workdir) project2edits = dict[Path, list[C3Problem]]() try: stats = dict[str, Any]() for root, pr in zip(roots, presults): project2edits.setdefault(root, []).extend(pr.edits) rec_add_dict_to(stats, pr.stats) if "tlogger" in stats: df = TimeLogger.times_to_dataframe(stats.pop("tlogger")) print("Time stats:") display(df) if "analyzer_errors" in list(stats.keys()): errors: dict = stats.pop("analyzer_errors") for k in list(errors.keys()): if JediUsageAnalyzer.is_known_error(k): errors.pop(k) if errors: print("Analyzer errors:") for k in sorted(errors.keys(), key=lambda k: errors[k], reverse=True): print(f"{k}:\t{errors[k]}") if stats: print("Other Stats:") pretty_print_dict(stats) except Exception as e: if not isinstance(e, KeyboardInterrupt): print("Error while printing stats:", e) </s> ===========below chunk 1=========== # module: coeditor.dataset def dataset_from_projects( project_roots: Sequence[Path], change_processor: ProjectChangeProcessor[C3Problem], repo_training: Sequence[bool], max_history_per_repo: int = 1000, workers: int = DefaultWorkers, ) -> "Mapping[Path, Sequence[C3Problem]]": # offset: 2 <s> except Exception as e: if not isinstance(e, KeyboardInterrupt): print("Error while printing stats:", e) return project2edits ===========unchanged ref 0=========== at: IPython.core.display_functions display(*, include=None, exclude=None, metadata=None, transient=None, display_id=None, raw=False, clear=False, source=_sentinel, **kwargs) at: coeditor._utils DefaultWorkers: int = multiprocessing.cpu_count() // 2 global DefaultWorkers pmap(f: Callable[..., T1], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], /, *iterables: Iterable[Any], desc: str | None=None, key_args: Mapping[str, Any] | None=None, max_workers: int | None=None, chunksize: int | None=None, tqdm_args: Mapping[str, Any] | None=None) -> list[T1] TimeLogger(times: dict[str, list[float]]=field(default_factory=dict)) pretty_print_dict(d: dict, level: int=0, max_show_level: int=1000, float_precision: int=5) at: coeditor._utils.TimeLogger times_to_dataframe(times: dict[str, list[float]]) at: coeditor.c3problem C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=()) JediUsageAnalyzer(include_parent_usages: bool=True, include_builtins: bool=False) at: coeditor.c3problem.JediUsageAnalyzer include_parent_usages: bool = True include_builtins: bool = False ===========unchanged ref 1=========== _KnownJediErrors = { "not enough values to unpack (expected 2", "'Newline' object has no attribute 'children'", "trailer_op is actually ", "There's a scope that was not managed: <Module", "maximum recursion depth exceeded", "'NoneType' object has no attribute 'type'", } is_known_error(err_text: str) at: coeditor.common 
rec_add_dict_to(target: dict[str, Any], value: Mapping[str, Any], value_merger: Callable[[Any, Any], Any]=lambda x, y: x + y) at: coeditor.dataset _process_commits(root: Path, workdir: Path, commits: Sequence[CommitInfo], is_training: bool, change_processor: ProjectChangeProcessor[C3Problem]) -> _ProcessingResult at: coeditor.dataset._ProcessingResult edits: Sequence[C3Problem] stats: dict[str, dict | Any] at: coeditor.git CommitInfo(hash: str, parents: tuple[str, ...], msg: str) get_commit_history(project_dir: Path, max_history: int | None=None, commit_id: str="HEAD") -> list[CommitInfo] at: coeditor.scoped_changes ProjectChangeProcessor() at: math ceil(x: SupportsFloat, /) -> int at: os getpid() -> int at: pathlib Path() at: pathlib.Path __slots__ = () exists() -> bool at: shutil rmtree(path: Union[bytes, StrPath], ignore_errors: bool=..., onerror: Optional[Callable[[Any, Any, Any], Any]]=...) -> None at: tempfile gettempdir() -> str at: typing Sequence = _alias(collections.abc.Sequence, 1) ===========unchanged ref 2=========== at: typing.MutableMapping pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T] pop(key: _KT) -> _VT ===========changed ref 0=========== # module: coeditor.dataset def _process_commits( root: Path, workdir: Path, commits: Sequence[CommitInfo], is_training: bool, change_processor: ProjectChangeProcessor[C3Problem], ) -> _ProcessingResult: # use process-specific parso cache fix_jedi_cache(workdir) scoped_changes._tlogger.clear() change_processor.clear_stats() change_processor.set_training(is_training) try: # cannot return here since subprocess will be killed after returning edits = edits_from_commit_history( root, commits, + tempdir=workdir / "code" / root.name, - tempdir=workdir / "code", change_processor=change_processor, silent=True, time_limit=time_limit_per_commit * (len(commits) + 10), ) except Exception as e: if isinstance(e, KeyboardInterrupt): raise warnings.warn(f"Failed to process project: {root}\nError: {e}") traceback.print_exception(e, limit=-6) edits = [] stats = dict() change_processor.append_stats(stats) rec_add_dict_to(stats, {"tlogger": scoped_changes._tlogger.times}) return _ProcessingResult(edits, stats)
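===========path sketch: per-process working directory===========
The companion change in this record makes the top-level working directory
process-specific, so two concurrent dataset_from_projects runs cannot collide on the
same temporary path. A runnable sketch of the resulting layout (the chunk count is
illustrative):

import os
import tempfile
from pathlib import Path

pid = os.getpid()
workdir = Path(tempfile.gettempdir()) / "dataset_from_projects" / f"pid-{pid}"
chunk_dirs = [workdir / f"chunk-{i}" for i in range(3)]  # one sub-directory per history chunk
print(workdir)
print(chunk_dirs[0])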
coeditor.common/random_subset
Modified
temp-1
4f5cb29cfbc55d7d9f6407473bc0766f831b8a94
Improve C3DataLoader performance.
<0>:<add> return {(k := keys[i]): all[k] for i in ids[:n]}
# module: coeditor.common + def random_subset(all, n: int, rng: random.Random | int | None = None): - def random_subset(all, n: int, rng: random.Random | None = None): if rng is None: + rng = random.Random() - rng = random.Random(42) + elif isinstance(rng, int): + rng = random.Random(rng) if isinstance(all, Sequence): + ids = list(range(len(all))) - xs = [x for x in all] + rng.shuffle(ids) - rng.shuffle(xs) + xs = [all[i] for i in ids[:n]] + return xs - return xs[:n] elif isinstance(all, Mapping): keys = [k for k in all] + ids = list(range(len(keys))) + rng.shuffle(ids) - rng.shuffle(keys) - return {k: all[k] for k in keys[:n]} <0> else: raise ArgumentError(all, f"Unsupported arg type: {type(all)}")
===========unchanged ref 0=========== at: random Random(x: Any=...) at: random.Random VERSION = 3 # used by getstate/setstate _randbelow = _randbelow_with_getrandbits shuffle(x: MutableSequence[Any], random: Optional[Callable[[], float]]=...) -> None at: typing Mapping = _alias(collections.abc.Mapping, 2) Sequence = _alias(collections.abc.Sequence, 1)
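===========usage sketch: random_subset===========
A short usage sketch of the reworked random_subset; it assumes the coeditor.common
module shown above is importable, and the inputs are illustrative. Per the changelog in
the neighbouring records, the helper previously defaulted to a fixed seed of 42 and is
now genuinely random unless a seed (or a random.Random instance) is passed via rng.

from coeditor.common import random_subset

xs = list(range(10))
print(random_subset(xs, 3, rng=42))  # int seed: a reproducible pick of 3 elements
print(random_subset(xs, 3))          # no seed: a fresh random pick on every call

d = {"a": 1, "b": 2, "c": 3}
print(random_subset(d, 2, rng=42))   # mappings are sampled by key and returned as a dict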
coeditor.model/C3DataLoader._to_tokenized
Modified
temp-1
4f5cb29cfbc55d7d9f6407473bc0766f831b8a94
Improve C3DataLoader performance.
<0>:<add> tqdm_args={"disable": True},
# module: coeditor.model @dataclass class C3DataLoader: def _to_tokenized(self, probs: Sequence[C3Problem]) -> Iterable[TkC3Problem]: probs = list(probs) + if self.transform is not None: + # we can afford to store all transformed problems beforehand + probs = join_list(pmap(self.transform.transform, probs, chunksize=500)) if self.shuffle: + # we need to shuffle after the transform to help serialization + # this also mixes the problems better random.shuffle(probs) for i in range(0, len(probs), self.chunk_size): + # we can only afford to tokenize the problems on-the-fly group = probs[i : i + self.chunk_size] - if self.transform is not None: - group = join_list( - pmap(self.transform.transform, group, tqdm_args={"disable": True}) - ) yield from pmap( + self.tokenizer.tokenize_problem, + group, - self.tokenizer.tokenize_problem, group, tqdm_args={"disable": True} <0> )
===========unchanged ref 0=========== at: coeditor._utils pmap(f: Callable[..., T1], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], /, *iterables: Iterable[Any], desc: str | None=None, key_args: Mapping[str, Any] | None=None, max_workers: int | None=None, chunksize: int | None=None, tqdm_args: Mapping[str, Any] | None=None) -> list[T1] at: coeditor.c3problem.C3ProblemTokenizer VERSION = "2.7" max_ref_tks: int = 512 max_query_tks: int = 512 max_output_tks: int = 256 max_scope_tks: int = 128 max_ref_tks_sum: int = 512 * 16 ref_chunk_overlap: int = 32 disable_builtin_defs: bool = True disable_unchanged_refs: bool = False current_code_only: bool = False tokenize_problem(problem: C3Problem) -> TkC3Problem at: coeditor.c3problem.C3ProblemTransform transform(prob: C3Problem) -> Sequence[C3Problem] at: coeditor.common join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1] at: coeditor.model.C3DataLoader all_probs: Sequence[C3Problem] transform: C3ProblemTransform | None tokenizer: C3ProblemTokenizer batch_args: BatchArgs shuffle: bool desc: str tqdm_args: dict | None = None chunk_size: int = 1000 at: random shuffle = _inst.shuffle ===========changed ref 0=========== # module: coeditor.model @dataclass class C3DataLoader: all_probs: Sequence[C3Problem] transform: C3ProblemTransform | None tokenizer: C3ProblemTokenizer batch_args: BatchArgs shuffle: bool desc: str tqdm_args: dict | None = None chunk_size: int = 1000 - workers: int = 10 ===========changed ref 1=========== # module: coeditor.c3problem @dataclass class C3ProblemChangeDropout(C3ProblemTransform): + def __post_init__(self): + self._rng = random.Random() + ===========changed ref 2=========== # module: coeditor.c3problem @dataclass class C3ProblemChangeDropout(C3ProblemTransform): """Split the problem into fixed-sized editing ranges like `C3ProblemSimpleSplit`, but also randomly keep some subset of changes in the input. ### Change log + - v1.3: make `random_subset` truely random. - v1.2: fix newline encoding bug. - v1.1 - Dropout changes using change groups instead of individual change actions. - Perform dropout at entire problem level ratehr than chunk level. This way, changes in later chunks will be visible as well. - Removed `dropout_prob`. 
""" + VERSION = "1.3" - VERSION = "1.2" max_lines_to_edit: int = 25 max_split_factor: int = 4 # when dropping the changes into the input, the biggest ratio of changes to drop max_dropout_ratio: float = 0.5 _test_prob: float = 0.01 ===========changed ref 3=========== # module: coeditor.common + def random_subset(all, n: int, rng: random.Random | int | None = None): - def random_subset(all, n: int, rng: random.Random | None = None): if rng is None: + rng = random.Random() - rng = random.Random(42) + elif isinstance(rng, int): + rng = random.Random(rng) if isinstance(all, Sequence): + ids = list(range(len(all))) - xs = [x for x in all] + rng.shuffle(ids) - rng.shuffle(xs) + xs = [all[i] for i in ids[:n]] + return xs - return xs[:n] elif isinstance(all, Mapping): keys = [k for k in all] + ids = list(range(len(keys))) + rng.shuffle(ids) - rng.shuffle(keys) + return {(k := keys[i]): all[k] for i in ids[:n]} - return {k: all[k] for k in keys[:n]} else: raise ArgumentError(all, f"Unsupported arg type: {type(all)}") ===========changed ref 4=========== # module: coeditor.c3problem @dataclass class C3ProblemChangeDropout(C3ProblemTransform): def transform(self, prob: C3Problem) -> Sequence[C3Problem]: original = prob.span.original delta = prob.span.delta l_range = prob.edit_lines assert isinstance(l_range, range) start, stop = l_range.start, l_range.stop grouped_keys = delta.change_groups() should_dropout = len(grouped_keys) >= 2 if should_dropout: n_to_drop = int( len(grouped_keys) * random.random() * self.max_dropout_ratio ) assert n_to_drop < len(grouped_keys) + keys_to_drop = join_list( + random_subset(grouped_keys, n_to_drop, rng=self._rng) + ) - keys_to_drop = join_list(random_subset(grouped_keys, n_to_drop)) else: keys_to_drop = [] if keys_to_drop: delta1, delta2 = delta.decompose_for_change(keys_to_drop) if random.random() < self._test_prob: result1 = delta2.apply_to_change( delta1.apply_to_change(original.tolist()) ) result2 = delta.apply_to_change(original.tolist()) code1 = tokens_to_change(result1).after code2 = tokens_to_change(result2).after if code1 != code2: print_sections( ("result1", decode_tokens(result1)), ("result2", decode_tokens(result2)), ("delta", str(delta)), ("keys_to_drop", str(keys_to_drop)), ("delta1", str(delta1)), ("delta2", str(delta2)), ) raise AssertionError("decompose_for_change failed.") delta2</s>
coeditor.model/C3DataLoader.estimate_batch_stats
Modified
temp-1
4f5cb29cfbc55d7d9f6407473bc0766f831b8a94
Improve C3DataLoader performance.
<0>:<add> return size_est, batch_stats
# module: coeditor.model @dataclass class C3DataLoader: def estimate_batch_stats(self): + factor = 10 + n = max(1, len(self.all_probs) // factor) + subset = random_subset(self.all_probs, n, rng=42) + batches = self._problems_to_batches(self._to_tokenized(subset)) - batches = self._problems_to_batches(self._to_tokenized(self.all_probs)) bsizes = list[int]() + for b in tqdm(batches, desc="estimate_batch_stats", smoothing=0.0): - for b in batches: bsizes.append(len(b["input_ids"])) batch_stats = {k: f"{v:.1f}" for k, v in scalar_stats(bsizes).items()} + # better to have a smaller estimate to avoid triggering data regeneration + size_est = max(1, int(len(self.all_probs) / n * len(bsizes) * 0.99)) - return len(bsizes), batch_stats <0>
===========unchanged ref 0=========== at: coeditor.common random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2] random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1] at: coeditor.model.C3DataLoader all_probs: Sequence[C3Problem] ===========changed ref 0=========== # module: coeditor.common + def random_subset(all, n: int, rng: random.Random | int | None = None): - def random_subset(all, n: int, rng: random.Random | None = None): if rng is None: + rng = random.Random() - rng = random.Random(42) + elif isinstance(rng, int): + rng = random.Random(rng) if isinstance(all, Sequence): + ids = list(range(len(all))) - xs = [x for x in all] + rng.shuffle(ids) - rng.shuffle(xs) + xs = [all[i] for i in ids[:n]] + return xs - return xs[:n] elif isinstance(all, Mapping): keys = [k for k in all] + ids = list(range(len(keys))) + rng.shuffle(ids) - rng.shuffle(keys) + return {(k := keys[i]): all[k] for i in ids[:n]} - return {k: all[k] for k in keys[:n]} else: raise ArgumentError(all, f"Unsupported arg type: {type(all)}") ===========changed ref 1=========== # module: coeditor.model @dataclass class C3DataLoader: all_probs: Sequence[C3Problem] transform: C3ProblemTransform | None tokenizer: C3ProblemTokenizer batch_args: BatchArgs shuffle: bool desc: str tqdm_args: dict | None = None chunk_size: int = 1000 - workers: int = 10 ===========changed ref 2=========== # module: coeditor.model @dataclass class C3DataLoader: def _to_tokenized(self, probs: Sequence[C3Problem]) -> Iterable[TkC3Problem]: probs = list(probs) + if self.transform is not None: + # we can afford to store all transformed problems beforehand + probs = join_list(pmap(self.transform.transform, probs, chunksize=500)) if self.shuffle: + # we need to shuffle after the transform to help serialization + # this also mixes the problems better random.shuffle(probs) for i in range(0, len(probs), self.chunk_size): + # we can only afford to tokenize the problems on-the-fly group = probs[i : i + self.chunk_size] - if self.transform is not None: - group = join_list( - pmap(self.transform.transform, group, tqdm_args={"disable": True}) - ) yield from pmap( + self.tokenizer.tokenize_problem, + group, + tqdm_args={"disable": True}, - self.tokenizer.tokenize_problem, group, tqdm_args={"disable": True} ) ===========changed ref 3=========== # module: coeditor.c3problem @dataclass class C3ProblemChangeDropout(C3ProblemTransform): + def __post_init__(self): + self._rng = random.Random() + ===========changed ref 4=========== # module: coeditor.c3problem @dataclass class C3ProblemChangeDropout(C3ProblemTransform): """Split the problem into fixed-sized editing ranges like `C3ProblemSimpleSplit`, but also randomly keep some subset of changes in the input. ### Change log + - v1.3: make `random_subset` truely random. - v1.2: fix newline encoding bug. - v1.1 - Dropout changes using change groups instead of individual change actions. - Perform dropout at entire problem level ratehr than chunk level. This way, changes in later chunks will be visible as well. - Removed `dropout_prob`. 
""" + VERSION = "1.3" - VERSION = "1.2" max_lines_to_edit: int = 25 max_split_factor: int = 4 # when dropping the changes into the input, the biggest ratio of changes to drop max_dropout_ratio: float = 0.5 _test_prob: float = 0.01 ===========changed ref 5=========== # module: coeditor.c3problem @dataclass class C3ProblemChangeDropout(C3ProblemTransform): def transform(self, prob: C3Problem) -> Sequence[C3Problem]: original = prob.span.original delta = prob.span.delta l_range = prob.edit_lines assert isinstance(l_range, range) start, stop = l_range.start, l_range.stop grouped_keys = delta.change_groups() should_dropout = len(grouped_keys) >= 2 if should_dropout: n_to_drop = int( len(grouped_keys) * random.random() * self.max_dropout_ratio ) assert n_to_drop < len(grouped_keys) + keys_to_drop = join_list( + random_subset(grouped_keys, n_to_drop, rng=self._rng) + ) - keys_to_drop = join_list(random_subset(grouped_keys, n_to_drop)) else: keys_to_drop = [] if keys_to_drop: delta1, delta2 = delta.decompose_for_change(keys_to_drop) if random.random() < self._test_prob: result1 = delta2.apply_to_change( delta1.apply_to_change(original.tolist()) ) result2 = delta.apply_to_change(original.tolist()) code1 = tokens_to_change(result1).after code2 = tokens_to_change(result2).after if code1 != code2: print_sections( ("result1", decode_tokens(result1)), ("result2", decode_tokens(result2)), ("delta", str(delta)), ("keys_to_drop", str(keys_to_drop)), ("delta1", str(delta1)), ("delta2", str(delta2)), ) raise AssertionError("decompose_for_change failed.") delta2</s>
scripts.train_model/train_model
Modified
temp-1
4f5cb29cfbc55d7d9f6407473bc0766f831b8a94
Improve C3DataLoader performance.
<0>:<add> out_dir, random_subset(exact_correct_map, max_saved_samples, rng=42)
# module: scripts.train_model def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), eval_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): <s>fly + train_loader = C3DataLoader( + datasets["train"], + encoder.problem_tranform, + train_tkn, + batch_args, + shuffle=True, + desc="training", + ) model.train_on_data(model_name, train_loader, eval_loader, train_args) model.to("cuda") with timed_action("Loss Evaluation"): eval_result = model.eval_loss_on_loader(eval_loader) eval_dict = {f"test/{k}": v.average() for k, v in eval_result.items()} wandb.log(eval_dict) max_saved_samples = 300 with timed_action("Accuracy Evaluation"): dec_result = model.predict_on_data( datasets["test"], eval_tkn, eval_batch_args, dec_args ) pickle_dump(get_model_dir() / model_name / "dec_result.pkl", dec_result) exact_acc, exact_correct_map = dec_result.exact_match_accuracy() wandb.log({"test/exact-acc": exact_acc.average()}) out_dir = get_model_dir() / model_name / "exact_match_samples" dec_result.save_examples_to_dir( - out_dir, random_subset(exact_correct_map, max_saved_samples) <0> ) cprint("blue", "Exact-match samples saved to:", out_dir) return model
===========above chunk 0=========== # module: scripts.train_model def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), eval_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): # offset: -1 <s>training", - ) - with timed_action("Warm-up Training"): warmup_bargs = copy.deepcopy(batch_args) warmup_bargs.min_queries *= 4 warmup_bargs.max_queries *= 2 + warm_up_data = random_subset( + datasets["train"], len(datasets["train"]) // 4, rng=42 - warm_up_data = random_subset(datasets["train"], len(datasets["train"]) // 4) + ) + warmup_tkn = copy.copy(train_tkn) - warmup_tkn = copy.deepcopy(train_tkn) warmup_tkn.max_ref_tks_sum //= 3 warmup_loader = C3DataLoader( warm_up_data, encoder.problem_tranform, warmup_tkn, warmup_bargs, shuffle=True, desc="warm-up training", ) warmup_targs = copy.deepcopy(train_args) warmup_targs.learning_rate *= 4 warmup_targs.max_train_epochs = 1 model.train_on_data(model_name, warmup_loader, eval_loader, warmup_targs) with timed_action("Fine-tune Training"): + # we attach the problem transform to the dataloader to generate data on-the-fly + train_loader = C3DataLoader( + datasets["train"], + encoder.problem_tranform</s> ===========above chunk 1=========== # module: scripts.train_model def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), eval_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): # offset: -2 <s> model = RetrievalEditorModel.from_code_t5( "base", reuse_embed=True, reinit_weights=train_args.reinit_weights ) else: model = RetrievalEditorModel.load(get_model_dir() / model_name) if os.getenv("CUDA_VISIBLE_DEVICES") is None: warnings.warn( "CUDA_VISIBLE_DEVICES not set, using 0. Note that " "the Huggingface Trainer will use all visible GPUs for training." ) os.environ["CUDA_VISIBLE_DEVICES"] = "0" train_tkn = encoder.edit_tokenizer eval_tkn = copy.deepcopy(train_tkn) eval_tkn.max_ref_tks_sum *= 2 eval_loader = C3DataLoader( datasets["valid"], None, eval_tkn, eval_batch_args, shuffle=False, desc="eval" ) if not eval_only: - # we attach the problem transform to the dataloader to generate data on-the-fly - train_loader = C3DataLoader( - datasets["train"], - encoder.problem_tranform, - train_tkn, - batch_args, - shuffle=True, - desc="training", - ) - with timed_action("Warm-up Training"): warmup_bargs =</s> ===========above chunk 2=========== # module: scripts.train_model def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), eval_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): # offset: -3 <s> "quicktest-" + model_name if not eval_only: check_save_dir(model_name) # problems will be transformed and saved for valid and test but not train. 
datasets = make_or_load_dataset( dataset_name, encoder.change_processor, encoder.problem_tranform, remake_problems=recreate_data, ) config_dict = { k: get_modified_args(v) for k, v in { "edit_tokenizer": encoder.edit_tokenizer.get_args(), "batch_args": batch_args, "train_args": train_args, "dec_args": dec_args, }.items() } project = "Coeditor" if not train_args.quicktest else "Coeditor-quicktest" if eval_only: project = "eval-" + project wandb.init(dir="..", project=project, name=model_name, config=config_dict) if train_args.quicktest: print("Using fewer data for quick test.") n_quick_exs = 20 datasets = C3ProblemDataset( train=datasets["train"][:n_quick_exs], valid=datasets["valid"][:n_quick_exs], test=datasets["test"][:n_quick_exs], ) if not eval_only:</s> ===========above chunk 3=========== # module: scripts.train_model def train_model( dataset_name="medium", model_variant="-sig-analysis-post_usees", encoder: C3EditEncoder = C3EditEncoder(), batch_args=BatchArgs.train_default(), eval_batch_args=BatchArgs.eval_default(), train_args=TrainingArgs(), recreate_data: bool = False, eval_only: bool = False, ): # offset: -4 # model_variant = "-file" model_name = f"coeditor-{dataset_name}" model_name += model_variant dec_args = DecodingArgs() if train_args.quicktest: model_</s>