path stringlengths 15-77 | type stringclasses 1 value | project stringclasses 1 value | commit_hash stringlengths 40-40 | commit_message stringlengths 15-198 | ground_truth stringlengths 26-155 | main_code stringlengths 176-2.5k | context stringlengths 91-9.37k |
---|---|---|---|---|---|---|---|
coeditor.model/RetrievalDecodingResult.exact_match_accuracy | Modified | temp-1 | ed6838082d5251960112524997fdb2cffc0bee7e | Fix TkDelta from output_tks. | <0>:<add> label_delta = TkDelta.from_output_tks(prob.edit_lines, mp["labels"])
| # module: coeditor.model
@dataclass
class RetrievalDecodingResult:
def exact_match_accuracy(self) -> tuple[CountedSum, dict[int, bool]]:
ex2correct = dict[int, bool]()
bad_probs = list[C3Problem]()
for i, mp in enumerate(self.predictions):
prob = self.problems[i]
original = prob.span.original.tolist()
+ pred_delta = TkDelta.from_output_tks(prob.edit_lines, mp["output_ids"])
- pred_delta = TkDelta.from_output_tks(mp["output_ids"])
- label_delta = TkDelta.from_output_tks(mp["labels"])
<0> if not prob.edit_lines:
bad_probs.append(prob)
continue
line_shift = prob.edit_lines[0]
pred_change = pred_delta.shifted(line_shift).apply_to_change(original)
label_change = label_delta.shifted(line_shift).apply_to_change(original)
pred_code = tokens_to_change(pred_change).after
label_code = tokens_to_change(label_change).after
ex2correct[i] = code_equal(pred_code, label_code)
correct_count = CountedSum(sum(ex2correct.values()), len(ex2correct))
if bad_probs:
cprint("yellow", "Number of problems with no edits:", len(bad_probs))
for prob in bad_probs[:5]:
print(prob.summary())
return correct_count, ex2correct
| ===========unchanged ref 0===========
at: coeditor._utils
cprint(color: str, *elems, sep: Optional[str]=..., end: Optional[str]=..., file: Optional[SupportsWrite[str]]=..., flush: bool=...)
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
summary() -> str
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
at: coeditor.common
CountedSum = WeightedSum[int, int]
code_equal(code1: str, code2: str) -> bool
at: coeditor.encoding
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
tokens_to_change(tokens: TokenSeq) -> Modified[str]
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
apply_to_change(change: TokenSeq) -> TokenSeq
shifted(shift_lines: int) -> Self
===========unchanged ref 1===========
from_output_tks(edit_line_ids: Sequence[int], tks: TokenSeq, allow_truncated_tks: bool=True) -> "TkDelta"
at: coeditor.model.RetrievalDecodingResult
eval_args: dict
problems: Sequence[C3Problem]
predictions: Sequence[RetrievalModelPrediction]
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
|
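The example above scores a prediction by applying both the predicted and the label delta to the original tokens and comparing the resulting programs. A minimal standalone sketch of that accounting, with a whitespace-stripping stand-in for Coeditor's AST-based `code_equal` (an illustrative assumption, not the real comparison):

```python
# Hypothetical mimic of the accuracy loop above; the real code_equal
# normalizes code by AST, this stand-in only strips whitespace.
def code_equal(a: str, b: str) -> bool:
    return "".join(a.split()) == "".join(b.split())

def exact_match_accuracy(preds: list[str], labels: list[str]) -> tuple[int, int]:
    ex2correct = {i: code_equal(p, l) for i, (p, l) in enumerate(zip(preds, labels))}
    return sum(ex2correct.values()), len(ex2correct)  # (correct, total), like CountedSum

print(exact_match_accuracy(["x=1"], ["x = 1"]))  # (1, 1)
```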
coeditor.model/RetrievalEditorModel.predict_on_batch | Modified | temp-1 | ed6838082d5251960112524997fdb2cffc0bee7e | Fix TkDelta from output_tks. | <0>:<add> pred = apply_output_tks_to_change(change_tks, 0, out)
| # module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
@torch.autocast("cuda")
def predict_on_batch(
self,
batch: dict,
+ originals: Sequence[TokenSeq],
- requests: Sequence["EditRequest"],
dec_args: DecodingArgs,
n_solutions: int = 1,
) -> list[list[PredictedChange]]:
<s>_list([[x] * N for x in originals])
- requests = join_list([[r] * N for r in requests])
if (pred_scores := gen_out.get("sequences_scores", None)) is None:
pred_scores = [0.0] * len(out_tks)
if use_sampling:
pred_weights = [1.0 / N] * len(out_tks)
else:
pred_weights = [math.exp(x) for x in pred_scores]
with timed("assemble changes"):
pred_changes = list[Modified[str]]()
+ for change_tks, out in zip(originals, out_tks):
- for req, out in zip(requests, out_tks):
- change = req.target.map(lambda x: x.code)
- change_tks = change_to_tokens(change)
- pred = apply_output_tks_to_change(change_tks, req.respect_lines, out)
<0> pred_changes.append(pred)
assert_eq(len(pred_changes), len(out_tks), len(pred_scores))
solutions = list[list[PredictedChange]]()
for i in range(0, len(pred_changes), N):
sols = marginalize_preds(
pred_changes[i : i + N],
out_tks[i : i + N],
pred_weights[i : i + N],
pred_scores[i : i + N],
)
solutions.append(sols[:n_solutions])
return solutions
| ===========above chunk 0===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
@torch.autocast("cuda")
def predict_on_batch(
self,
batch: dict,
+ originals: Sequence[TokenSeq],
- requests: Sequence["EditRequest"],
dec_args: DecodingArgs,
n_solutions: int = 1,
) -> list[list[PredictedChange]]:
# offset: -1
<s>:
N = dec_args.num_beams or 1
gen_args = dec_args.to_model_args()
input_ids = batch["input_ids"]
if not isinstance(input_ids, torch.LongTensor):
input_ids = torch.LongTensor(input_ids)
with timed("model.generate"), tqdm(total=dec_args.max_output_tks) as pbar:
gen_out = self.generate(
input_ids.to(self.device),
references=batch["references"],
query_ref_list=batch["query_ref_list"],
num_return_sequences=N,
return_dict_in_generate=True,
output_scores=True,
**gen_args,
tqdm=pbar,
)
assert not isinstance(gen_out, torch.LongTensor)
out_tks = gen_out["sequences"]
if isinstance(out_tks, torch.Tensor):
out_tks = out_tks.tolist()
out_tks = [remove_pad_ids(x) for x in out_tks]
assert isinstance(out_tks, list)
logging.debug("Max out length:", max(len(x) for x in out_tks))
+ assert_eq(len(out_tks), len(originals) * N)
- assert_eq(len(out_tks), len(requests) * N)
+ originals = join_list([[x] * N for x in originals])
- requests = join_list([[r] * N for r</s>
===========above chunk 1===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
@torch.autocast("cuda")
def predict_on_batch(
self,
batch: dict,
+ originals: Sequence[TokenSeq],
- requests: Sequence["EditRequest"],
dec_args: DecodingArgs,
n_solutions: int = 1,
) -> list[list[PredictedChange]]:
# offset: -2
<s>[TokenSeq],
weights: Sequence[float],
scores: Sequence[float],
) -> list[PredictedChange]:
"""For sampling techniques, all sample should have equal weights 1/N. For
search-based techniques, the `weights` should equal to the solutions' probabilities."""
assert preds
groups = groupby(
range(len(preds)),
keyfunc=lambda i: normalize_code_by_ast(preds[i].after),
)
groups = list(groups.values())
for group in groups:
# within each group, sort by score
group.sort(key=lambda i: scores[i], reverse=True)
groups.sort(
key=lambda g: (sum(weights[i] for i in g), scores[g[0]]), reverse=True
)
return [
PredictedChange(
preds[g[0]], out_tks[g[0]], sum(weights[i] for i in g), len(g)
)
for g in groups
]
use_sampling = dec_args.marginalize_samples > 1
if use_sampling:
assert_eq(dec_args.do_sample, True)
assert_eq(dec_args.num_beams, 1)
N = dec_args.marginalize_samples
else:
N = dec_args.num_beams or 1
gen_args = dec_args.to_model_args</s>
===========above chunk 2===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
@torch.autocast("cuda")
def predict_on_batch(
self,
batch: dict,
+ originals: Sequence[TokenSeq],
- requests: Sequence["EditRequest"],
dec_args: DecodingArgs,
n_solutions: int = 1,
) -> list[list[PredictedChange]]:
# offset: -3
"""
Returns nested list of shape `(batch_size, n_solutions)`.
"""
timed = self.tlogger.timed
def marginalize_preds(
preds: Sequence[Modified[str]],
out_tks</s>
===========unchanged ref 0===========
at: coeditor._utils
groupby(iterable: Iterable[T1], keyfunc: Callable[[T1], T2]) -> dict[T2, list[T1]]
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
timed(self, name: str)
at: coeditor.change
Modified(before: E1, after: E1, unchanged: bool=False)
at: coeditor.change.Modified
after: E1
at: coeditor.common
TokenSeq = list[Token]
join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
normalize_code_by_ast(code: str, sort_keyargs: bool=True, remove_doc_string: bool=True) -> str
at: coeditor.model
DecodingArgs(max_output_tks: int=512, do_sample: bool=False, top_p: float=0.9, num_beams: Optional[int]=1, length_penalty: float=0.0, marginalize_samples: int=1)
remove_pad_ids(ids: TokenSeq) -> TokenSeq
PredictedChange(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
at: coeditor.model.DecodingArgs
max_output_tks: int = 512
do_sample: bool = False
top_p: float = 0.9
num_beams: Optional[int] = 1
length_penalty: float = 0.0
marginalize_samples: int = 1
to_model_args() -> dict
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
|
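`marginalize_preds` in the example above merges beam or sample outputs that normalize to the same program, sums their weights, and ranks the groups. A self-contained sketch of that grouping, with whitespace stripping standing in for `normalize_code_by_ast` (an assumption for illustration):

```python
from collections import defaultdict

# Hypothetical mimic of marginalize_preds: group by normalized code,
# rank groups by total weight, keep the best-scoring member of each.
def marginalize(preds: list[str], weights: list[float], scores: list[float]) -> list[tuple[str, float]]:
    groups: dict[str, list[int]] = defaultdict(list)
    for i, p in enumerate(preds):
        groups["".join(p.split())].append(i)  # stand-in for AST normalization
    ranked = sorted(
        groups.values(),
        key=lambda g: (sum(weights[i] for i in g), max(scores[i] for i in g)),
        reverse=True,
    )
    return [(preds[max(g, key=lambda i: scores[i])], sum(weights[i] for i in g)) for g in ranked]

print(marginalize(["x=1", "x = 1", "x=2"], [0.4, 0.4, 0.2], [-0.1, -0.2, -0.3]))
# [('x=1', 0.8), ('x=2', 0.2)]
```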
coeditor.encoding/TkDelta.from_output_tks | Modified | temp-1 | 126ebdc082f96cc4670cb37357e850d259b00c68 | Initial implementation of new service. - Make LineRange a class. - Fix JModuleChanges. - Separate out some C3 generator logic. | <0>:<add> deltas = {l: seg_to_tuple(seg) for l, seg in zip(lines, segs.values()) if seg}
| # module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
@staticmethod
+ def from_output_tks(lines: Sequence[int], tks: TokenSeq) -> "TkDelta":
- def from_output_tks(tks: TokenSeq) -> "TkDelta":
ad_tks = (Add_id, Del_id)
def seg_to_tuple(seg: TokenSeq) -> tuple[TokenSeq]:
result = list[TokenSeq]()
ptr = 0
for i, x in enumerate(seg):
if i > 0 and x in ad_tks:
if seg[ptr] in ad_tks:
result.append(seg[ptr:i])
ptr = i
if ptr < len(seg) and seg[ptr] in ad_tks:
result.append(seg[ptr:])
return tuple(result)
segs = output_ids_as_seqs(tks)
- deltas = {
- extra_id_to_number(k): seg_to_tuple(seg) for k, seg in segs.items() if seg
- }
+ assert_eq(len(segs), len(lines))
<0> return TkDelta(deltas)
| ===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.encoding
Add_id = get_tk_id(Add)
Del_id = get_tk_id(Del)
output_ids_as_seqs(output_ids: Iterable[Token]) -> dict[Token, TokenSeq]
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
|
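The key change in this example is that `from_output_tks` now maps the k-th `<extra_id_k>` segment to the k-th requested edit line instead of decoding the line number from the extra id itself. The `seg_to_tuple` helper splits each segment into consecutive `<add>`/`<del>` actions; a standalone mimic with made-up integer ids for the two special tokens:

```python
ADD, DEL = 1001, 1002  # placeholder ids for the <add>/<del> special tokens

def seg_to_tuple(seg: list[int]) -> tuple[list[int], ...]:
    # Split a segment at every <add>/<del> boundary after the first token.
    result: list[list[int]] = []
    ptr = 0
    for i, x in enumerate(seg):
        if i > 0 and x in (ADD, DEL):
            if seg[ptr] in (ADD, DEL):
                result.append(seg[ptr:i])
            ptr = i
    if ptr < len(seg) and seg[ptr] in (ADD, DEL):
        result.append(seg[ptr:])
    return tuple(result)

# Two actions: "<add> 7 8" followed by "<del> 9".
print(seg_to_tuple([ADD, 7, 8, DEL, 9]))  # ([1001, 7, 8], [1002, 9])
```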
coeditor.scoped_changes/line_range | Modified | temp-1 | 126ebdc082f96cc4670cb37357e850d259b00c68 | Initial implementation of new service. - Make LineRange a class. - Fix JModuleChanges. - Separate out some C3 generator logic. | <0>:<add> return LineRange(start, end)
| # module: coeditor.scoped_changes
def line_range(start: int, end: int, can_be_empty: bool = False) -> LineRange:
if not can_be_empty and start >= end:
raise ValueError(f"Bad line range: {start=}, {end=}")
- return LineRange((start, end))
<0>
| ===========unchanged ref 0===========
at: coeditor.scoped_changes.LineRange
start: int
===========changed ref 0===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ start: int
+ until: int
+
===========changed ref 1===========
# module: coeditor.scoped_changes
ScopeTree = ptree.Function | ptree.Class | ptree.Module
PyNode = ptree.PythonBaseNode | ptree.PythonNode
- LineRange = NewType("LineRange", tuple[int, int])
_tlogger = TimeLogger()
===========changed ref 2===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
@staticmethod
+ def from_output_tks(lines: Sequence[int], tks: TokenSeq) -> "TkDelta":
- def from_output_tks(tks: TokenSeq) -> "TkDelta":
ad_tks = (Add_id, Del_id)
def seg_to_tuple(seg: TokenSeq) -> tuple[TokenSeq]:
result = list[TokenSeq]()
ptr = 0
for i, x in enumerate(seg):
if i > 0 and x in ad_tks:
if seg[ptr] in ad_tks:
result.append(seg[ptr:i])
ptr = i
if ptr < len(seg) and seg[ptr] in ad_tks:
result.append(seg[ptr:])
return tuple(result)
segs = output_ids_as_seqs(tks)
- deltas = {
- extra_id_to_number(k): seg_to_tuple(seg) for k, seg in segs.items() if seg
- }
+ assert_eq(len(segs), len(lines))
+ deltas = {l: seg_to_tuple(seg) for l, seg in zip(lines, segs.values()) if seg}
return TkDelta(deltas)
|
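This commit replaces the old `NewType` alias over a plain tuple with a `NamedTuple`, which is what lets later examples write `line in span.line_range` and `span.line_range.to_range()`. The class as given in the changed refs, runnable on its own:

```python
from typing import NamedTuple

class LineRange(NamedTuple):
    start: int
    until: int  # exclusive, as enforced by line_range() above

    def __contains__(self, l: int) -> bool:
        return self.start <= l < self.until

    def to_range(self) -> range:
        return range(self.start, self.until)

r = LineRange(3, 7)
print(5 in r, 7 in r, list(r.to_range()))  # True False [3, 4, 5, 6]
```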
coeditor.scoped_changes/ChangeScope._search_span | Modified | temp-1 | 126ebdc082f96cc4670cb37357e850d259b00c68 | Initial implementation of new service. - Make LineRange a class. - Fix JModuleChanges. - Separate out some C3 generator logic. | <0>:<add> if line in span.line_range:
| # module: coeditor.scoped_changes
@dataclass
class ChangeScope:
def _search_span(self, line: int) -> "StatementSpan | None":
for span in self.spans:
- if span.line_range[0] <= line < span.line_range[1]:
<0> return span
return None
| ===========unchanged ref 0===========
at: coeditor.common
ElemPath = str
at: coeditor.scoped_changes.ChangeScope
path: ProjectPath
tree: ScopeTree
spans: Sequence["StatementSpan"]
subscopes: Mapping[str, Self]
parent_scope: "ChangeScope | None"
_search_scope(path: ElemPath) -> Self
at: coeditor.scoped_changes.ChangeScope.search_span_by_line
span = s.search_span_by_line(line)
===========changed ref 0===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
+ def search_span_by_line(self, line: int) -> "StatementSpan | None":
+ # TODO: optimize this to avoid linear scan
+ span = self._search_span(line)
+ if span is not None:
+ return span
+ for s in self.subscopes.values():
+ span = s.search_span_by_line(line)
+ if span is not None:
+ return span
+
===========changed ref 1===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def to_range(self) -> range:
+ return range(self.start, self.until)
+
===========changed ref 2===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def __contains__(self, l: int) -> bool:
+ return self.start <= l < self.until
+
===========changed ref 3===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ start: int
+ until: int
+
===========changed ref 4===========
# module: coeditor.scoped_changes
def line_range(start: int, end: int, can_be_empty: bool = False) -> LineRange:
if not can_be_empty and start >= end:
raise ValueError(f"Bad line range: {start=}, {end=}")
+ return LineRange(start, end)
- return LineRange((start, end))
===========changed ref 5===========
# module: coeditor.scoped_changes
ScopeTree = ptree.Function | ptree.Class | ptree.Module
PyNode = ptree.PythonBaseNode | ptree.PythonNode
- LineRange = NewType("LineRange", tuple[int, int])
_tlogger = TimeLogger()
===========changed ref 6===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
@staticmethod
+ def from_output_tks(lines: Sequence[int], tks: TokenSeq) -> "TkDelta":
- def from_output_tks(tks: TokenSeq) -> "TkDelta":
ad_tks = (Add_id, Del_id)
def seg_to_tuple(seg: TokenSeq) -> tuple[TokenSeq]:
result = list[TokenSeq]()
ptr = 0
for i, x in enumerate(seg):
if i > 0 and x in ad_tks:
if seg[ptr] in ad_tks:
result.append(seg[ptr:i])
ptr = i
if ptr < len(seg) and seg[ptr] in ad_tks:
result.append(seg[ptr:])
return tuple(result)
segs = output_ids_as_seqs(tks)
- deltas = {
- extra_id_to_number(k): seg_to_tuple(seg) for k, seg in segs.items() if seg
- }
+ assert_eq(len(segs), len(lines))
+ deltas = {l: seg_to_tuple(seg) for l, seg in zip(lines, segs.values()) if seg}
return TkDelta(deltas)
|
coeditor.scoped_changes/ChangedSpan.__repr__ | Modified | temp-1 | 126ebdc082f96cc4670cb37357e850d259b00c68 | Initial implementation of new service. - Make LineRange a class. - Fix JModuleChanges. - Separate out some C3 generator logic. | <0>:<add> return f"ChangeSpan(module={self.module}, range={self.line_range}, scope={self.scope.earlier.path.path}, type={self.change.as_char()})"
| # module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
def __repr__(self) -> str:
- return f"ChangeSpan(scope={self.path}, range={self.line_range}, type={self.change.as_char()})"
<0>
| ===========unchanged ref 0===========
at: coeditor.common
print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None
===========changed ref 0===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
- @property
- def path(self) -> ProjectPath:
- return self.parent_scopes[-1].earlier.path
-
===========changed ref 1===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
def __repr__(self):
+ return (
+ f"ChangeScope(path={self.path}, type={self.tree.type}, spans={self.spans})"
- return f"ChangeScope(path={self.path}, type={self.tree.type})"
+ )
===========changed ref 2===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
def _search_span(self, line: int) -> "StatementSpan | None":
for span in self.spans:
+ if line in span.line_range:
- if span.line_range[0] <= line < span.line_range[1]:
return span
return None
===========changed ref 3===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
+ def search_span_by_line(self, line: int) -> "StatementSpan | None":
+ # TODO: optimize this to avoid linear scan
+ span = self._search_span(line)
+ if span is not None:
+ return span
+ for s in self.subscopes.values():
+ span = s.search_span_by_line(line)
+ if span is not None:
+ return span
+
===========changed ref 4===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def to_range(self) -> range:
+ return range(self.start, self.until)
+
===========changed ref 5===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def __contains__(self, l: int) -> bool:
+ return self.start <= l < self.until
+
===========changed ref 6===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ start: int
+ until: int
+
===========changed ref 7===========
# module: coeditor.scoped_changes
def line_range(start: int, end: int, can_be_empty: bool = False) -> LineRange:
if not can_be_empty and start >= end:
raise ValueError(f"Bad line range: {start=}, {end=}")
+ return LineRange(start, end)
- return LineRange((start, end))
===========changed ref 8===========
# module: coeditor.scoped_changes
ScopeTree = ptree.Function | ptree.Class | ptree.Module
PyNode = ptree.PythonBaseNode | ptree.PythonNode
- LineRange = NewType("LineRange", tuple[int, int])
_tlogger = TimeLogger()
===========changed ref 9===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
@staticmethod
+ def from_output_tks(lines: Sequence[int], tks: TokenSeq) -> "TkDelta":
- def from_output_tks(tks: TokenSeq) -> "TkDelta":
ad_tks = (Add_id, Del_id)
def seg_to_tuple(seg: TokenSeq) -> tuple[TokenSeq]:
result = list[TokenSeq]()
ptr = 0
for i, x in enumerate(seg):
if i > 0 and x in ad_tks:
if seg[ptr] in ad_tks:
result.append(seg[ptr:i])
ptr = i
if ptr < len(seg) and seg[ptr] in ad_tks:
result.append(seg[ptr:])
return tuple(result)
segs = output_ids_as_seqs(tks)
- deltas = {
- extra_id_to_number(k): seg_to_tuple(seg) for k, seg in segs.items() if seg
- }
+ assert_eq(len(segs), len(lines))
+ deltas = {l: seg_to_tuple(seg) for l, seg in zip(lines, segs.values()) if seg}
return TkDelta(deltas)
|
coeditor.scoped_changes/JModuleChange.__repr__ | Modified | temp-1 | 126ebdc082f96cc4670cb37357e850d259b00c68 | Initial implementation of new service. - Make LineRange a class. - Fix JModuleChanges. - Separate out some C3 generator logic. | <0>:<add> return f"JModuleChange({self.changed})"
| # module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
def __repr__(self) -> str:
- change_dict = {k.path: v.change.as_char() for k, v in self.changed.items()}
- return f"JModuleChange({change_dict})"
<0>
| ===========unchanged ref 0===========
at: coeditor.change.Added
after: E1
as_char()
at: coeditor.change.Deleted
before: E1
as_char()
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
as_char()
at: coeditor.common.ProjectPath
module: ModuleName
path: ElemPath
at: coeditor.scoped_changes.ChangeScope
path: ProjectPath
at: coeditor.scoped_changes.ChangedSpan
change: Change[str]
parent_scopes: Sequence[Change[ChangeScope]]
line_range: LineRange
===========changed ref 0===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ @property
+ def scope(self) -> Change[ChangeScope]:
+ return self.parent_scopes[-1]
+
===========changed ref 1===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ @property
+ def module(self) -> ModuleName:
+ return self.parent_scopes[-1].earlier.path.module
+
===========changed ref 2===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
module_change: Change[JModule]
+ changed: Sequence[ChangedSpan]
- changed: Mapping[ProjectPath, ChangedSpan]
===========changed ref 3===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
- @property
- def path(self) -> ProjectPath:
- return self.parent_scopes[-1].earlier.path
-
===========changed ref 4===========
# module: coeditor.scoped_changes
@dataclass
class StatementSpan:
+ def __repr__(self):
+ preview = self.code
+ str_limit = 30
+ if len(preview) > str_limit:
+ preview = preview[:str_limit] + "..."
+ return f"StatementSpan({self.line_range}, code={repr(preview)})"
+
===========changed ref 5===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
def __repr__(self) -> str:
+ return f"ChangeSpan(module={self.module}, range={self.line_range}, scope={self.scope.earlier.path.path}, type={self.change.as_char()})"
- return f"ChangeSpan(scope={self.path}, range={self.line_range}, type={self.change.as_char()})"
===========changed ref 6===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
def __repr__(self):
+ return (
+ f"ChangeScope(path={self.path}, type={self.tree.type}, spans={self.spans})"
- return f"ChangeScope(path={self.path}, type={self.tree.type})"
+ )
===========changed ref 7===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
def _search_span(self, line: int) -> "StatementSpan | None":
for span in self.spans:
+ if line in span.line_range:
- if span.line_range[0] <= line < span.line_range[1]:
return span
return None
===========changed ref 8===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
+ def search_span_by_line(self, line: int) -> "StatementSpan | None":
+ # TODO: optimize this to avoid linear scan
+ span = self._search_span(line)
+ if span is not None:
+ return span
+ for s in self.subscopes.values():
+ span = s.search_span_by_line(line)
+ if span is not None:
+ return span
+
===========changed ref 9===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def to_range(self) -> range:
+ return range(self.start, self.until)
+
===========changed ref 10===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def __contains__(self, l: int) -> bool:
+ return self.start <= l < self.until
+
===========changed ref 11===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ start: int
+ until: int
+
===========changed ref 12===========
# module: coeditor.scoped_changes
def line_range(start: int, end: int, can_be_empty: bool = False) -> LineRange:
if not can_be_empty and start >= end:
raise ValueError(f"Bad line range: {start=}, {end=}")
+ return LineRange(start, end)
- return LineRange((start, end))
===========changed ref 13===========
# module: coeditor.scoped_changes
ScopeTree = ptree.Function | ptree.Class | ptree.Module
PyNode = ptree.PythonBaseNode | ptree.PythonNode
- LineRange = NewType("LineRange", tuple[int, int])
_tlogger = TimeLogger()
===========changed ref 14===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
@staticmethod
+ def from_output_tks(lines: Sequence[int], tks: TokenSeq) -> "TkDelta":
- def from_output_tks(tks: TokenSeq) -> "TkDelta":
ad_tks = (Add_id, Del_id)
def seg_to_tuple(seg: TokenSeq) -> tuple[TokenSeq]:
result = list[TokenSeq]()
ptr = 0
for i, x in enumerate(seg):
if i > 0 and x in ad_tks:
if seg[ptr] in ad_tks:
result.append(seg[ptr:i])
ptr = i
if ptr < len(seg) and seg[ptr] in ad_tks:
result.append(seg[ptr:])
return tuple(result)
segs = output_ids_as_seqs(tks)
- deltas = {
- extra_id_to_number(k): seg_to_tuple(seg) for k, seg in segs.items() if seg
- }
+ assert_eq(len(segs), len(lines))
+ deltas = {l: seg_to_tuple(seg) for l, seg in zip(lines, segs.values()) if seg}
return TkDelta(deltas)
|
coeditor.scoped_changes/code_to_module | Modified | temp-1 | 126ebdc082f96cc4670cb37357e850d259b00c68 | Initial implementation of new service. - Make LineRange a class. - Fix JModuleChanges. - Separate out some C3 generator logic. | <0>:<add> return parso.parse(code)
| # module: coeditor.scoped_changes
def code_to_module(code: str) -> ptree.Module:
- m = jedi.Script(code)._module_node
- assert isinstance(m, ptree.Module)
- return m
<0>
| ===========unchanged ref 0===========
at: coeditor.change
Modified(before: E1, after: E1, unchanged: bool=False)
at: coeditor.scoped_changes
ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange)
===========changed ref 0===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ @property
+ def scope(self) -> Change[ChangeScope]:
+ return self.parent_scopes[-1]
+
===========changed ref 1===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ @property
+ def module(self) -> ModuleName:
+ return self.parent_scopes[-1].earlier.path.module
+
===========changed ref 2===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
module_change: Change[JModule]
+ changed: Sequence[ChangedSpan]
- changed: Mapping[ProjectPath, ChangedSpan]
===========changed ref 3===========
# module: coeditor.scoped_changes
+ def _parse_module_script(project: Path, path: Path):
+ assert path.is_absolute(), f"Path is not absolute: {path=}"
+ script = jedi.Script(path=path, project=project)
+ mcontext = script._get_module_context()
+ assert isinstance(mcontext, ModuleContext)
+ mname = cast(str, mcontext.py__name__())
+ if mname.startswith("src."):
+ e = ValueError(f"Bad module name: {mname}")
+ files = list(project.iterdir())
+ print_err(f"project: {project}", file=sys.stderr)
+ print_err(f"files in root: {files}", file=sys.stderr)
+ raise e
+ m = script._module_node
+ assert isinstance(m, ptree.Module)
+ # mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))
+ # m = parso.parse(path.read_text())
+ jmod = JModule(mname, m)
+ return jmod, script
+
===========changed ref 4===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
def __repr__(self) -> str:
- change_dict = {k.path: v.change.as_char() for k, v in self.changed.items()}
+ return f"JModuleChange({self.changed})"
- return f"JModuleChange({change_dict})"
===========changed ref 5===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
- @property
- def path(self) -> ProjectPath:
- return self.parent_scopes[-1].earlier.path
-
===========changed ref 6===========
# module: coeditor.scoped_changes
@dataclass
class StatementSpan:
+ def __repr__(self):
+ preview = self.code
+ str_limit = 30
+ if len(preview) > str_limit:
+ preview = preview[:str_limit] + "..."
+ return f"StatementSpan({self.line_range}, code={repr(preview)})"
+
===========changed ref 7===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
def __repr__(self) -> str:
+ return f"ChangeSpan(module={self.module}, range={self.line_range}, scope={self.scope.earlier.path.path}, type={self.change.as_char()})"
- return f"ChangeSpan(scope={self.path}, range={self.line_range}, type={self.change.as_char()})"
===========changed ref 8===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
@staticmethod
def from_modules(module_change: Change[JModule]):
"Compute the change spans from two versions of the same module."
with _tlogger.timed("JModuleChange.from_modules"):
- changed = dict[ProjectPath, ChangedSpan]()
+ changed = get_changed_spans(
- for cspan in get_changed_spans(
module_change.map(lambda m: m.as_scope), tuple()
+ )
- ):
- path = cspan.parent_scopes[-1].earlier.path
- changed[path] = cspan
return JModuleChange(module_change, changed)
===========changed ref 9===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
def __repr__(self):
+ return (
+ f"ChangeScope(path={self.path}, type={self.tree.type}, spans={self.spans})"
- return f"ChangeScope(path={self.path}, type={self.tree.type})"
+ )
===========changed ref 10===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
def _search_span(self, line: int) -> "StatementSpan | None":
for span in self.spans:
+ if line in span.line_range:
- if span.line_range[0] <= line < span.line_range[1]:
return span
return None
===========changed ref 11===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
+ def search_span_by_line(self, line: int) -> "StatementSpan | None":
+ # TODO: optimize this to avoid linear scan
+ span = self._search_span(line)
+ if span is not None:
+ return span
+ for s in self.subscopes.values():
+ span = s.search_span_by_line(line)
+ if span is not None:
+ return span
+
===========changed ref 12===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def to_range(self) -> range:
+ return range(self.start, self.until)
+
===========changed ref 13===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def __contains__(self, l: int) -> bool:
+ return self.start <= l < self.until
+
===========changed ref 14===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ start: int
+ until: int
+
===========changed ref 15===========
# module: coeditor.scoped_changes
def line_range(start: int, end: int, can_be_empty: bool = False) -> LineRange:
if not can_be_empty and start >= end:
raise ValueError(f"Bad line range: {start=}, {end=}")
+ return LineRange(start, end)
- return LineRange((start, end))
===========changed ref 16===========
# module: coeditor.scoped_changes
ScopeTree = ptree.Function | ptree.Class | ptree.Module
PyNode = ptree.PythonBaseNode | ptree.PythonNode
- LineRange = NewType("LineRange", tuple[int, int])
_tlogger = TimeLogger()
|
coeditor.c3problem/C3ProblemGenerator.pre_edit_analysis | Modified | temp-1 | 126ebdc082f96cc4670cb37357e850d259b00c68 | Initial implementation of new service. - Make LineRange a class. - Fix JModuleChanges. - Separate out some C3 generator logic. | <0>:<add> script, lines_to_analyze, silent=True
| # module: coeditor.c3problem
class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
def pre_edit_analysis(
self,
pstate: ProjectState,
modules: Mapping[RelPath, JModule],
changes: Mapping[ModuleName, JModuleChange],
) -> Mapping[ModuleName, LineUsageAnalysis]:
"Return the definition usages of each line."
- project = pstate.project
result = dict[ModuleName, LineUsageAnalysis]()
src_map = {m.mname: f for f, m in modules.items()}
for mname, mchange in changes.items():
if not isinstance(mchange.module_change, Modified):
continue
lines_to_analyze = set[int]()
+ for span in mchange.changed:
- for span in mchange.changed.values():
if span.change is Added:
continue
+ lines_to_analyze.update(span.line_range.to_range())
- lines_to_analyze.update(range(*span.line_range))
+ lines_to_analyze.update(span.header_line_range.to_range())
- lines_to_analyze.update(range(*span.header_line_range))
mod_path = src_map[mname]
script = pstate.scripts[mod_path]
line_usages = self.analyzer.get_line_usages(
- script, project.path, lines_to_analyze, silent=True
<0> )
result[mname] = line_usages
return result
| ===========unchanged ref 0===========
at: coeditor.c3problem
LineUsageAnalysis(line2usages: Mapping[int, set[PyDefinition]])
at: coeditor.c3problem.C3ProblemGenerator.__init__
self.analyzer = analyzer
self._is_training: bool = False
at: coeditor.c3problem.JediUsageAnalyzer
_KnownJediErrors = {
"not enough values to unpack (expected 2",
"'Newline' object has no attribute 'children'",
"trailer_op is actually ",
"There's a scope that was not managed: <Module",
"maximum recursion depth exceeded",
"'NoneType' object has no attribute 'type'",
}
get_line_usages(script: jedi.Script, lines_to_analyze: Collection[int], silent: bool=False)
at: coeditor.change
Added(after: E1)
Modified(before: E1, after: E1, unchanged: bool=False)
at: coeditor.common
RelPath = NewType("RelPath", Path)
ModuleName = str
at: coeditor.scoped_changes
JModule(mname: ModuleName, tree: ptree.Module)
JModuleChange(module_change: Change[JModule], changed: Sequence[ChangedSpan])
ProjectState(project: jedi.Project, scripts: Mapping[RelPath, jedi.Script])
at: coeditor.scoped_changes.ChangedSpan
change: Change[str]
parent_scopes: Sequence[Change[ChangeScope]]
line_range: LineRange
at: coeditor.scoped_changes.JModule
mname: ModuleName
tree: ptree.Module
at: coeditor.scoped_changes.JModuleChange
module_change: Change[JModule]
changed: Sequence[ChangedSpan]
at: coeditor.scoped_changes.LineRange
start: int
until: int
to_range() -> range
===========unchanged ref 1===========
at: coeditor.scoped_changes.ProjectChangeProcessor
pre_edit_analysis(self, pstate: ProjectState, modules: Mapping[RelPath, JModule], changes: Mapping[ModuleName, JModuleChange]) -> Any
at: coeditor.scoped_changes.ProjectState
project: jedi.Project
scripts: Mapping[RelPath, jedi.Script]
at: typing
Mapping = _alias(collections.abc.Mapping, 2)
at: typing.Mapping
items() -> AbstractSet[Tuple[_KT, _VT_co]]
===========changed ref 0===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def to_range(self) -> range:
+ return range(self.start, self.until)
+
===========changed ref 1===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
module_change: Change[JModule]
+ changed: Sequence[ChangedSpan]
- changed: Mapping[ProjectPath, ChangedSpan]
===========changed ref 2===========
# module: coeditor.c3problem
class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
"""
### Change log
+ - v2.6: fix missing changes in `JModuleChanges`.
- v2.5: fix newline encoding bug.
- v2.4: fix buggy encoding of `Added` and `Deleted` changes.
- v2.3: always generate problems with full editing range and move the problem
splitting logic elsewhere. Also changed the data format of `ChangedCodeSpan`.
"""
+ VERSION = "2.6"
- VERSION = "2.5"
# change spans with more than this many lines will be ignored
max_span_lines: int = 500
===========changed ref 3===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ start: int
+ until: int
+
===========changed ref 4===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ @property
+ def scope(self) -> Change[ChangeScope]:
+ return self.parent_scopes[-1]
+
===========changed ref 5===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def __contains__(self, l: int) -> bool:
+ return self.start <= l < self.until
+
===========changed ref 6===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
- @property
- def path(self) -> ProjectPath:
- return self.parent_scopes[-1].earlier.path
-
===========changed ref 7===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ @property
+ def module(self) -> ModuleName:
+ return self.parent_scopes[-1].earlier.path.module
+
===========changed ref 8===========
# module: coeditor.scoped_changes
def code_to_module(code: str) -> ptree.Module:
- m = jedi.Script(code)._module_node
- assert isinstance(m, ptree.Module)
- return m
+ return parso.parse(code)
===========changed ref 9===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
def _search_span(self, line: int) -> "StatementSpan | None":
for span in self.spans:
+ if line in span.line_range:
- if span.line_range[0] <= line < span.line_range[1]:
return span
return None
===========changed ref 10===========
# module: coeditor.scoped_changes
def line_range(start: int, end: int, can_be_empty: bool = False) -> LineRange:
if not can_be_empty and start >= end:
raise ValueError(f"Bad line range: {start=}, {end=}")
+ return LineRange(start, end)
- return LineRange((start, end))
===========changed ref 11===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
def __repr__(self):
+ return (
+ f"ChangeScope(path={self.path}, type={self.tree.type}, spans={self.spans})"
- return f"ChangeScope(path={self.path}, type={self.tree.type})"
+ )
===========changed ref 12===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
def __repr__(self) -> str:
- change_dict = {k.path: v.change.as_char() for k, v in self.changed.items()}
+ return f"JModuleChange({self.changed})"
- return f"JModuleChange({change_dict})"
===========changed ref 13===========
# module: coeditor.scoped_changes
@dataclass
class StatementSpan:
+ def __repr__(self):
+ preview = self.code
+ str_limit = 30
+ if len(preview) > str_limit:
+ preview = preview[:str_limit] + "..."
+ return f"StatementSpan({self.line_range}, code={repr(preview)})"
+
===========changed ref 14===========
# module: coeditor.scoped_changes
ScopeTree = ptree.Function | ptree.Class | ptree.Module
PyNode = ptree.PythonBaseNode | ptree.PythonNode
- LineRange = NewType("LineRange", tuple[int, int])
_tlogger = TimeLogger()
===========changed ref 15===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
+ def search_span_by_line(self, line: int) -> "StatementSpan | None":
+ # TODO: optimize this to avoid linear scan
+ span = self._search_span(line)
+ if span is not None:
+ return span
+ for s in self.subscopes.values():
+ span = s.search_span_by_line(line)
+ if span is not None:
+ return span
+ |
coeditor.model/C3DataLoader._to_tokenized | Modified | temp-1 | 126ebdc082f96cc4670cb37357e850d259b00c68 | Initial implementation of new service. - Make LineRange a class. - Fix JModuleChanges. - Separate out some C3 generator logic. | <0>:<add> max_workers=self.workers,
| # module: coeditor.model
@dataclass
class C3DataLoader:
def _to_tokenized(self, probs: Sequence[C3Problem]) -> Iterable[TkC3Problem]:
probs = list(probs)
if self.transform is not None:
# we can afford to store all transformed problems beforehand
+ probs = join_list(
+ pmap(
+ self.transform.transform,
+ probs,
+ chunksize=500,
+ max_workers=self.workers,
+ )
+ )
- probs = join_list(pmap(self.transform.transform, probs, chunksize=500))
if self.shuffle:
# we need to shuffle after the transform to help serialization
# this also mixes the problems better
random.shuffle(probs)
for i in range(0, len(probs), self.chunk_size):
# we can only afford to tokenize the problems on-the-fly
group = probs[i : i + self.chunk_size]
yield from pmap(
self.tokenizer.tokenize_problem,
group,
tqdm_args={"disable": True},
<0> )
| ===========unchanged ref 0===========
at: coeditor._utils
pmap(f: Callable[..., T1], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], /, *iterables: Iterable[Any], desc: str | None=None, key_args: Mapping[str, Any] | None=None, max_workers: int | None=None, chunksize: int | None=None, tqdm_args: Mapping[str, Any] | None=None) -> list[T1]
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
TkC3Problem(main_input: TkArray, header: TkArray, output: TkArray, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TkArray]], project: str, commit: CommitInfo | None, truncated: bool)
at: coeditor.c3problem.C3ProblemTransform
transform(prob: C3Problem) -> Sequence[C3Problem]
at: coeditor.common
join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
at: coeditor.model.C3DataLoader
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
tqdm_args: dict | None = None
chunk_size: int = 1000
workers: int = DefaultWorkers
at: random
shuffle = _inst.shuffle
at: typing
Iterable = _alias(collections.abc.Iterable, 1)
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor.model
@dataclass
class C3DataLoader:
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
tqdm_args: dict | None = None
chunk_size: int = 1000
+ workers: int = DefaultWorkers
===========changed ref 1===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ start: int
+ until: int
+
===========changed ref 2===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ @property
+ def scope(self) -> Change[ChangeScope]:
+ return self.parent_scopes[-1]
+
===========changed ref 3===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def __contains__(self, l: int) -> bool:
+ return self.start <= l < self.until
+
===========changed ref 4===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def to_range(self) -> range:
+ return range(self.start, self.until)
+
===========changed ref 5===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
- @property
- def path(self) -> ProjectPath:
- return self.parent_scopes[-1].earlier.path
-
===========changed ref 6===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ @property
+ def module(self) -> ModuleName:
+ return self.parent_scopes[-1].earlier.path.module
+
===========changed ref 7===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
module_change: Change[JModule]
+ changed: Sequence[ChangedSpan]
- changed: Mapping[ProjectPath, ChangedSpan]
===========changed ref 8===========
# module: coeditor.scoped_changes
def code_to_module(code: str) -> ptree.Module:
- m = jedi.Script(code)._module_node
- assert isinstance(m, ptree.Module)
- return m
+ return parso.parse(code)
===========changed ref 9===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
def _search_span(self, line: int) -> "StatementSpan | None":
for span in self.spans:
+ if line in span.line_range:
- if span.line_range[0] <= line < span.line_range[1]:
return span
return None
===========changed ref 10===========
# module: coeditor.scoped_changes
def line_range(start: int, end: int, can_be_empty: bool = False) -> LineRange:
if not can_be_empty and start >= end:
raise ValueError(f"Bad line range: {start=}, {end=}")
+ return LineRange(start, end)
- return LineRange((start, end))
===========changed ref 11===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
def __repr__(self):
+ return (
+ f"ChangeScope(path={self.path}, type={self.tree.type}, spans={self.spans})"
- return f"ChangeScope(path={self.path}, type={self.tree.type})"
+ )
===========changed ref 12===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
def __repr__(self) -> str:
- change_dict = {k.path: v.change.as_char() for k, v in self.changed.items()}
+ return f"JModuleChange({self.changed})"
- return f"JModuleChange({change_dict})"
===========changed ref 13===========
# module: coeditor.scoped_changes
@dataclass
class StatementSpan:
+ def __repr__(self):
+ preview = self.code
+ str_limit = 30
+ if len(preview) > str_limit:
+ preview = preview[:str_limit] + "..."
+ return f"StatementSpan({self.line_range}, code={repr(preview)})"
+
===========changed ref 14===========
# module: coeditor.scoped_changes
ScopeTree = ptree.Function | ptree.Class | ptree.Module
PyNode = ptree.PythonBaseNode | ptree.PythonNode
- LineRange = NewType("LineRange", tuple[int, int])
_tlogger = TimeLogger()
===========changed ref 15===========
# module: coeditor.c3problem
+ class C3GeneratorCache:
+ def __init__(self, pre_module_map: Mapping[ModuleName, JModule]):
+ self.header_cache = dict[ProjectPath, ChangedHeader]()
+ self.cspan_cache = dict[PyDefinition, list[ChangedCodeSpan]]()
+ self.module_map = pre_module_map
+ self.mod_hier = ModuleHierarchy.from_modules(pre_module_map)
+
===========changed ref 16===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
+ def search_span_by_line(self, line: int) -> "StatementSpan | None":
+ # TODO: optimize this to avoid linear scan
+ span = self._search_span(line)
+ if span is not None:
+ return span
+ for s in self.subscopes.values():
+ span = s.search_span_by_line(line)
+ if span is not None:
+ return span
+
===========changed ref 17===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
def __repr__(self) -> str:
+ return f"ChangeSpan(module={self.module}, range={self.line_range}, scope={self.scope.earlier.path.path}, type={self.change.as_char()})"
- return f"ChangeSpan(scope={self.path}, range={self.line_range}, type={self.change.as_char()})"
|
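`_to_tokenized` above applies the (cheap, storable) transform to all problems up front, shuffles for better mixing, and only then tokenizes lazily in fixed-size chunks, so at most `chunk_size` tokenized problems are materialized at a time. The same pattern in isolation, with plain `map` standing in where the real code uses `pmap` with `max_workers`:

```python
import random
from typing import Callable, Iterable, Sequence, TypeVar

A = TypeVar("A")
B = TypeVar("B")

def lazy_chunked_map(
    xs: Sequence[A], f: Callable[[A], B], chunk_size: int, shuffle: bool = True
) -> Iterable[B]:
    xs = list(xs)
    if shuffle:
        random.shuffle(xs)  # shuffle before the expensive stage
    for i in range(0, len(xs), chunk_size):
        # only one chunk is processed (in parallel, in the real code) at a time
        yield from map(f, xs[i : i + chunk_size])

print(sum(1 for _ in lazy_chunked_map(range(10), str, chunk_size=4)))  # 10
```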
coeditor.model/C3DataLoader._problems_to_batches | Modified | temp-1 | 126ebdc082f96cc4670cb37357e850d259b00c68 | Initial implementation of new service. - Make LineRange a class. - Fix JModuleChanges. - Separate out some C3 generator logic. | <0>:<add> yield self.pack_batch(current_batch)
| # module: coeditor.model
@dataclass
class C3DataLoader:
def _problems_to_batches(self, problems: Iterable[TkC3Problem]) -> Iterable[dict]:
<s> current_batch = []
current_cost = 0
for tk_prob in problems:
all_refs = [x[1] for x in tk_prob.named_references]
ref_size_sum = sum(len(ref) for ref in all_refs)
assert ref_size_sum <= tkn.max_ref_tks_sum, f"{ref_size_sum=}"
input_tks, output_tks = self._post_process(tk_prob)
cost = retrieval_cost_model(
ref_size=ref_size_sum,
query_size=len(input_tks),
output_size=len(output_tks),
)
- row = {
- "input_tks": input_tks,
- "output_tks": output_tks,
- "references": [x.tolist() for x in all_refs],
+ tk_prob = dataclasses.replace(
+ tk_prob, input_tks=input_tks, output_tks=output_tks
+ )
- }
if cost > cost_limit and not warned_batch_size:
warned_batch_size = True
warnings.warn("Batch cost limit is too small.")
if (not current_batch) or (
cost + current_cost <= cost_limit
and len(current_batch) < self.batch_args.max_queries
):
+ current_batch.append(tk_prob)
- current_batch.append(row)
current_cost += cost
else:
+ yield self.pack_batch(current_batch)
- yield pack_batch(current_batch)
+ current_batch = [tk_prob]
- current_batch = [row]
current_cost = cost
if current_batch:
- yield pack_batch(current_batch)
<0>
| ===========above chunk 0===========
# module: coeditor.model
@dataclass
class C3DataLoader:
def _problems_to_batches(self, problems: Iterable[TkC3Problem]) -> Iterable[dict]:
# offset: -1
- def pack_batch(rows: list[dict]):
- assert rows, "empty batch found"
- input_ids = [x["input_tks"] for x in rows]
- labels = [x["output_tks"] for x in rows]
- refs = [x["references"] for x in rows]
- id2ref = {id(ref): ref for row in refs for ref in row}
- references = [id2ref[x] for x in id2ref]
- id2order = {x: i for i, x in enumerate(id2ref)}
- query_ref_list = [[id2order[id(ref)] for ref in row] for row in refs]
- return {
- "input_ids": input_ids,
- "references": references,
- "query_ref_list": query_ref_list,
- "labels": labels,
- }
-
tkn = self.tokenizer
cost_limit = self._cost_limit()
warned_batch_size = False
# sample references for each query
+ current_batch = list[TkC3Problem]()
- current_batch = []
current_cost = 0
for tk_prob in problems:
all_refs = [x</s>
===========unchanged ref 0===========
at: _warnings
warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
at: coeditor.c3problem
TkC3Problem(main_input: TkArray, header: TkArray, output: TkArray, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TkArray]], project: str, commit: CommitInfo | None, truncated: bool)
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.7"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 16
ref_chunk_overlap: int = 32
disable_builtin_defs: bool = True
disable_unchanged_refs: bool = False
current_code_only: bool = False
at: coeditor.c3problem.TkC3Problem
main_input: TkArray
header: TkArray
output: TkArray
path: ProjectPath
change_type: Change[None]
named_references: Sequence[tuple[str, TkArray]]
project: str
commit: CommitInfo | None
truncated: bool
at: coeditor.model
retrieval_cost_model(ref_size: int, query_size: int, output_size: int) -> float
at: coeditor.model.BatchArgs
min_queries: int = 1
max_queries: int = 8
shuffle_extra_ids: bool = True
at: coeditor.model.C3DataLoader
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
===========unchanged ref 1===========
_post_process(e: TkC3Problem)
at: coeditor.model.C3DataLoader._post_process
labels = wrap_bos(labels)
labels = [id_map.get(tk, tk) for tk in labels]
labels = labels[:max_output_tks]
input_ids = e.input_tks
input_ids = [id_map.get(tk, tk) for tk in input_ids]
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
at: dataclasses
replace(obj: _T, **changes: Any) -> _T
at: typing
Iterable = _alias(collections.abc.Iterable, 1)
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor.model
@dataclass
class C3DataLoader:
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
tqdm_args: dict | None = None
chunk_size: int = 1000
+ workers: int = DefaultWorkers
===========changed ref 1===========
# module: coeditor.model
@dataclass
class C3DataLoader:
def _to_tokenized(self, probs: Sequence[C3Problem]) -> Iterable[TkC3Problem]:
probs = list(probs)
if self.transform is not None:
# we can afford to store all transformed problems beforehand
+ probs = join_list(
+ pmap(
+ self.transform.transform,
+ probs,
+ chunksize=500,
+ max_workers=self.workers,
+ )
+ )
- probs = join_list(pmap(self.transform.transform, probs, chunksize=500))
if self.shuffle:
# we need to shuffle after the transform to help serialization
# this also mixes the problems better
random.shuffle(probs)
for i in range(0, len(probs), self.chunk_size):
# we can only afford to tokenize the problems on-the-fly
group = probs[i : i + self.chunk_size]
yield from pmap(
self.tokenizer.tokenize_problem,
group,
tqdm_args={"disable": True},
+ max_workers=self.workers,
)
===========changed ref 2===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ start: int
+ until: int
+
===========changed ref 3===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ @property
+ def scope(self) -> Change[ChangeScope]:
+ return self.parent_scopes[-1]
+
===========changed ref 4===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def __contains__(self, l: int) -> bool:
+ return self.start <= l < self.until
+
===========changed ref 5===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def to_range(self) -> range:
+ return range(self.start, self.until)
+
===========changed ref 6===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
- @property
- def path(self) -> ProjectPath:
- return self.parent_scopes[-1].earlier.path
-
===========changed ref 7===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ @property
+ def module(self) -> ModuleName:
+ return self.parent_scopes[-1].earlier.path.module
+
===========changed ref 8===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
module_change: Change[JModule]
+ changed: Sequence[ChangedSpan]
- changed: Mapping[ProjectPath, ChangedSpan]
===========changed ref 9===========
# module: coeditor.scoped_changes
def code_to_module(code: str) -> ptree.Module:
- m = jedi.Script(code)._module_node
- assert isinstance(m, ptree.Module)
- return m
+ return parso.parse(code)
===========changed ref 10===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
def _search_span(self, line: int) -> "StatementSpan | None":
for span in self.spans:
+ if line in span.line_range:
- if span.line_range[0] <= line < span.line_range[1]:
return span
return None
|
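`_problems_to_batches` above packs problems greedily: keep adding to the current batch while the summed cost stays under the limit and the query count stays under `max_queries`, otherwise yield and start over. The same loop in isolation (the cost function and limits here are illustrative, not Coeditor's retrieval cost model):

```python
from typing import Callable, Iterable, Sequence, TypeVar

T = TypeVar("T")

def pack_by_cost(
    items: Sequence[T], cost: Callable[[T], float], cost_limit: float, max_items: int
) -> Iterable[list[T]]:
    batch: list[T] = []
    batch_cost = 0.0
    for it in items:
        c = cost(it)
        # a lone item always forms a batch, even if it exceeds the limit
        if not batch or (batch_cost + c <= cost_limit and len(batch) < max_items):
            batch.append(it)
            batch_cost += c
        else:
            yield batch
            batch, batch_cost = [it], c
    if batch:
        yield batch

print(list(pack_by_cost([3, 4, 2, 6, 1], cost=float, cost_limit=7, max_items=8)))
# [[3, 4], [2], [6, 1]]
```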
tests.test_edits/TestChangeIdentities.test_delta_decomposition | Modified | temp-1 | 126ebdc082f96cc4670cb37357e850d259b00c68 | Initial implementation of new service. - Make LineRange a class. - Fix JModuleChanges. - Separate out some C3 generator logic. | <0>:<add> print_err(c.earlier)
| # module: tests.test_edits
class TestChangeIdentities:
def test_delta_decomposition(self):
for name, c in self.cases.items():
original, delta = TkDelta.from_change_tks(change_to_tokens(c))
expect = delta.apply_to_input(original)
keys = tuple(delta.keys())
+ for _ in range(50):
- for _ in range(10):
n_keys = int(len(keys) * random.random())
sub_keys = random_subset(keys, n_keys)
delta1, delta2 = delta.decompose_for_input(sub_keys)
step1 = delta1.apply_to_input(original)
step2 = delta2.apply_to_input(step1)
if step2 != expect:
print_err(f"{sub_keys=}")
+ print_err("earlier", SEP)
<0> print_err("Original", SEP)
print_err(decode_tokens(original))
print_err("Expect", SEP)
print_err(decode_tokens(expect))
print_err("delta1", SEP)
print_err(delta1)
print_err("step1", SEP)
print_err(decode_tokens(step1))
print_err("delta2", SEP)
print_err(delta2)
print_err("step2", SEP)
print_err(decode_tokens(step2))
raise AssertionError("Failed for case: " + name)
| ===========unchanged ref 0===========
at: coeditor.change.Added
after: E1
at: coeditor.change.Deleted
before: E1
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
at: coeditor.common
SEP = "-" * 80
random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2]
random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1]
print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None
at: coeditor.encoding
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
change_to_tokens(change: Change[str]) -> TokenSeq
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
from_change_tks(change_tks: TokenSeq) -> tuple[TokenSeq, "TkDelta"]
at: random
random = _inst.random
===========unchanged ref 1===========
at: tests.test_edits.TestChangeIdentities
cases: dict[str, Change[str]] = {
"empty": Modified("", ""),
"generation": Modified("", "123"),
"added": Added("a\nb\nc\n"),
"deleted": Deleted("a\nb\nc\n"),
"no change": Modified(
dedent(
"""\
def f1():
x = 1
"""
),
dedent(
"""\
def f1():
x = 1
"""
),
),
"unchanged=True": Modified.from_unchanged(
dedent(
"""\
def f1():
x = 1
"""
),
),
# this test case cannot pass for some reason. Tokenizer bug?
# "leading_whitespace": Modified.from_unchanged(" ..."),
"replace last": Modified(
dedent(
"""\
def f1():
x = 1"""
),
dedent(
"""\
def f1():
x = 2
return x * 2"""
),
),
"no special tokens": Modified(
dedent(
"""\
def f1():
x = 1
y = 2
z = x + y
return z
def f2():
f1()"""
),
dedent(
"""\
# new comment
def f_new():
x = 1
if x > 0:
y = 2 * x
y *= 2
z = x + y
return z
def f2():
f1()
return f_new() + a
new_var = 0
"""
),
),
"with special tokens": Modified(
dedent(
"""\
def f1():
x = "<add>"
y = "<del>\tx"
return x + y
"""
),
dedent(
"""\
# new comment 1</s>
===========changed ref 0===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ start: int
+ until: int
+
===========changed ref 1===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ @property
+ def scope(self) -> Change[ChangeScope]:
+ return self.parent_scopes[-1]
+
===========changed ref 2===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def __contains__(self, l: int) -> bool:
+ return self.start <= l < self.until
+
===========changed ref 3===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def to_range(self) -> range:
+ return range(self.start, self.until)
+
===========changed ref 4===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
- @property
- def path(self) -> ProjectPath:
- return self.parent_scopes[-1].earlier.path
-
===========changed ref 5===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ @property
+ def module(self) -> ModuleName:
+ return self.parent_scopes[-1].earlier.path.module
+
===========changed ref 6===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
module_change: Change[JModule]
+ changed: Sequence[ChangedSpan]
- changed: Mapping[ProjectPath, ChangedSpan]
===========changed ref 7===========
# module: coeditor.scoped_changes
def code_to_module(code: str) -> ptree.Module:
- m = jedi.Script(code)._module_node
- assert isinstance(m, ptree.Module)
- return m
+ return parso.parse(code)
===========changed ref 8===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
def _search_span(self, line: int) -> "StatementSpan | None":
for span in self.spans:
+ if line in span.line_range:
- if span.line_range[0] <= line < span.line_range[1]:
return span
return None
===========changed ref 9===========
# module: coeditor.scoped_changes
def line_range(start: int, end: int, can_be_empty: bool = False) -> LineRange:
if not can_be_empty and start >= end:
raise ValueError(f"Bad line range: {start=}, {end=}")
+ return LineRange(start, end)
- return LineRange((start, end))
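Behavior sketch for the constructor above: empty ranges must be requested explicitly.

line_range(2, 5)                      # LineRange(start=2, until=5)
line_range(5, 5, can_be_empty=True)   # empty range, allowed
# line_range(5, 5) raises ValueError("Bad line range: start=5, end=5")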
===========changed ref 10===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
def __repr__(self):
+ return (
+ f"ChangeScope(path={self.path}, type={self.tree.type}, spans={self.spans})"
- return f"ChangeScope(path={self.path}, type={self.tree.type})"
+ )
===========changed ref 11===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
def __repr__(self) -> str:
- change_dict = {k.path: v.change.as_char() for k, v in self.changed.items()}
+ return f"JModuleChange({self.changed})"
- return f"JModuleChange({change_dict})"
===========changed ref 12===========
# module: coeditor.scoped_changes
@dataclass
class StatementSpan:
+ def __repr__(self):
+ preview = self.code
+ str_limit = 30
+ if len(preview) > str_limit:
+ preview = preview[:str_limit] + "..."
+ return f"StatementSpan({self.line_range}, code={repr(preview)})"
+
===========changed ref 13===========
# module: coeditor.scoped_changes
ScopeTree = ptree.Function | ptree.Class | ptree.Module
PyNode = ptree.PythonBaseNode | ptree.PythonNode
- LineRange = NewType("LineRange", tuple[int, int])
_tlogger = TimeLogger()
===========changed ref 14===========
# module: coeditor.c3problem
+ class C3GeneratorCache:
+ def __init__(self, pre_module_map: Mapping[ModuleName, JModule]):
+ self.header_cache = dict[ProjectPath, ChangedHeader]()
+ self.cspan_cache = dict[PyDefinition, list[ChangedCodeSpan]]()
+ self.module_map = pre_module_map
+ self.mod_hier = ModuleHierarchy.from_modules(pre_module_map)
+ |
tests.test_analysis/test_anlayzing_defs | Modified | temp-1 | 126ebdc082f96cc4670cb37357e850d259b00c68 | Initial implementation of new service. - Make LineRange a class. - Fix JModuleChanges. - Separate out some C3 generator logic. | <0>:<add> analysis = analyzer.get_line_usages(script, range(0, 46), silent=True)
| # module: tests.test_analysis
def test_anlayzing_defs():
analyzer = JediUsageAnalyzer()
project = jedi.Project(path=testcase_root, added_sys_path=[proj_root() / "src"])
script = jedi.Script(path=testcase_root / "defs.py", project=project)
- analysis = analyzer.get_line_usages(
- script, testcase_root, range(0, 46), silent=True
- )
<0>
if analyzer.error_counts:
raise RuntimeError(f"Errors found: {analyzer.error_counts}")
assert_has_usages(
analysis.line2usages[10],
"defs.ScopeTree",
"parso.python.tree.Function",
"parso.python.tree.Class",
"parso.python.tree.Module",
)
assert_has_usages(
analysis.line2usages[21],
"defs.ChangeScope.path",
"coeditor.common.ProjectPath",
)
assert_has_usages(
analysis.line2usages[22],
"defs.ChangeScope.tree",
"defs.ScopeTree",
)
assert_has_usages(
analysis.line2usages[23],
"defs.ChangeScope.spans",
"typing.Sequence",
)
assert_has_usages(
analysis.line2usages[24],
"typing.Mapping",
"coeditor.common.ProjectPath",
)
assert_has_usages(
analysis.line2usages[28],
"defs.ChangeScope.spans",
)
assert_has_usages(
analysis.line2usages[31],
"coeditor.common.ProjectPath",
"defs.ScopeTree",
# "defs.ChangeScope", # couldn't handle string annotations for now
)
assert_has_usages(
analysis.line2usages[40],
</s> | ===========below chunk 0===========
# module: tests.test_analysis
def test_anlayzing_defs():
# offset: 1
<s> string annotations for now
)
assert_has_usages(
analysis.line2usages[40],
"parso.tree.BaseNode.__init__.children",
)
assert_has_usages(
analysis.line2usages[42],
"parso.python.tree.PythonNode",
"parso.python.tree.Scope.get_suite",
# "parso.python.tree.BaseNode.children",
)
===========unchanged ref 0===========
at: coeditor._utils
proj_root() -> Path
at: coeditor.c3problem
JediUsageAnalyzer(include_parent_usages: bool=True, include_builtins: bool=False)
at: coeditor.c3problem.JediUsageAnalyzer
include_parent_usages: bool = True
include_builtins: bool = False
_KnownJediErrors = {
"not enough values to unpack (expected 2",
"'Newline' object has no attribute 'children'",
"trailer_op is actually ",
"There's a scope that was not managed: <Module",
"maximum recursion depth exceeded",
"'NoneType' object has no attribute 'type'",
}
get_line_usages(script: jedi.Script, lines_to_analyze: Collection[int], silent: bool=False)
at: coeditor.c3problem.JediUsageAnalyzer.__post_init__
self.error_counts = dict[str, int]()
at: coeditor.c3problem.LineUsageAnalysis
line2usages: Mapping[int, Sequence[PyDefinition]]
at: jedi.api
Script(code=None, *, path=None, environment=None, project=None)
at: jedi.api.project
Project(path, *, environment_path=None, load_unsafe_extensions=False, sys_path=None, added_sys_path=(), smart_sys_path=True)
at: tests.test_analysis
testcase_root = Path(__file__).parent / "testcases"
assert_has_usages(defs: Collection[PyDefinition], *full_names: str)
===========changed ref 0===========
+ # module: coeditor.service
+
+
===========changed ref 1===========
+ # module: coeditor.service
+ ErrorStr = str
+
===========changed ref 2===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ start: int
+ until: int
+
===========changed ref 3===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ @property
+ def scope(self) -> Change[ChangeScope]:
+ return self.parent_scopes[-1]
+
===========changed ref 4===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def __contains__(self, l: int) -> bool:
+ return self.start <= l < self.until
+
===========changed ref 5===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def to_range(self) -> range:
+ return range(self.start, self.until)
+
===========changed ref 6===========
+ # module: coeditor.service
+ def show_location(loc: CodePosition):
+ return f"{loc[0]}:{loc[1]}"
+
===========changed ref 7===========
+ # module: coeditor.service
+ @dataclass
+ class ChangeDetector:
+ def _get_mod_time(self, path: RelPath) -> SysTime:
+ return os.stat(self.project / path).st_mtime
+
===========changed ref 8===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
- @property
- def path(self) -> ProjectPath:
- return self.parent_scopes[-1].earlier.path
-
===========changed ref 9===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ @property
+ def module(self) -> ModuleName:
+ return self.parent_scopes[-1].earlier.path.module
+
===========changed ref 10===========
+ # module: coeditor.service
+ @dataclass
+ class EditSuggestion:
+ score: float
+ change_preview: str
+ new_code: str
+
===========changed ref 11===========
+ # module: coeditor.service
+ _tlogger = TimeLogger()
+
+ CommitHash = str
+ SysTime = float
+
===========changed ref 12===========
+ # module: coeditor.service
+ @dataclass
+ class ChangeDetector:
+ def _get_index_content(self, path: RelPath):
+ return file_content_from_commit(self.project, "", path.as_posix())
+
===========changed ref 13===========
+ # module: coeditor.service
+ @dataclass
+ class ServiceResponse:
+ def __str__(self) -> str:
+ # use the print above
+ s = io.StringIO()
+ self.print(s)
+ return s.getvalue()
+
===========changed ref 14===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
module_change: Change[JModule]
+ changed: Sequence[ChangedSpan]
- changed: Mapping[ProjectPath, ChangedSpan]
===========changed ref 15===========
+ # module: coeditor.service
+ @dataclass
+ class ChangeDetector:
+ def get_current_modules(self) -> dict[RelPath, JModule]:
+ files = get_python_files(self.project)
+ return {f: self.get_current_module(f) for f in files}
+
===========changed ref 16===========
+ # module: coeditor.service
+ @dataclass
+ class ChangeDetector:
+ project: Path
+ untracked_as_additions: bool = True
+ ignore_dirs: Collection[str] = field(default_factory=lambda: DefaultIgnoreDirs)
+
===========changed ref 17===========
+ # module: coeditor.service
+ @dataclass
+ class EditSuggestion:
+ def to_json(self):
+ return {
+ "score": self.score,
+ "change_preview": self.change_preview,
+ "new_code": self.new_code,
+ }
+
===========changed ref 18===========
# module: coeditor.scoped_changes
def code_to_module(code: str) -> ptree.Module:
- m = jedi.Script(code)._module_node
- assert isinstance(m, ptree.Module)
- return m
+ return parso.parse(code)
===========changed ref 19===========
+ # module: coeditor.service
+ @dataclass
+ class ChangeDetector:
+ def get_current_module(self, path: RelPath) -> JModule:
+ stamp = self._get_mod_time(path)
+ return self._now_cache.cached(
+ path, stamp, lambda: self._parse_current_module(path)
+ )
+
===========changed ref 20===========
+ # module: coeditor.service
+ @dataclass
+ class ChangeDetector:
+ def _get_index_module(self, path: RelPath) -> JModule:
+ stamp = self._get_index_stamp(path)
+ return self._index_cache.cached(
+ path, stamp, lambda: self._parse_index_module(path)
+ )
+
===========changed ref 21===========
+ # module: coeditor.service
+ @dataclass
+ class ServiceResponse:
+ target_file: str
+ edit_start: tuple[int, int]
+ edit_end: tuple[int, int]
+ old_code: str
+ suggestions: list[EditSuggestion]
+
===========changed ref 22===========
# module: coeditor.scoped_changes
@dataclass
class ChangeScope:
def _search_span(self, line: int) -> "StatementSpan | None":
for span in self.spans:
+ if line in span.line_range:
- if span.line_range[0] <= line < span.line_range[1]:
return span
return None
===========changed ref 23===========
# module: coeditor.scoped_changes
def line_range(start: int, end: int, can_be_empty: bool = False) -> LineRange:
if not can_be_empty and start >= end:
raise ValueError(f"Bad line range: {start=}, {end=}")
+ return LineRange(start, end)
- return LineRange((start, end))
|
tests.test_analysis/test_anlayzing_usages | Modified | temp-1 | 126ebdc082f96cc4670cb37357e850d259b00c68 | Initial implementation of new service. - Make LineRange a class. - Fix JModuleChanges. - Separate out some C3 generator logic. | <0>:<add> analysis = analyzer.get_line_usages(script, range(0, 63), silent=True)
| # module: tests.test_analysis
def test_anlayzing_usages():
analyzer = JediUsageAnalyzer()
project = jedi.Project(path=testcase_root, added_sys_path=[proj_root() / "src"])
script = jedi.Script(path=testcase_root / "usages.py", project=project)
- analysis = analyzer.get_line_usages(
- script, testcase_root, range(0, 63), silent=True
- )
<0>
if analyzer.error_counts:
raise RuntimeError(f"Errors found: {analyzer.error_counts}")
assert_has_usages(
analysis.line2usages[11],
"usages.JModule.tree",
"parso.python.tree.Module",
)
assert_has_usages(
analysis.line2usages[13],
"usages.JModule._to_scope",
"defs.ChangeScope",
)
assert_has_usages(
analysis.line2usages[14],
"usages.JModule.mname",
"usages.JModule.tree",
"defs.ChangeScope",
"defs.ChangeScope.from_tree",
"coeditor.common.ProjectPath",
)
assert_has_usages(
analysis.line2usages[19],
"usages.JModule.iter_imports",
)
assert_has_usages(
analysis.line2usages[21],
# "parso.python.tree.ImportFrom.get_from_names",
)
assert_has_usages(
analysis.line2usages[34],
"coeditor._utils.as_any",
)
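Access sketch for the analysis consumed above (the full_name field is assumed from the assert_has_usages signature in the unchanged refs): line2usages maps a 0-based line number to the PyDefinitions referenced on that line.

defs_on_line = analysis.line2usages[14]
assert any(d.full_name == "defs.ChangeScope.from_tree" for d in defs_on_line)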
| ===========unchanged ref 0===========
at: coeditor._utils
proj_root() -> Path
at: coeditor.c3problem.JediUsageAnalyzer
get_line_usages(script: jedi.Script, lines_to_analyze: Collection[int], silent: bool=False)
at: coeditor.c3problem.JediUsageAnalyzer.__post_init__
self.error_counts = dict[str, int]()
at: coeditor.c3problem.LineUsageAnalysis
line2usages: Mapping[int, Sequence[PyDefinition]]
at: jedi.api
Script(code=None, *, path=None, environment=None, project=None)
at: jedi.api.project
Project(path, *, environment_path=None, load_unsafe_extensions=False, sys_path=None, added_sys_path=(), smart_sys_path=True)
at: tests.test_analysis
testcase_root = Path(__file__).parent / "testcases"
assert_has_usages(defs: Collection[PyDefinition], *full_names: str)
at: tests.test_analysis.test_anlayzing_usages
analyzer = JediUsageAnalyzer()
===========changed ref 0===========
# module: tests.test_analysis
def test_anlayzing_defs():
analyzer = JediUsageAnalyzer()
project = jedi.Project(path=testcase_root, added_sys_path=[proj_root() / "src"])
script = jedi.Script(path=testcase_root / "defs.py", project=project)
- analysis = analyzer.get_line_usages(
- script, testcase_root, range(0, 46), silent=True
- )
+ analysis = analyzer.get_line_usages(script, range(0, 46), silent=True)
if analyzer.error_counts:
raise RuntimeError(f"Errors found: {analyzer.error_counts}")
assert_has_usages(
analysis.line2usages[10],
"defs.ScopeTree",
"parso.python.tree.Function",
"parso.python.tree.Class",
"parso.python.tree.Module",
)
assert_has_usages(
analysis.line2usages[21],
"defs.ChangeScope.path",
"coeditor.common.ProjectPath",
)
assert_has_usages(
analysis.line2usages[22],
"defs.ChangeScope.tree",
"defs.ScopeTree",
)
assert_has_usages(
analysis.line2usages[23],
"defs.ChangeScope.spans",
"typing.Sequence",
)
assert_has_usages(
analysis.line2usages[24],
"typing.Mapping",
"coeditor.common.ProjectPath",
)
assert_has_usages(
analysis.line2usages[28],
"defs.ChangeScope.spans",
)
assert_has_usages(
analysis.line2usages[31],
"coeditor.common.ProjectPath",
"defs.ScopeTree",
# "defs.ChangeScope", # couldn't handle string annotations for now
</s>
===========changed ref 1===========
# module: tests.test_analysis
def test_anlayzing_defs():
# offset: 1
<s>ProjectPath",
"defs.ScopeTree",
# "defs.ChangeScope", # couldn't handle string annotations for now
)
assert_has_usages(
analysis.line2usages[40],
"parso.tree.BaseNode.__init__.children",
)
assert_has_usages(
analysis.line2usages[42],
"parso.python.tree.PythonNode",
"parso.python.tree.Scope.get_suite",
# "parso.python.tree.BaseNode.children",
)
===========changed ref 2===========
+ # module: coeditor.service
+
+
===========changed ref 3===========
+ # module: coeditor.service
+ ErrorStr = str
+
===========changed ref 4===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ start: int
+ until: int
+
===========changed ref 5===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ @property
+ def scope(self) -> Change[ChangeScope]:
+ return self.parent_scopes[-1]
+
===========changed ref 6===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def __contains__(self, l: int) -> bool:
+ return self.start <= l < self.until
+
===========changed ref 7===========
# module: coeditor.scoped_changes
+ class LineRange(NamedTuple):
+ def to_range(self) -> range:
+ return range(self.start, self.until)
+
===========changed ref 8===========
+ # module: coeditor.service
+ def show_location(loc: CodePosition):
+ return f"{loc[0]}:{loc[1]}"
+
===========changed ref 9===========
+ # module: coeditor.service
+ @dataclass
+ class ChangeDetector:
+ def _get_mod_time(self, path: RelPath) -> SysTime:
+ return os.stat(self.project / path).st_mtime
+
===========changed ref 10===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
- @property
- def path(self) -> ProjectPath:
- return self.parent_scopes[-1].earlier.path
-
===========changed ref 11===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ @property
+ def module(self) -> ModuleName:
+ return self.parent_scopes[-1].earlier.path.module
+
===========changed ref 12===========
+ # module: coeditor.service
+ @dataclass
+ class EditSuggestion:
+ score: float
+ change_preview: str
+ new_code: str
+
===========changed ref 13===========
+ # module: coeditor.service
+ _tlogger = TimeLogger()
+
+ CommitHash = str
+ SysTime = float
+
===========changed ref 14===========
+ # module: coeditor.service
+ @dataclass
+ class ChangeDetector:
+ def _get_index_content(self, path: RelPath):
+ return file_content_from_commit(self.project, "", path.as_posix())
+
===========changed ref 15===========
+ # module: coeditor.service
+ @dataclass
+ class ServiceResponse:
+ def __str__(self) -> str:
+ # use the print above
+ s = io.StringIO()
+ self.print(s)
+ return s.getvalue()
+
===========changed ref 16===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
module_change: Change[JModule]
+ changed: Sequence[ChangedSpan]
- changed: Mapping[ProjectPath, ChangedSpan]
===========changed ref 17===========
+ # module: coeditor.service
+ @dataclass
+ class ChangeDetector:
+ def get_current_modules(self) -> dict[RelPath, JModule]:
+ files = get_python_files(self.project)
+ return {f: self.get_current_module(f) for f in files}
+
===========changed ref 18===========
+ # module: coeditor.service
+ @dataclass
+ class ChangeDetector:
+ project: Path
+ untracked_as_additions: bool = True
+ ignore_dirs: Collection[str] = field(default_factory=lambda: DefaultIgnoreDirs)
+
===========changed ref 19===========
+ # module: coeditor.service
+ @dataclass
+ class EditSuggestion:
+ def to_json(self):
+ return {
+ "score": self.score,
+ "change_preview": self.change_preview,
+ "new_code": self.new_code,
+ }
+
===========changed ref 20===========
# module: coeditor.scoped_changes
def code_to_module(code: str) -> ptree.Module:
- m = jedi.Script(code)._module_node
- assert isinstance(m, ptree.Module)
- return m
+ return parso.parse(code)
|
coeditor.encoding/TkDelta.from_output_tks | Modified | temp-1 | f15359a0a5fb8953d3296a0483517e4c98cbf2e6 | Improve service UX with inversed changes. | <0>:<add> return tuple(remove_newline(x) for x in result)
| # module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
@staticmethod
def from_output_tks(lines: Sequence[int], tks: TokenSeq) -> "TkDelta":
ad_tks = (Add_id, Del_id)
+
+ def remove_newline(seg: TokenSeq):
+ if seg and seg[-1] == Newline_id:
+ del seg[-1]
+ return seg
def seg_to_tuple(seg: TokenSeq) -> tuple[TokenSeq]:
result = list[TokenSeq]()
ptr = 0
for i, x in enumerate(seg):
if i > 0 and x in ad_tks:
if seg[ptr] in ad_tks:
result.append(seg[ptr:i])
ptr = i
if ptr < len(seg) and seg[ptr] in ad_tks:
result.append(seg[ptr:])
- return tuple(result)
<0>
segs = output_ids_as_seqs(tks)
assert_eq(len(segs), len(lines))
deltas = {l: seg_to_tuple(seg) for l, seg in zip(lines, segs.values()) if seg}
return TkDelta(deltas)
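Standalone illustration with stand-in token ids (Add_id=1, Del_id=2, Newline_id=3 are assumptions, not the real vocabulary): each <add>/<del> token starts a new action segment, and the remove_newline fix above strips the trailing newline from every action.

Add_id, Del_id, Newline_id = 1, 2, 3
seg = [Add_id, 7, 8, Newline_id, Del_id, Newline_id]
# seg_to_tuple(seg) now yields ([Add_id, 7, 8], [Del_id]) rather than
# ([Add_id, 7, 8, Newline_id], [Del_id, Newline_id]) as before the fix.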
| ===========unchanged ref 0===========
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.encoding
Add_id = get_tk_id(Add)
Del_id = get_tk_id(Del)
Newline_id = get_tk_id("\n")
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor.change
@dataclass(frozen=True)
class Deleted(_ChangeBase[E1]):
+ def inverse(self) -> "Added[E1]":
+ return Added(self.before)
+
===========changed ref 1===========
# module: coeditor.change
@dataclass(frozen=True)
class Added(_ChangeBase[E1]):
+ def inverse(self) -> "Deleted[E1]":
+ return Deleted(self.after)
+
===========changed ref 2===========
# module: coeditor.change
@dataclass(frozen=True)
class Modified(_ChangeBase[E1]):
+ def inverse(self) -> "Modified[E1]":
+ return Modified(self.after, self.before)
+ |
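Round-trip sketch for the three definitions above: inverse() swaps the direction of a change, so applying it twice is the identity.

assert Modified("old", "new").inverse() == Modified("new", "old")
assert Added("x").inverse() == Deleted("x")
assert Deleted("x").inverse() == Added("x")
assert Modified("a", "b").inverse().inverse() == Modified("a", "b")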
coeditor.scoped_changes/JModuleChange.from_modules | Modified | temp-1 | f15359a0a5fb8953d3296a0483517e4c98cbf2e6 | Improve service UX with inversed changes. | <0>:<add> module_change.map(lambda m: m.as_scope), tuple(), only_ast_changes
| # module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
@staticmethod
+ def from_modules(module_change: Change[JModule], only_ast_changes: bool = True):
- def from_modules(module_change: Change[JModule]):
"Compute the change spans from two versions of the same module."
with _tlogger.timed("JModuleChange.from_modules"):
changed = get_changed_spans(
- module_change.map(lambda m: m.as_scope), tuple()
<0> )
return JModuleChange(module_change, changed)
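Hypothetical call shape (old_mod and new_mod stand for two JModule versions of the same file): pass only_ast_changes=False to also keep comment- or formatting-only edits.

mchange = JModuleChange.from_modules(Modified(old_mod, new_mod))
with_formatting = JModuleChange.from_modules(Modified(old_mod, new_mod), only_ast_changes=False)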
| ===========unchanged ref 0===========
at: coeditor.change
Change = Added[E1] | Deleted[E1] | Modified[E1]
at: coeditor.scoped_changes
ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange)
JModule(mname: ModuleName, tree: ptree.Module)
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ def inverse(self) -> "ChangedSpan":
+ return ChangedSpan(
+ self.change.inverse(),
+ [c.inverse() for c in self.parent_scopes],
+ self.line_range,
+ )
+
===========changed ref 1===========
# module: coeditor.change
@dataclass(frozen=True)
class Deleted(_ChangeBase[E1]):
+ def inverse(self) -> "Added[E1]":
+ return Added(self.before)
+
===========changed ref 2===========
# module: coeditor.change
@dataclass(frozen=True)
class Added(_ChangeBase[E1]):
+ def inverse(self) -> "Deleted[E1]":
+ return Deleted(self.after)
+
===========changed ref 3===========
# module: coeditor.change
@dataclass(frozen=True)
class Modified(_ChangeBase[E1]):
+ def inverse(self) -> "Modified[E1]":
+ return Modified(self.after, self.before)
+
===========changed ref 4===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
@staticmethod
def from_output_tks(lines: Sequence[int], tks: TokenSeq) -> "TkDelta":
ad_tks = (Add_id, Del_id)
+
+ def remove_newline(seg: TokenSeq):
+ if seg and seg[-1] == Newline_id:
+ del seg[-1]
+ return seg
def seg_to_tuple(seg: TokenSeq) -> tuple[TokenSeq]:
result = list[TokenSeq]()
ptr = 0
for i, x in enumerate(seg):
if i > 0 and x in ad_tks:
if seg[ptr] in ad_tks:
result.append(seg[ptr:i])
ptr = i
if ptr < len(seg) and seg[ptr] in ad_tks:
result.append(seg[ptr:])
+ return tuple(remove_newline(x) for x in result)
- return tuple(result)
segs = output_ids_as_seqs(tks)
assert_eq(len(segs), len(lines))
deltas = {l: seg_to_tuple(seg) for l, seg in zip(lines, segs.values()) if seg}
return TkDelta(deltas)
|
coeditor.scoped_changes/_parse_module_script | Modified | temp-1 | f15359a0a5fb8953d3296a0483517e4c98cbf2e6 | Improve service UX with inversed changes. | <0>:<add> print_err(f"project: {project.path}", file=sys.stderr)
| # module: coeditor.scoped_changes
+ def _parse_module_script(project: jedi.Project, path: Path):
- def _parse_module_script(project: Path, path: Path):
assert path.is_absolute(), f"Path is not absolute: {path=}"
script = jedi.Script(path=path, project=project)
mcontext = script._get_module_context()
assert isinstance(mcontext, ModuleContext)
mname = cast(str, mcontext.py__name__())
if mname.startswith("src."):
e = ValueError(f"Bad module name: {mname}")
+ files = list(project.path.iterdir())
- files = list(project.iterdir())
- print_err(f"project: {project}", file=sys.stderr)
<0> print_err(f"files in root: {files}", file=sys.stderr)
raise e
m = script._module_node
assert isinstance(m, ptree.Module)
# mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))
# m = parso.parse(path.read_text())
jmod = JModule(mname, m)
return jmod, script
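Usage sketch for the new signature (repo_root is a placeholder): the helper now receives a configured jedi.Project instead of a bare directory path.

project = jedi.Project(path=repo_root, added_sys_path=[repo_root / "src"])
jmod, script = _parse_module_script(project, repo_root / "src" / "pkg" / "mod.py")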
| ===========unchanged ref 0===========
at: coeditor.common
T1 = TypeVar("T1")
T2 = TypeVar("T2")
at: copy
deepcopy(x: _T, memo: Optional[Dict[int, Any]]=..., _nil: Any=...) -> _T
at: jedi.api
Script(code=None, *, path=None, environment=None, project=None)
at: jedi.api.Script
_get_module_context()
at: jedi.api.project
Project(path, *, environment_path=None, load_unsafe_extensions=False, sys_path=None, added_sys_path=(), smart_sys_path=True)
at: jedi.inference.context
ModuleContext()
at: pathlib
Path()
at: pathlib.PurePath
__slots__ = (
'_drv', '_root', '_parts',
'_str', '_hash', '_pparts', '_cached_cparts',
)
drive = property(attrgetter('_drv'),
doc="""The drive prefix (letter or UNC path), if any.""")
root = property(attrgetter('_root'),
doc="""The root of the path, if any.""")
is_absolute() -> bool
at: typing
Collection = _alias(collections.abc.Collection, 1)
===========changed ref 0===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
+ def inverse(self) -> Self:
+ "Create the inverse change."
+ return JModuleChange(
+ self.module_change.inverse(), [span.inverse() for span in self.changed]
+ )
+
===========changed ref 1===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ def inverse(self) -> "ChangedSpan":
+ return ChangedSpan(
+ self.change.inverse(),
+ [c.inverse() for c in self.parent_scopes],
+ self.line_range,
+ )
+
===========changed ref 2===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
@staticmethod
+ def from_modules(module_change: Change[JModule], only_ast_changes: bool = True):
- def from_modules(module_change: Change[JModule]):
"Compute the change spans from two versions of the same module."
with _tlogger.timed("JModuleChange.from_modules"):
changed = get_changed_spans(
+ module_change.map(lambda m: m.as_scope), tuple(), only_ast_changes
- module_change.map(lambda m: m.as_scope), tuple()
)
return JModuleChange(module_change, changed)
===========changed ref 3===========
# module: coeditor.change
@dataclass(frozen=True)
class Deleted(_ChangeBase[E1]):
+ def inverse(self) -> "Added[E1]":
+ return Added(self.before)
+
===========changed ref 4===========
# module: coeditor.change
@dataclass(frozen=True)
class Added(_ChangeBase[E1]):
+ def inverse(self) -> "Deleted[E1]":
+ return Deleted(self.after)
+
===========changed ref 5===========
# module: coeditor.change
@dataclass(frozen=True)
class Modified(_ChangeBase[E1]):
+ def inverse(self) -> "Modified[E1]":
+ return Modified(self.after, self.before)
+
===========changed ref 6===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
@staticmethod
def from_output_tks(lines: Sequence[int], tks: TokenSeq) -> "TkDelta":
ad_tks = (Add_id, Del_id)
+
+ def remove_newline(seg: TokenSeq):
+ if seg and seg[-1] == Newline_id:
+ del seg[-1]
+ return seg
def seg_to_tuple(seg: TokenSeq) -> tuple[TokenSeq]:
result = list[TokenSeq]()
ptr = 0
for i, x in enumerate(seg):
if i > 0 and x in ad_tks:
if seg[ptr] in ad_tks:
result.append(seg[ptr:i])
ptr = i
if ptr < len(seg) and seg[ptr] in ad_tks:
result.append(seg[ptr:])
+ return tuple(remove_newline(x) for x in result)
- return tuple(result)
segs = output_ids_as_seqs(tks)
assert_eq(len(segs), len(lines))
deltas = {l: seg_to_tuple(seg) for l, seg in zip(lines, segs.values()) if seg}
return TkDelta(deltas)
|
coeditor.scoped_changes/get_changed_spans | Modified | temp-1 | f15359a0a5fb8953d3296a0483517e4c98cbf2e6 | Improve service UX with inversed changes. | <0>:<add> if only_ast_changes and code_equal(old_scope.spans_code, new_scope.spans_code):
| # module: coeditor.scoped_changes
def get_changed_spans(
scope_change: Change[ChangeScope],
parent_changes: tuple[Change[ChangeScope], ...] = (),
+ only_ast_changes: bool = True,
) -> list[ChangedSpan]:
"""
Extract the change spans from scope change.
- We need a tree differencing algorithm that is robust to element movements.
- To compute the changes to each statement region, we can compute the differences
by concatenating all the regions before and after the edit
(and hiding all the sub spans such as class methods), then map the changes
to each line back to the original regions.
+
+ ## Args:
+ - `only_ast_changes`: if True, will skip the changes that are just caused by
+ comments or formatting changes.
"""
def get_modified_spans(
old_scope: ChangeScope,
new_scope: ChangeScope,
parent_changes: Sequence[Change[ChangeScope]],
) -> Iterable[ChangedSpan]:
- if code_equal(old_scope.spans_code, new_scope.spans_code):
<0> return
diffs = change_to_line_diffs(
Modified(old_scope.spans_code, new_scope.spans_code)
)
original, delta = line_diffs_to_original_delta(diffs)
line = 0
for span in old_scope.spans:
code = span.code
line_range = (line, line + count_lines(code))
if subdelta := delta.for_input_range(line_range).shifted(-line):
new_code = subdelta.apply_to_input(code)
change = Modified(code, new_code)
yield ChangedSpan(
change,
parent_changes,
span.line_range,
)
line = line_range[1]
def recurse(
scope_change: Change[ChangeScope], parent_changes
) -> Iterable[ChangedSpan]:
parent_changes = (*parent_changes, scope_change)
</s> | ===========below chunk 0===========
# module: coeditor.scoped_changes
def get_changed_spans(
scope_change: Change[ChangeScope],
parent_changes: tuple[Change[ChangeScope], ...] = (),
+ only_ast_changes: bool = True,
) -> list[ChangedSpan]:
# offset: 1
<s>], parent_changes
) -> Iterable[ChangedSpan]:
parent_changes = (*parent_changes, scope_change)
match scope_change:
case Modified(old_scope, new_scope):
# compute statement differences
yield from get_modified_spans(old_scope, new_scope, parent_changes)
for sub_change in get_named_changes(
old_scope.subscopes, new_scope.subscopes
).values():
yield from recurse(sub_change, parent_changes)
case Added(scope) | Deleted(scope):
for span in scope.spans:
code_change = scope_change.new_value(span.code)
yield ChangedSpan(
code_change,
parent_changes,
span.line_range,
)
for s in scope.subscopes.values():
s_change = scope_change.new_value(s)
yield from recurse(s_change, parent_changes)
spans = list(recurse(scope_change, parent_changes))
spans.sort(key=lambda s: s.line_range[0])
return spans
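A rough sketch (not from the source) of the "diff once, then map back" idea described in the docstring, using the helpers referenced above; exact newline handling may differ.

diffs = change_to_line_diffs(Modified("a = 1\nb = 2\n", "a = 1\nb = 3\n"))
original, delta = line_diffs_to_original_delta(diffs)
sub = delta.for_input_range((1, 2)).shifted(-1)  # restrict to the second line, re-based at 0
new_code = sub.apply_to_input("b = 2\n")         # expected: "b = 3\n"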
===========unchanged ref 0===========
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
timed(self, name: str)
at: coeditor.change
Modified(before: E1, after: E1, unchanged: bool=False)
Change = Added[E1] | Deleted[E1] | Modified[E1]
get_named_changes(old_map: Mapping[T1, T2], new_map: Mapping[T1, T2]) -> Mapping[T1, Change[T2]]
at: coeditor.common
count_lines(text: str) -> int
code_equal(code1: str, code2: str) -> bool
at: coeditor.encoding
change_to_line_diffs(change: Change[str]) -> list[str]
line_diffs_to_original_delta(diffs: list[str]) -> tuple[str, StrDelta]
at: coeditor.scoped_changes
_tlogger = TimeLogger()
ChangeScope(path: ProjectPath, tree: ScopeTree, spans: Sequence["StatementSpan"], subscopes: Mapping[str, Self], parent_scope: "ChangeScope | None")
ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange)
JProjectChange(project_name: str, changed: Mapping[ModuleName, JModuleChange], all_modules: Modified[Collection[JModule]], commit_info: "CommitInfo | None")
at: coeditor.scoped_changes.ChangeScope
path: ProjectPath
tree: ScopeTree
spans: Sequence["StatementSpan"]
subscopes: Mapping[str, Self]
parent_scope: "ChangeScope | None"
at: coeditor.scoped_changes.ProjectChangeProcessor
process_change(pchange: "JProjectChange", pre_analysis: Any, post_analysis: Any) -> Sequence[TProb]
at: coeditor.scoped_changes.StatementSpan.__post_init__
self.code: str = code + "\n"
===========unchanged ref 1===========
self.line_range: LineRange = line_range(start, end)
at: coeditor.scoped_changes._edits_from_commit_history
results = list[TProb]()
new_path2module[rel_path] = mod_new = parse_module(path)
changed[mod_new.mname] = JModuleChange.from_modules(
Modified(mod_old, mod_new)
)
post_analysis = change_processor.post_edit_analysis(
pstate,
new_path2module,
changed,
)
pre_analysis = change_processor.pre_edit_analysis(
pstate,
path2module,
changed,
)
at: typing
Iterable = _alias(collections.abc.Iterable, 1)
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
+ def inverse(self) -> Self:
+ "Create the inverse change."
+ return JModuleChange(
+ self.module_change.inverse(), [span.inverse() for span in self.changed]
+ )
+
===========changed ref 1===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ def inverse(self) -> "ChangedSpan":
+ return ChangedSpan(
+ self.change.inverse(),
+ [c.inverse() for c in self.parent_scopes],
+ self.line_range,
+ )
+
===========changed ref 2===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
@staticmethod
+ def from_modules(module_change: Change[JModule], only_ast_changes: bool = True):
- def from_modules(module_change: Change[JModule]):
"Compute the change spans from two versions of the same module."
with _tlogger.timed("JModuleChange.from_modules"):
changed = get_changed_spans(
+ module_change.map(lambda m: m.as_scope), tuple(), only_ast_changes
- module_change.map(lambda m: m.as_scope), tuple()
)
return JModuleChange(module_change, changed)
===========changed ref 3===========
# module: coeditor.scoped_changes
+ def _parse_module_script(project: jedi.Project, path: Path):
- def _parse_module_script(project: Path, path: Path):
assert path.is_absolute(), f"Path is not absolute: {path=}"
script = jedi.Script(path=path, project=project)
mcontext = script._get_module_context()
assert isinstance(mcontext, ModuleContext)
mname = cast(str, mcontext.py__name__())
if mname.startswith("src."):
e = ValueError(f"Bad module name: {mname}")
+ files = list(project.path.iterdir())
- files = list(project.iterdir())
+ print_err(f"project: {project.path}", file=sys.stderr)
- print_err(f"project: {project}", file=sys.stderr)
print_err(f"files in root: {files}", file=sys.stderr)
raise e
m = script._module_node
assert isinstance(m, ptree.Module)
# mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))
# m = parso.parse(path.read_text())
jmod = JModule(mname, m)
return jmod, script
===========changed ref 4===========
# module: coeditor.change
@dataclass(frozen=True)
class Deleted(_ChangeBase[E1]):
+ def inverse(self) -> "Added[E1]":
+ return Added(self.before)
+
===========changed ref 5===========
# module: coeditor.change
@dataclass(frozen=True)
class Added(_ChangeBase[E1]):
+ def inverse(self) -> "Deleted[E1]":
+ return Deleted(self.after)
+
===========changed ref 6===========
# module: coeditor.change
@dataclass(frozen=True)
class Modified(_ChangeBase[E1]):
+ def inverse(self) -> "Modified[E1]":
+ return Modified(self.after, self.before)
+ |
coeditor.c3problem/C3GeneratorCache.create_problem | Modified | temp-1 | f15359a0a5fb8953d3296a0483517e4c98cbf2e6 | Improve service UX with inversed changes. | <0>:<add> edit_lines, # one additional line for appending
| # module: coeditor.c3problem
class C3GeneratorCache:
def create_problem(
self,
target: ChangedSpan,
+ edit_lines: Sequence[int] | None,
changed: Mapping[ModuleName, JModuleChange],
target_usages: LineUsageAnalysis,
src_info: SrcInfo,
) -> C3Problem:
<s>
all_mc = [target_mc] + list(changed.values())
for mc in all_mc:
is_target_mc = mc.module_change.earlier.mname == module
for cspan in mc.changed:
if not is_target_mc or cspan.line_range != target.line_range:
relevant_changes.append(self.to_code_span(cspan))
code_span = self.to_code_span(target)
+ changed_code = code_span.delta.apply_to_change(code_span.original.tolist())
+ if edit_lines is None:
+ edit_lines = list[int]()
+ for i, tks in enumerate(split_list(changed_code, Newline_id)):
+ if tks and tks[0] == Del_id:
+ continue
+ edit_lines.append(i)
+ code_span = dataclasses.replace(
+ code_span, original=TkArray.new(changed_code), delta=TkDelta.empty()
+ )
relevant_unchanged = self.get_relevant_unchanged(
code_span, relevant_changes, target_usages
)
- n_lines = code_span.line_range[1] - code_span.line_range[0]
prob = C3Problem(
code_span,
- range(0, n_lines + 1), # one additional line for appending
<0> relevant_changes=relevant_changes,
relevant_unchanged=relevant_unchanged,
change_type=target.change.map(lambda _: None),
src_info=src_info,
)
return prob
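Standalone sketch of the default edit-line selection above, using stand-in ids (Newline_id=3 and Del_id=2 are assumptions) and a minimal split_list in place of coeditor.common.split_list:

Newline_id, Del_id = 3, 2

def split_list(lst, sep):
    out, cur = [], []
    for x in lst:
        if x == sep:
            out.append(cur)
            cur = []
        else:
            cur.append(x)
    out.append(cur)
    return out

changed_code = [9, Newline_id, Del_id, 9, Newline_id, 8]
edit_lines = [
    i
    for i, tks in enumerate(split_list(changed_code, Newline_id))
    if not (tks and tks[0] == Del_id)
]
assert edit_lines == [0, 2]  # deleted lines are skipped as edit targets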
| ===========above chunk 0===========
# module: coeditor.c3problem
class C3GeneratorCache:
def create_problem(
self,
target: ChangedSpan,
+ edit_lines: Sequence[int] | None,
changed: Mapping[ModuleName, JModuleChange],
target_usages: LineUsageAnalysis,
src_info: SrcInfo,
) -> C3Problem:
# offset: -1
relevant_changes = list[ChangedCodeSpan]()
changed = dict(changed)
module = target.module
target_mc = changed.pop(module)
all_mc = [target_mc] + list(changed.values())
for mc in all_mc:
is</s>
===========unchanged ref 0===========
at: coeditor.c3problem
ChangedCodeSpan(headers: Sequence[ChangedHeader], original: TkArray, delta: TkDelta, line_range: LineRange, module: ModuleName)
SrcInfo(map: Mapping[_KT, _VT], **kwargs: _VT)
SrcInfo(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
SrcInfo(**kwargs: _VT)
C3Problem(span: ChangedCodeSpan, edit_lines: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
LineUsageAnalysis(line2usages: Mapping[int, set[PyDefinition]])
at: coeditor.c3problem.C3GeneratorCache
get_relevant_unchanged(this_change: ChangedCodeSpan, other_changes: Collection[ChangedCodeSpan], line_usages: LineUsageAnalysis)
to_code_span(span: ChangedSpan)
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.change.Added
after: E1
at: coeditor.change.Deleted
before: E1
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
at: coeditor.common
split_list(lst: list[T1], sep: T1) -> list[list[T1]]
ModuleName = str
at: coeditor.encoding
Del_id = get_tk_id(Del)
Newline_id = get_tk_id("\n")
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
===========unchanged ref 1===========
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
apply_to_change(change: TokenSeq) -> TokenSeq
empty() -> "TkDelta"
at: coeditor.scoped_changes
ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange)
JModuleChange(module_change: Change[JModule], changed: Sequence[ChangedSpan])
at: coeditor.scoped_changes.ChangedSpan
change: Change[str]
parent_scopes: Sequence[Change[ChangeScope]]
line_range: LineRange
at: coeditor.scoped_changes.JModule
mname: ModuleName
tree: ptree.Module
at: coeditor.scoped_changes.JModuleChange
module_change: Change[JModule]
changed: Sequence[ChangedSpan]
at: coeditor.tk_array
TkArray()
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
new(tks: Sequence[int]) -> "TkArray"
at: dataclasses
replace(obj: _T, **changes: Any) -> _T
at: typing
Mapping = _alias(collections.abc.Mapping, 2)
Sequence = _alias(collections.abc.Sequence, 1)
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
===========changed ref 0===========
# module: coeditor.change
@dataclass(frozen=True)
class Deleted(_ChangeBase[E1]):
+ def inverse(self) -> "Added[E1]":
+ return Added(self.before)
+
===========changed ref 1===========
# module: coeditor.change
@dataclass(frozen=True)
class Added(_ChangeBase[E1]):
+ def inverse(self) -> "Deleted[E1]":
+ return Deleted(self.after)
+
===========changed ref 2===========
# module: coeditor.change
@dataclass(frozen=True)
class Modified(_ChangeBase[E1]):
+ def inverse(self) -> "Modified[E1]":
+ return Modified(self.after, self.before)
+
===========changed ref 3===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
+ def inverse(self) -> Self:
+ "Create the inverse change."
+ return JModuleChange(
+ self.module_change.inverse(), [span.inverse() for span in self.changed]
+ )
+
===========changed ref 4===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ def inverse(self) -> "ChangedSpan":
+ return ChangedSpan(
+ self.change.inverse(),
+ [c.inverse() for c in self.parent_scopes],
+ self.line_range,
+ )
+
===========changed ref 5===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
@staticmethod
+ def from_modules(module_change: Change[JModule], only_ast_changes: bool = True):
- def from_modules(module_change: Change[JModule]):
"Compute the change spans from two versions of the same module."
with _tlogger.timed("JModuleChange.from_modules"):
changed = get_changed_spans(
+ module_change.map(lambda m: m.as_scope), tuple(), only_ast_changes
- module_change.map(lambda m: m.as_scope), tuple()
)
return JModuleChange(module_change, changed)
===========changed ref 6===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
@staticmethod
def from_output_tks(lines: Sequence[int], tks: TokenSeq) -> "TkDelta":
ad_tks = (Add_id, Del_id)
+
+ def remove_newline(seg: TokenSeq):
+ if seg and seg[-1] == Newline_id:
+ del seg[-1]
+ return seg
def seg_to_tuple(seg: TokenSeq) -> tuple[TokenSeq]:
result = list[TokenSeq]()
ptr = 0
for i, x in enumerate(seg):
if i > 0 and x in ad_tks:
if seg[ptr] in ad_tks:
result.append(seg[ptr:i])
ptr = i
if ptr < len(seg) and seg[ptr] in ad_tks:
result.append(seg[ptr:])
+ return tuple(remove_newline(x) for x in result)
- return tuple(result)
segs = output_ids_as_seqs(tks)
assert_eq(len(segs), len(lines))
deltas = {l: seg_to_tuple(seg) for l, seg in zip(lines, segs.values()) if seg}
return TkDelta(deltas)
|
coeditor.service/ServiceResponse.to_json | Modified | temp-1 | f15359a0a5fb8953d3296a0483517e4c98cbf2e6 | Improve service UX with inversed changes. | <0>:<add> "old_code": self.input_code,
| # module: coeditor.service
@dataclass
class ServiceResponse:
def to_json(self):
return {
"target_file": self.target_file,
"edit_start": self.edit_start,
"edit_end": self.edit_end,
- "old_code": self.old_code,
<0> "suggestions": [s.to_json() for s in self.suggestions],
}
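Example payload shape (placeholder values). Note the JSON key stays "old_code", presumably for client compatibility, even though the dataclass field is now input_code:

example = {
    "target_file": "src/foo.py",
    "edit_start": (10, 0),
    "edit_end": (12, 0),
    "old_code": "def f():\n    ...\n",
    "suggestions": [{"score": 0.9, "change_preview": "...", "new_code": "..."}],
}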
| ===========changed ref 0===========
# module: coeditor.service
@dataclass
class ServiceResponse:
target_file: str
edit_start: tuple[int, int]
edit_end: tuple[int, int]
+ input_code: str
- old_code: str
suggestions: list[EditSuggestion]
===========changed ref 1===========
# module: coeditor.change
@dataclass(frozen=True)
class Deleted(_ChangeBase[E1]):
+ def inverse(self) -> "Added[E1]":
+ return Added(self.before)
+
===========changed ref 2===========
# module: coeditor.change
@dataclass(frozen=True)
class Added(_ChangeBase[E1]):
+ def inverse(self) -> "Deleted[E1]":
+ return Deleted(self.after)
+
===========changed ref 3===========
# module: coeditor.change
@dataclass(frozen=True)
class Modified(_ChangeBase[E1]):
+ def inverse(self) -> "Modified[E1]":
+ return Modified(self.after, self.before)
+
===========changed ref 4===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
+ def inverse(self) -> Self:
+ "Create the inverse change."
+ return JModuleChange(
+ self.module_change.inverse(), [span.inverse() for span in self.changed]
+ )
+
===========changed ref 5===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ def inverse(self) -> "ChangedSpan":
+ return ChangedSpan(
+ self.change.inverse(),
+ [c.inverse() for c in self.parent_scopes],
+ self.line_range,
+ )
+
===========changed ref 6===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
@staticmethod
+ def from_modules(module_change: Change[JModule], only_ast_changes: bool = True):
- def from_modules(module_change: Change[JModule]):
"Compute the change spans from two versions of the same module."
with _tlogger.timed("JModuleChange.from_modules"):
changed = get_changed_spans(
+ module_change.map(lambda m: m.as_scope), tuple(), only_ast_changes
- module_change.map(lambda m: m.as_scope), tuple()
)
return JModuleChange(module_change, changed)
===========changed ref 7===========
# module: scripts.prepare_data
if __name__ == "__main__":
os.chdir(proj_root())
dataset_name = "xl"
generator = C3ProblemGenerator()
transform = C3ProblemChangeDropout()
with run_long_task(f"Preparing dataset {dataset_name} with encoder {generator}"):
problems = make_or_load_dataset(
+ dataset_name, generator, transform, remake_problems=False
- dataset_name, generator, transform, remake_problems=True
)
tokenizer = C3ProblemTokenizer()
for name, probs in problems.items():
probs = cast(Sequence[C3Problem], probs)
print("=" * 40, name, "=" * 40)
stats = tokenizer._compute_stats(probs)
pretty_print_dict(stats)
===========changed ref 8===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
@staticmethod
def from_output_tks(lines: Sequence[int], tks: TokenSeq) -> "TkDelta":
ad_tks = (Add_id, Del_id)
+
+ def remove_newline(seg: TokenSeq):
+ if seg and seg[-1] == Newline_id:
+ del seg[-1]
+ return seg
def seg_to_tuple(seg: TokenSeq) -> tuple[TokenSeq]:
result = list[TokenSeq]()
ptr = 0
for i, x in enumerate(seg):
if i > 0 and x in ad_tks:
if seg[ptr] in ad_tks:
result.append(seg[ptr:i])
ptr = i
if ptr < len(seg) and seg[ptr] in ad_tks:
result.append(seg[ptr:])
+ return tuple(remove_newline(x) for x in result)
- return tuple(result)
segs = output_ids_as_seqs(tks)
assert_eq(len(segs), len(lines))
deltas = {l: seg_to_tuple(seg) for l, seg in zip(lines, segs.values()) if seg}
return TkDelta(deltas)
===========changed ref 9===========
# module: coeditor.scoped_changes
+ def _parse_module_script(project: jedi.Project, path: Path):
- def _parse_module_script(project: Path, path: Path):
assert path.is_absolute(), f"Path is not absolute: {path=}"
script = jedi.Script(path=path, project=project)
mcontext = script._get_module_context()
assert isinstance(mcontext, ModuleContext)
mname = cast(str, mcontext.py__name__())
if mname.startswith("src."):
e = ValueError(f"Bad module name: {mname}")
+ files = list(project.path.iterdir())
- files = list(project.iterdir())
+ print_err(f"project: {project.path}", file=sys.stderr)
- print_err(f"project: {project}", file=sys.stderr)
print_err(f"files in root: {files}", file=sys.stderr)
raise e
m = script._module_node
assert isinstance(m, ptree.Module)
# mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))
# m = parso.parse(path.read_text())
jmod = JModule(mname, m)
return jmod, script
===========changed ref 10===========
# module: coeditor.c3problem
class C3GeneratorCache:
def create_problem(
self,
target: ChangedSpan,
+ edit_lines: Sequence[int] | None,
changed: Mapping[ModuleName, JModuleChange],
target_usages: LineUsageAnalysis,
src_info: SrcInfo,
) -> C3Problem:
relevant_changes = list[ChangedCodeSpan]()
changed = dict(changed)
module = target.module
target_mc = changed.pop(module)
all_mc = [target_mc] + list(changed.values())
for mc in all_mc:
is_target_mc = mc.module_change.earlier.mname == module
for cspan in mc.changed:
if not is_target_mc or cspan.line_range != target.line_range:
relevant_changes.append(self.to_code_span(cspan))
code_span = self.to_code_span(target)
+ changed_code = code_span.delta.apply_to_change(code_span.original.tolist())
+ if edit_lines is None:
+ edit_lines = list[int]()
+ for i, tks in enumerate(split_list(changed_code, Newline_id)):
+ if tks and tks[0] == Del_id:
+ continue
+ edit_lines.append(i)
+ code_span = dataclasses.replace(
+ code_span, original=TkArray.new(changed_code), delta=TkDelta.empty()
+ )
relevant_unchanged = self.get_relevant_unchanged(
code_span, relevant_changes, target_usages
)
- n_lines = code_span.line_range[1] - code_span.line_range[0]
prob = C3Problem(
code_span,
+ edit_lines, # one additional line for appending
- range(0, n_lines + 1), # one additional line for appending
relevant_changes=</s> |
coeditor.service/ServiceResponse.print | Modified | temp-1 | f15359a0a5fb8953d3296a0483517e4c98cbf2e6 | Improve service UX with inversed changes. | <0>:<add> print(self.input_code, file=file)
| # module: coeditor.service
@dataclass
class ServiceResponse:
def print(self, file=sys.stdout):
print(f"Target file: {self.target_file}", file=file)
print(f"Edit range: {self.edit_start} - {self.edit_end}", file=file)
for i, s in enumerate(self.suggestions):
print(
f"\t--------------- Suggestion {i} (score: {s.score:.3g}) ---------------",
file=file,
)
print(textwrap.indent(s.change_preview, "\t"), file=file)
+ print(f"Input code:", file=file)
- print(f"original code:", file=file)
<0>
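Illustrative rendering of the report above (all values made up):

# Target file: src/foo.py
# Edit range: (10, 0) - (12, 0)
#     --------------- Suggestion 0 (score: 0.913) ---------------
#     <indented change preview>
# Input code:
# <current code in the edit range>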
| ===========unchanged ref 0===========
at: coeditor.service
EditSuggestion(score: float, change_preview: str, new_code: str)
at: dataclasses
dataclass(_cls: Type[_T]) -> Type[_T]
dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]]
dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]
===========changed ref 0===========
# module: coeditor.service
@dataclass
class ServiceResponse:
target_file: str
edit_start: tuple[int, int]
edit_end: tuple[int, int]
+ input_code: str
- old_code: str
suggestions: list[EditSuggestion]
===========changed ref 1===========
# module: coeditor.service
@dataclass
class ServiceResponse:
def to_json(self):
return {
"target_file": self.target_file,
"edit_start": self.edit_start,
"edit_end": self.edit_end,
+ "old_code": self.input_code,
- "old_code": self.old_code,
"suggestions": [s.to_json() for s in self.suggestions],
}
===========changed ref 2===========
# module: coeditor.change
@dataclass(frozen=True)
class Deleted(_ChangeBase[E1]):
+ def inverse(self) -> "Added[E1]":
+ return Added(self.before)
+
===========changed ref 3===========
# module: coeditor.change
@dataclass(frozen=True)
class Added(_ChangeBase[E1]):
+ def inverse(self) -> "Deleted[E1]":
+ return Deleted(self.after)
+
===========changed ref 4===========
# module: coeditor.change
@dataclass(frozen=True)
class Modified(_ChangeBase[E1]):
+ def inverse(self) -> "Modified[E1]":
+ return Modified(self.after, self.before)
+
===========changed ref 5===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
+ def inverse(self) -> Self:
+ "Create the inverse change."
+ return JModuleChange(
+ self.module_change.inverse(), [span.inverse() for span in self.changed]
+ )
+
===========changed ref 6===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class ChangedSpan:
+ def inverse(self) -> "ChangedSpan":
+ return ChangedSpan(
+ self.change.inverse(),
+ [c.inverse() for c in self.parent_scopes],
+ self.line_range,
+ )
+
===========changed ref 7===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
@staticmethod
+ def from_modules(module_change: Change[JModule], only_ast_changes: bool = True):
- def from_modules(module_change: Change[JModule]):
"Compute the change spans from two versions of the same module."
with _tlogger.timed("JModuleChange.from_modules"):
changed = get_changed_spans(
+ module_change.map(lambda m: m.as_scope), tuple(), only_ast_changes
- module_change.map(lambda m: m.as_scope), tuple()
)
return JModuleChange(module_change, changed)
===========changed ref 8===========
# module: scripts.prepare_data
if __name__ == "__main__":
os.chdir(proj_root())
dataset_name = "xl"
generator = C3ProblemGenerator()
transform = C3ProblemChangeDropout()
with run_long_task(f"Preparing dataset {dataset_name} with encoder {generator}"):
problems = make_or_load_dataset(
+ dataset_name, generator, transform, remake_problems=False
- dataset_name, generator, transform, remake_problems=True
)
tokenizer = C3ProblemTokenizer()
for name, probs in problems.items():
probs = cast(Sequence[C3Problem], probs)
print("=" * 40, name, "=" * 40)
stats = tokenizer._compute_stats(probs)
pretty_print_dict(stats)
===========changed ref 9===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
@staticmethod
def from_output_tks(lines: Sequence[int], tks: TokenSeq) -> "TkDelta":
ad_tks = (Add_id, Del_id)
+
+ def remove_newline(seg: TokenSeq):
+ if seg and seg[-1] == Newline_id:
+ del seg[-1]
+ return seg
def seg_to_tuple(seg: TokenSeq) -> tuple[TokenSeq]:
result = list[TokenSeq]()
ptr = 0
for i, x in enumerate(seg):
if i > 0 and x in ad_tks:
if seg[ptr] in ad_tks:
result.append(seg[ptr:i])
ptr = i
if ptr < len(seg) and seg[ptr] in ad_tks:
result.append(seg[ptr:])
+ return tuple(remove_newline(x) for x in result)
- return tuple(result)
segs = output_ids_as_seqs(tks)
assert_eq(len(segs), len(lines))
deltas = {l: seg_to_tuple(seg) for l, seg in zip(lines, segs.values()) if seg}
return TkDelta(deltas)
===========changed ref 10===========
# module: coeditor.scoped_changes
+ def _parse_module_script(project: jedi.Project, path: Path):
- def _parse_module_script(project: Path, path: Path):
assert path.is_absolute(), f"Path is not absolute: {path=}"
script = jedi.Script(path=path, project=project)
mcontext = script._get_module_context()
assert isinstance(mcontext, ModuleContext)
mname = cast(str, mcontext.py__name__())
if mname.startswith("src."):
e = ValueError(f"Bad module name: {mname}")
+ files = list(project.path.iterdir())
- files = list(project.iterdir())
+ print_err(f"project: {project.path}", file=sys.stderr)
- print_err(f"project: {project}", file=sys.stderr)
print_err(f"files in root: {files}", file=sys.stderr)
raise e
m = script._module_node
assert isinstance(m, ptree.Module)
# mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))
# m = parso.parse(path.read_text())
jmod = JModule(mname, m)
return jmod, script
|
coeditor.service/EditPredictionService.suggest_edit | Modified | temp-1 | f15359a0a5fb8953d3296a0483517e4c98cbf2e6 | Improve service UX with inversed changes. | <0>:<add> input_code=old_code,
| # module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
+ edit_lines: Sequence[int],
- line: int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
<s> for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt").open("w") as f:
pred_tks = pred.out_tks
score = pred.score
print(f"{problem.edit_lines=}", file=f)
print(f"{len(input_tks)=}", file=f)
print(f"{len(references)=}", file=f)
print(f"Solution score: {score:.3g}", file=f)
print(f"Marginalized samples:", pred.n_samples, file=f)
pred = RetrievalModelPrediction(
input_ids=input_tks,
output_ids=pred_tks,
labels=output_truth,
references=references,
)
pred_str = RetrievalDecodingResult.show_prediction(problem, pred)
print(pred_str, file=f)
suggestions = list[EditSuggestion]()
for pred in predictions:
suggested_change, preview = self.apply_edit_to_elem(
problem,
pred.out_tks,
)
suggestion = EditSuggestion(
score=pred.score,
change_preview=preview,
new_code=suggested_change.after,
)
suggestions.append(suggestion)
span = problem.span
old_code = tokens_to_change(span.original.tolist()).after
return ServiceResponse(
target_file=file.as_posix(),
edit_start=(span.line_range[0], 0),
edit_end=(span.line_range[1], 0),
- old_code=old_code,
<0> suggestions=suggestions,
)
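Hypothetical driver for the method above (construction details elided; the file path and edit_lines values are made up):

service = EditPredictionService(...)  # detector, model, tokenizer, decoding args
resp = service.suggest_edit(Path("src/foo.py"), edit_lines=[10, 11, 12])
resp.print()               # human-readable report
payload = resp.to_json()   # machine-readable form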
| ===========above chunk 0===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
+ edit_lines: Sequence[int],
- line: int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
# offset: -1
timed = self.tlogger.timed
project = self.project
if file.is_absolute():
file = file.relative_to(project)
file = to_rel_path(file)
with timed("get c3 problem"):
+ problem = self.detector.get_problem(file, edit_lines)
- problem = self.detector.get_problem(file, line)
with timed("tokenize c3 problem"):
tk_prob = self.c3_tkn.tokenize_problem(problem)
batch = C3DataLoader.pack_batch([tk_prob])
original = problem.span.original.tolist()
with timed("run model"), torch.autocast("cuda"):
predictions = self.model.predict_on_batch(
batch, [original], self.dec_args, self.show_max_solutions
)
assert_eq(len(predictions), 1)
predictions = predictions[0]
assert predictions
if log_dir is not None:
log_dir.mkdir(exist_ok=True)
input_tks = batch["input_ids"][0]
references = batch["references"]
output_truth = batch["labels"][0]
print(f"Writing logs to: {log_dir}")
for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt").open("w") as</s>
===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
timed(self, name: str)
at: coeditor.c3problem
C3ProblemTokenizer(max_ref_tks: int=512, max_query_tks: int=512, max_output_tks: int=256, max_scope_tks: int=128, max_ref_tks_sum: int=512 * 16, ref_chunk_overlap: int=32, disable_builtin_defs: bool=True, disable_unchanged_refs: bool=False, current_code_only: bool=False)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.7"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 16
ref_chunk_overlap: int = 32
disable_builtin_defs: bool = True
disable_unchanged_refs: bool = False
current_code_only: bool = False
tokenize_problem(problem: C3Problem) -> TkC3Problem
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
===========unchanged ref 1===========
at: coeditor.common
to_rel_path(path: os.PathLike | str) -> RelPath
at: coeditor.model
DecodingArgs(max_output_tks: int=512, do_sample: bool=False, top_p: float=0.9, num_beams: Optional[int]=1, length_penalty: float=0.0, marginalize_samples: int=1)
RetrievalModelPrediction(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
RetrievalModelPrediction(**kwargs: _VT)
RetrievalModelPrediction(map: Mapping[_KT, _VT], **kwargs: _VT)
RetrievalEditorModel(config: T5Config)
BatchArgs(batch_size: int=1, shuffle_extra_ids: bool=True)
C3DataLoader(all_probs: Sequence[C3Problem], transform: C3ProblemTransform | None, tokenizer: C3ProblemTokenizer, batch_args: BatchArgs, shuffle: bool, desc: str, tqdm_args: dict | None=None, chunk_size: int=1000, workers: int=DefaultWorkers)
at: coeditor.model.C3DataLoader
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
tqdm_args: dict | None = None
chunk_size: int = 1000
workers: int = DefaultWorkers
pack_batch(probs: Sequence[TkC3Problem])
at: coeditor.model.PredictedChange
change: Modified[str]
out_tks: TokenSeq
score: float
n_samples: int
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
===========unchanged ref 2===========
decorate_autocast(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1)
predict_on_batch(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1) -> list[list[PredictedChange]]
at: coeditor.model.RetrievalEditorModel.__init__
self.tlogger = TimeLogger()
at: coeditor.service
_tlogger = TimeLogger()
ChangeDetector(project: Path, untracked_as_additions: bool=True, ignore_dirs: Collection[str]=field(default_factory=lambda: DefaultIgnoreDirs))
EditSuggestion(score: float, change_preview: str, new_code: str)
ServiceResponse(target_file: str, edit_start: tuple[int, int], edit_end: tuple[int, int], input_code: str, suggestions: list[EditSuggestion])
at: coeditor.service.ChangeDetector
project: Path
get_problem(self, target_file: RelPath, target_lines: Sequence[int] | int) -> C3Problem
get_problem(target_file: RelPath, target_lines: Sequence[int] | int) -> C3Problem
at: coeditor.service.EditPredictionService
apply_edit_to_elem(problem: C3Problem, out_tks: TokenSeq) -> tuple[Modified[str], str]
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
at: dataclasses
dataclass(_cls: Type[_T]) -> Type[_T]
dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]]
dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]
at: pathlib
Path()
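===========sketch: response payload===========
A minimal sketch of the payload shape that the `ServiceResponse` assembled above serializes to (see `to_json` in the refs below); every field value here is hypothetical, and the real `suggestions` entries come from `EditSuggestion.to_json`:

# illustrative payload only; the shape follows the constructor calls above
response = {
    "target_file": "coeditor/service.py",         # file.as_posix()
    "edit_start": (42, 0),                         # (span.line_range[0], 0)
    "edit_end": (58, 0),                           # (span.line_range[1], 0)
    "old_code": "def f(x):\n    return x + 1\n",   # serialized from input_code
    "suggestions": [
        {"score": 0.71,
         "change_preview": "-    return x + 1\n+    return x + 2",
         "new_code": "def f(x):\n    return x + 2\n"},
    ],
}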
|
coeditor.service/EditPredictionService.apply_edit_to_elem | Modified | temp-1 | f15359a0a5fb8953d3296a0483517e4c98cbf2e6 | Improve service UX with inversed changes. | <0>:<add> preview = default_show_diff(current_code, new_change.after)
| # module: coeditor.service
@dataclass
class EditPredictionService:
@staticmethod
def apply_edit_to_elem(
problem: C3Problem,
out_tks: TokenSeq,
) -> tuple[Modified[str], str]:
change_tks = problem.span.original.tolist()
delta = TkDelta.from_output_tks(problem.edit_lines, out_tks)
new_change_tks = delta.apply_to_change(change_tks)
new_change = tokens_to_change(new_change_tks)
+ current_code = tokens_to_change(change_tks).after
- preview = default_show_diff(new_change.before, new_change.after)
<0> return new_change, preview
| ===========unchanged ref 0===========
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
at: coeditor.c3problem.ChangedCodeSpan
line_range: LineRange
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.service.EditPredictionService.suggest_edit
file = file.relative_to(project)
file = to_rel_path(file)
suggestions = list[EditSuggestion]()
span = problem.span
old_code = tokens_to_change(span.original.tolist()).after
at: pathlib.PurePath
as_posix() -> str
===========changed ref 0===========
# module: coeditor.service
@dataclass
class EditPredictionService:
- def preview_changes(
- self,
- change: Modified[str],
- respect_lines: int,
- ) -> str:
- change_tks = change_to_tokens(change)
- (input_tks, output_tks), _ = change_tks_to_query_context(
- change_tks, respect_lines
- )
- new_change = tokens_to_change(inline_output_tokens(input_tks, output_tks))
- change_str = default_show_diff(new_change.before, new_change.after)
- return change_str
-
===========changed ref 1===========
# module: coeditor.service
@dataclass
class ServiceResponse:
target_file: str
edit_start: tuple[int, int]
edit_end: tuple[int, int]
+ input_code: str
- old_code: str
suggestions: list[EditSuggestion]
===========changed ref 2===========
# module: coeditor.service
@dataclass
class ServiceResponse:
def to_json(self):
return {
"target_file": self.target_file,
"edit_start": self.edit_start,
"edit_end": self.edit_end,
+ "old_code": self.input_code,
- "old_code": self.old_code,
"suggestions": [s.to_json() for s in self.suggestions],
}
===========changed ref 3===========
# module: coeditor.service
@dataclass
class ServiceResponse:
def print(self, file=sys.stdout):
print(f"Target file: {self.target_file}", file=file)
print(f"Edit range: {self.edit_start} - {self.edit_end}", file=file)
for i, s in enumerate(self.suggestions):
print(
f"\t--------------- Suggestion {i} (score: {s.score:.3g}) ---------------",
file=file,
)
print(textwrap.indent(s.change_preview, "\t"), file=file)
+ print(f"Input code:", file=file)
- print(f"original code:", file=file)
+ print(self.input_code, file=file)
===========changed ref 4===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
+ edit_lines: Sequence[int],
- line: int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
timed = self.tlogger.timed
project = self.project
if file.is_absolute():
file = file.relative_to(project)
file = to_rel_path(file)
with timed("get c3 problem"):
+ problem = self.detector.get_problem(file, edit_lines)
- problem = self.detector.get_problem(file, line)
with timed("tokenize c3 problem"):
tk_prob = self.c3_tkn.tokenize_problem(problem)
batch = C3DataLoader.pack_batch([tk_prob])
original = problem.span.original.tolist()
with timed("run model"), torch.autocast("cuda"):
predictions = self.model.predict_on_batch(
batch, [original], self.dec_args, self.show_max_solutions
)
assert_eq(len(predictions), 1)
predictions = predictions[0]
assert predictions
if log_dir is not None:
log_dir.mkdir(exist_ok=True)
input_tks = batch["input_ids"][0]
references = batch["references"]
output_truth = batch["labels"][0]
print(f"Writing logs to: {log_dir}")
for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt").open("w") as f:
pred_tks = pred.out_tks
score = pred.score
print(f"{problem.edit_lines=}", file=f)
print(f"{len(input_tks)=}", file=f)
print(f"{len(references)=}", file=f</s>
===========changed ref 5===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
+ edit_lines: Sequence[int],
- line: int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
# offset: 1
<s>len(input_tks)=}", file=f)
print(f"{len(references)=}", file=f)
print(f"Solution score: {score:.3g}", file=f)
print(f"Marginalized samples:", pred.n_samples, file=f)
pred = RetrievalModelPrediction(
input_ids=input_tks,
output_ids=pred_tks,
labels=output_truth,
references=references,
)
pred_str = RetrievalDecodingResult.show_prediction(problem, pred)
print(pred_str, file=f)
suggestions = list[EditSuggestion]()
for pred in predictions:
suggested_change, preview = self.apply_edit_to_elem(
problem,
pred.out_tks,
)
suggestion = EditSuggestion(
score=pred.score,
change_preview=preview,
new_code=suggested_change.after,
)
suggestions.append(suggestion)
span = problem.span
old_code = tokens_to_change(span.original.tolist()).after
return ServiceResponse(
target_file=file.as_posix(),
edit_start=(span.line_range[0], 0),
edit_end=(span.line_range[1], 0),
+ input_code=old_code,
- old_code=old_code,
suggestions=suggestions,
)
===========changed ref 6===========
# module: coeditor.change
@dataclass(frozen=True)
class Deleted(_ChangeBase[E1]):
+ def inverse(self) -> "Added[E1]":
+ return Added(self.before)
+
===========changed ref 7===========
# module: coeditor.change
@dataclass(frozen=True)
class Added(_ChangeBase[E1]):
+ def inverse(self) -> "Deleted[E1]":
+ return Deleted(self.after)
+
===========changed ref 8===========
# module: coeditor.change
@dataclass(frozen=True)
class Modified(_ChangeBase[E1]):
+ def inverse(self) -> "Modified[E1]":
+ return Modified(self.after, self.before)
+
===========changed ref 9===========
# module: coeditor.scoped_changes
@dataclass(frozen=True)
class JModuleChange:
+ def inverse(self) -> Self:
+ "Create the inverse change."
+ return JModuleChange(
+ self.module_change.inverse(), [span.inverse() for span in self.changed]
+ )
+ |
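===========sketch: inverse changes===========
The `inverse` methods added in the refs above all mirror each other; a minimal standalone sketch (toy generic dataclasses, not the project's real `coeditor.change` classes) shows the round-trip property they provide:

from dataclasses import dataclass
from typing import Generic, TypeVar

E = TypeVar("E")

@dataclass(frozen=True)
class Added(Generic[E]):
    after: E
    def inverse(self) -> "Deleted[E]":
        return Deleted(self.after)

@dataclass(frozen=True)
class Deleted(Generic[E]):
    before: E
    def inverse(self) -> "Added[E]":
        return Added(self.before)

@dataclass(frozen=True)
class Modified(Generic[E]):
    before: E
    after: E
    def inverse(self) -> "Modified[E]":
        return Modified(self.after, self.before)

# inverting twice recovers the original change
m = Modified("old body", "new body")
assert m.inverse().inverse() == m
assert Added("x").inverse() == Deleted("x")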
coeditor.encoding/TkDelta.from_output_tks | Modified | temp-1 | a19ca9519caf03745b65f959367091eee884fc98 | - return target lines in service response. | <0>:<add> assert_eq(len(segs), len(lines))
| # module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
@staticmethod
+ def from_output_tks(
+ lines: Sequence[int], tks: TokenSeq, allow_truncated_tks: bool = True
+ ) -> "TkDelta":
- def from_output_tks(lines: Sequence[int], tks: TokenSeq) -> "TkDelta":
ad_tks = (Add_id, Del_id)
def remove_newline(seg: TokenSeq):
if seg and seg[-1] == Newline_id:
del seg[-1]
return seg
def seg_to_tuple(seg: TokenSeq) -> tuple[TokenSeq]:
result = list[TokenSeq]()
ptr = 0
for i, x in enumerate(seg):
if i > 0 and x in ad_tks:
if seg[ptr] in ad_tks:
result.append(seg[ptr:i])
ptr = i
if ptr < len(seg) and seg[ptr] in ad_tks:
result.append(seg[ptr:])
return tuple(remove_newline(x) for x in result)
segs = output_ids_as_seqs(tks)
+ if not allow_truncated_tks:
- assert_eq(len(segs), len(lines))
<0> deltas = {l: seg_to_tuple(seg) for l, seg in zip(lines, segs.values()) if seg}
return TkDelta(deltas)
| ===========unchanged ref 0===========
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.encoding
Add_id = get_tk_id(Add)
Del_id = get_tk_id(Del)
Newline_id = get_tk_id("\n")
output_ids_as_seqs(output_ids: Iterable[Token]) -> dict[Token, TokenSeq]
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
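===========sketch: splitting output segments===========
A self-contained rerun of the `seg_to_tuple` grouping above, with hypothetical integer ids standing in for the real `<add>`/`<del>`/newline tokens; only the splitting logic is reproduced:

ADD, DEL, NL = 1001, 1002, 10  # hypothetical token ids

def seg_to_tuple(seg: list[int]) -> tuple[list[int], ...]:
    """Split one output segment into atomic actions, each led by ADD or DEL."""
    result: list[list[int]] = []
    ptr = 0
    for i, x in enumerate(seg):
        if i > 0 and x in (ADD, DEL):
            if seg[ptr] in (ADD, DEL):
                result.append(seg[ptr:i])
            ptr = i
    if ptr < len(seg) and seg[ptr] in (ADD, DEL):
        result.append(seg[ptr:])
    # drop the trailing newline of each action, as remove_newline does
    return tuple(a[:-1] if a and a[-1] == NL else a for a in result)

# "add tokens 5 6, then delete the next line" splits into two actions
assert seg_to_tuple([ADD, 5, 6, NL, DEL]) == ([ADD, 5, 6], [DEL])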
|
coeditor.c3problem/TkC3Problem.input_tks | Modified | temp-1 | a19ca9519caf03745b65f959367091eee884fc98 | - return target lines in service response. | <0>:<add> return self.header.tolist() + self.main_input.tolist()
| # module: coeditor.c3problem
@dataclass(frozen=True)
class TkC3Problem(TokenizedEdit):
@property
def input_tks(self) -> TokenSeq:
- return self.input.tolist()
<0>
| ===========unchanged ref 0===========
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.encoding.TokenizedEdit
input_tks: TokenSeq
output_tks: TokenSeq
main_tks: TokenSeq
path: ProjectPath
change_type: Change[None]
BAD_DELETE = encode_single_line("((bad delete))")
===========changed ref 0===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class TkC3Problem(TokenizedEdit):
"Tokenized contextual code change prediction problem."
+ main_input: TkArray
- input: TkArray
+ header: TkArray
output: TkArray
path: ProjectPath
change_type: Change[None]
# most relevant to least relevant
named_references: Sequence[tuple[str, TkArray]]
project: str
commit: CommitInfo | None
===========changed ref 1===========
# module: coeditor.encoding
- def apply_output_tks_to_change(
- change_tks: TokenSeq,
- respect_lines: int,
- out_tks: TokenSeq,
- ) -> Modified[str]:
- (input_tks, _), context = change_tks_to_query_context(change_tks, respect_lines)
- change_tks = (
- context
- + [Newline_id]
- + inline_output_tokens(input_tks, out_tks, leave_unpredicted=False)
- )
- return tokens_to_change(change_tks)
-
===========changed ref 2===========
# module: coeditor.encoding
+ def input_lines_from_tks(input_tks: TokenSeq) -> list[int]:
+ """compute the lines in the input to be edited"""
+ input_lines = list[int]()
+ offset = 0
+ for line in split_list(input_tks, Newline_id):
+ if line and line[0] == Del_id:
+ continue
+ if line and is_extra_id(line[0]):
+ input_lines.append(offset)
+ offset += 1
+ return input_lines
+
===========changed ref 3===========
# module: coeditor.encoding
- def change_tks_to_query_context(change_tks: TokenSeq, respect_lines: int):
- lines = split_list(change_tks, Newline_id)
- spliter = 0
- result_lines = 0
- for i, l in enumerate(lines):
- if l and l[0] == Del_id:
- pass
- else:
- result_lines += 1
- if result_lines <= respect_lines:
- spliter = i + 1
-
- context = join_list(lines[:spliter], Newline_id)
- query = change_tks_to_input_output(join_list(lines[spliter:], Newline_id))
- return query, context
-
===========changed ref 4===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
@staticmethod
+ def from_output_tks(
+ lines: Sequence[int], tks: TokenSeq, allow_truncated_tks: bool = True
+ ) -> "TkDelta":
- def from_output_tks(lines: Sequence[int], tks: TokenSeq) -> "TkDelta":
ad_tks = (Add_id, Del_id)
def remove_newline(seg: TokenSeq):
if seg and seg[-1] == Newline_id:
del seg[-1]
return seg
def seg_to_tuple(seg: TokenSeq) -> tuple[TokenSeq]:
result = list[TokenSeq]()
ptr = 0
for i, x in enumerate(seg):
if i > 0 and x in ad_tks:
if seg[ptr] in ad_tks:
result.append(seg[ptr:i])
ptr = i
if ptr < len(seg) and seg[ptr] in ad_tks:
result.append(seg[ptr:])
return tuple(remove_newline(x) for x in result)
segs = output_ids_as_seqs(tks)
+ if not allow_truncated_tks:
+ assert_eq(len(segs), len(lines))
- assert_eq(len(segs), len(lines))
deltas = {l: seg_to_tuple(seg) for l, seg in zip(lines, segs.values()) if seg}
return TkDelta(deltas)
|
coeditor.c3problem/C3ProblemTokenizer.tokenize_problem | Modified | temp-1 | a19ca9519caf03745b65f959367091eee884fc98 | - return target lines in service response. | <0>:<add> TkArray.new(scope_tks),
| # module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
<s>chunk))
for i, chunk in enumerate(below_chunks)
]
all_refs = above_chunks + below_chunks
ref_size_sum = sum(len(ref) for _, ref in all_refs)
# compute the references that are relevant to this span
if ref_size_sum < self.max_ref_tks_sum:
changed = self._group_encode_changed_refs(problem.relevant_changes)
for i, chunk in enumerate(changed):
all_refs.append((f"changed ref {i}", TkArray.new(chunk)))
ref_size_sum += sum(len(x) for x in changed)
if ref_size_sum < self.max_ref_tks_sum:
unchanged = self._group_encode_unchanged_refs(problem.relevant_unchanged)
for i, chunk in enumerate(unchanged):
all_refs.append((f"unchanged ref {i}", TkArray.new(chunk)))
# take until we hit the limit
ref_size_sum = 0
kept_refs = list[tuple[str, TkArray]]()
for (name, ref) in all_refs:
if ref_size_sum + len(ref) > self.max_ref_tks_sum:
continue
ref_size_sum += len(ref)
kept_refs.append((name, ref))
return TkC3Problem(
+ TkArray.new(chunk_input),
- TkArray.new(scope_tks + chunk_input),
<0> TkArray.new(chunk_output),
path=span.headers[-1].path,
change_type=problem.change_type,
named_references=kept_refs,
project=problem.src_info["project"],
commit=problem.src_info["commit"],
)
| ===========above chunk 0===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: -1
<s>tks = join_list(origin_lines[:edit_start] + [TokenSeq()], Newline_id)
above_tks = tk_delta.for_input_range((0, edit_start)).apply_to_change(above_tks)
below_tks = join_list(origin_lines[edit_stop:] + [TokenSeq()], Newline_id)
chunk_input, above_tks, below_tks = self._inline_some_context(
chunk_input, above_tks, below_tks, input_limit
)
chunk_output = truncate_section(
chunk_output,
TruncateAt.Right,
self.max_output_tks,
add_bos=False,
inplace=True,
)
above_chunks = break_into_chunks(
above_tks,
lambda i: self._encode_headers(span.headers, -1 - i),
chunk_size=self.max_ref_tks,
overlap=self.ref_chunk_overlap,
right_to_left=True,
)
if not below_tks:
below_chunks = []
else:
below_chunks = break_into_chunks(
below_tks,
lambda i: self._encode_headers(span.headers, i + 1),
chunk_size=self.max_ref_tks,
overlap=self.ref_chunk_overlap,
)
above_chunks = [
(f"above chunk {i}", TkArray.new(chunk))
for i, chunk in enumerate(above_chunks)
]
below_chunks = [
(f"below chunk {i}", TkArray.new(chunk))
for i, chunk in enumerate(below_chunks)
]
all_refs = above_chunks + below_</s>
===========above chunk 1===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: -2
<s> = problem.edit_lines[0]
scope_tks = self._encode_headers(span.headers, 0)
input_limit = self.max_query_tks - len(scope_tks)
chunk_input = TokenSeq()
chunk_output = TokenSeq()
last_line = edit_start
for i, l in enumerate(problem.edit_lines):
for line in origin_lines[last_line + 1 : l]:
chunk_input.extend(line)
chunk_input.append(Newline_id)
chunk_input.append(get_extra_id(i))
if l < len(origin_lines):
chunk_input.extend(origin_lines[l])
chunk_input.append(Newline_id)
last_line = l
line_change = join_list(tk_delta.get_line_change(l), Newline_id)
chunk_output.append(get_extra_id(i))
chunk_output.extend(line_change)
if line_change and line_change[-1] != Del_id:
chunk_output.append(Newline_id)
if len(chunk_input) > input_limit:
break
edit_stop = last_line + 1
# limit the input size if it's too long
chunk_input = truncate_section(
chunk_input, TruncateAt.Right, input_limit, inplace=True
)
chunk_output = truncate_output_tks(chunk_input, chunk_output)
        # try to move some prev_change_tks into the input
above_tks = join_list(origin_lines[:edit_start] + [TokenSeq()], Newline_id)
above_</s>
===========above chunk 2===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: -3
span = problem.span
original: TokenSeq = span.original.tolist()
tk_delta: TkDelta = span.delta
origin_lines = split_list(original, Newline_id)
edit</s>
===========unchanged ref 0===========
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_lines: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
TkC3Problem(main_input: TkArray, header: TkArray, output: TkArray, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TkArray]], project: str, commit: CommitInfo | None)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_lines: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Sequence[ChangedCodeSpan]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.3"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 12
ref_chunk_overlap: int = 32
_encode_headers(scope_changes: Sequence[ChangedHeader], offset: int) -> TokenSeq
_inline_some_context(input: TokenSeq, above_ctx: TokenSeq, below_ctx: TokenSeq, size_limit: int) -> tuple[TokenSeq, TokenSeq, TokenSeq]
_group_encode_unchanged_refs(elems: Sequence[ChangedCodeSpan]) -> Sequence[TokenSeq]
_group_encode_changed_refs(changes: Sequence[ChangedCodeSpan]) -> Sequence[TokenSeq]
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
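===========sketch: query construction===========
The loop above interleaves an `<extra_id_i>` sentinel with every editable line and mirrors the same sentinel in the label; a plain-string sketch of that pairing (hypothetical helper, no real tokenization or truncation):

def build_query(lines: list[str], edit_line_ids: list[int],
                line_changes: dict[int, str]) -> tuple[str, str]:
    """Each editable line gets a sentinel in the input; the label repeats the
    sentinel followed by that line's change (nothing when unchanged)."""
    inp: list[str] = []
    out: list[str] = []
    for i, l in enumerate(edit_line_ids):
        sentinel = f"<extra_id_{i}>"
        inp.append(sentinel)
        if l < len(lines):
            inp.append(lines[l])
        if l in line_changes:
            out.append(sentinel)
            out.append(line_changes[l])
    return "\n".join(inp), "\n".join(out)

lines = ["def f(x):", "    return x + 1"]
inp, out = build_query(lines, [0, 1], {1: "<add>    return x + 2\n<del>"})
print(inp)  # sentinel before every editable line
print(out)  # only line 1 contributes to the label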
|
coeditor.model/RetrievalDecodingResult.show_prediction | Modified | temp-1 | a19ca9519caf03745b65f959367091eee884fc98 | - return target lines in service response. | <0>:<add> header=TkArray.new([]),
| # module: coeditor.model
@dataclass
class RetrievalDecodingResult:
@classmethod
def show_prediction(cls, prob: C3Problem, pred: RetrievalModelPrediction) -> str:
span = prob.span
tk_prob = TkC3Problem(
+ main_input=TkArray.new(pred["input_ids"]),
- input=TkArray.new(pred["input_ids"]),
<0> output=TkArray.new(pred["labels"]),
path=span.headers[-1].path,
change_type=prob.change_type,
named_references=[
(f"reference-{i}", TkArray.new(ref))
for i, ref in enumerate(pred["references"])
],
project=prob.src_info["project"],
commit=prob.src_info["commit"],
)
return tk_prob.show(pred["output_ids"])
| ===========unchanged ref 0===========
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.c3problem.ChangedHeader
change_tks: TkArray
type: str
line_range: LineRange
path: ProjectPath
at: coeditor.c3problem.TkC3Problem
main_input: TkArray
header: TkArray
output: TkArray
path: ProjectPath
change_type: Change[None]
named_references: Sequence[tuple[str, TkArray]]
project: str
commit: CommitInfo | None
truncated: bool
at: coeditor.encoding.TokenizedEdit
input_tks: TokenSeq
output_tks: TokenSeq
main_tks: TokenSeq
path: ProjectPath
change_type: Change[None]
BAD_DELETE = encode_single_line("((bad delete))")
show(pred_tks: TokenSeq | None=None, skip_ctx: bool=False, skip_meta: bool=False) -> str
at: coeditor.model.AttentionMode
bidirectional = enum.auto()
at: coeditor.model.RetrievalDecodingResult.show_prediction
span = prob.span
===========unchanged ref 1===========
tk_prob = TkC3Problem(
main_input=TkArray.new(pred["input_ids"]),
header=TkArray.new([]),
output=TkArray.new(pred["labels"]),
path=span.headers[-1].path,
change_type=prob.change_type,
named_references=[
(f"reference-{i}", TkArray.new(ref))
for i, ref in enumerate(pred["references"])
],
project=prob.src_info["project"],
commit=prob.src_info["commit"],
)
at: coeditor.tk_array
TkArray()
at: coeditor.tk_array.TkArray
new(tks: Sequence[int]) -> "TkArray"
at: enum
auto(x: Union[str, bytes, SupportsInt, _SupportsIndex, _SupportsTrunc]=...)
auto(x: Union[str, bytes, bytearray], base: int)
Enum()
===========changed ref 0===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class TkC3Problem(TokenizedEdit):
@property
def input_tks(self) -> TokenSeq:
+ return self.header.tolist() + self.main_input.tolist()
- return self.input.tolist()
===========changed ref 1===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class TkC3Problem(TokenizedEdit):
"Tokenized contextual code change prediction problem."
+ main_input: TkArray
- input: TkArray
+ header: TkArray
output: TkArray
path: ProjectPath
change_type: Change[None]
# most relevant to least relevant
named_references: Sequence[tuple[str, TkArray]]
project: str
commit: CommitInfo | None
===========changed ref 2===========
# module: coeditor.encoding
- def apply_output_tks_to_change(
- change_tks: TokenSeq,
- respect_lines: int,
- out_tks: TokenSeq,
- ) -> Modified[str]:
- (input_tks, _), context = change_tks_to_query_context(change_tks, respect_lines)
- change_tks = (
- context
- + [Newline_id]
- + inline_output_tokens(input_tks, out_tks, leave_unpredicted=False)
- )
- return tokens_to_change(change_tks)
-
===========changed ref 3===========
# module: coeditor.encoding
+ def input_lines_from_tks(input_tks: TokenSeq) -> list[int]:
+ """compute the lines in the input to be edited"""
+ input_lines = list[int]()
+ offset = 0
+ for line in split_list(input_tks, Newline_id):
+ if line and line[0] == Del_id:
+ continue
+ if line and is_extra_id(line[0]):
+ input_lines.append(offset)
+ offset += 1
+ return input_lines
+
===========changed ref 4===========
# module: coeditor.encoding
- def change_tks_to_query_context(change_tks: TokenSeq, respect_lines: int):
- lines = split_list(change_tks, Newline_id)
- spliter = 0
- result_lines = 0
- for i, l in enumerate(lines):
- if l and l[0] == Del_id:
- pass
- else:
- result_lines += 1
- if result_lines <= respect_lines:
- spliter = i + 1
-
- context = join_list(lines[:spliter], Newline_id)
- query = change_tks_to_input_output(join_list(lines[spliter:], Newline_id))
- return query, context
-
===========changed ref 5===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
@staticmethod
+ def from_output_tks(
+ lines: Sequence[int], tks: TokenSeq, allow_truncated_tks: bool = True
+ ) -> "TkDelta":
- def from_output_tks(lines: Sequence[int], tks: TokenSeq) -> "TkDelta":
ad_tks = (Add_id, Del_id)
def remove_newline(seg: TokenSeq):
if seg and seg[-1] == Newline_id:
del seg[-1]
return seg
def seg_to_tuple(seg: TokenSeq) -> tuple[TokenSeq]:
result = list[TokenSeq]()
ptr = 0
for i, x in enumerate(seg):
if i > 0 and x in ad_tks:
if seg[ptr] in ad_tks:
result.append(seg[ptr:i])
ptr = i
if ptr < len(seg) and seg[ptr] in ad_tks:
result.append(seg[ptr:])
return tuple(remove_newline(x) for x in result)
segs = output_ids_as_seqs(tks)
+ if not allow_truncated_tks:
+ assert_eq(len(segs), len(lines))
- assert_eq(len(segs), len(lines))
deltas = {l: seg_to_tuple(seg) for l, seg in zip(lines, segs.values()) if seg}
return TkDelta(deltas)
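===========sketch: inlining model output===========
`show_prediction` above renders a prediction by splicing output segments back into the query at their sentinels (`inline_output_tokens` in the real code); a toy string version of that splice:

def inline_output(inp: list[str], out_segs: dict[str, list[str]]) -> list[str]:
    """Replace each sentinel with the segment predicted for it; sentinels the
    model left empty simply disappear."""
    merged: list[str] = []
    for tok in inp:
        if tok.startswith("<extra_id_"):
            merged.extend(out_segs.get(tok, []))
        else:
            merged.append(tok)
    return merged

inp = ["<extra_id_0>", "x = 1", "<extra_id_1>", "return x"]
out = {"<extra_id_1>": ["<add> x = x + 1"]}
print(inline_output(inp, out))
# ['x = 1', '<add> x = x + 1', 'return x'] -- the addition lands before 'return x';
# in the real encoding a '<del>' action would mark that following line for removal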
|
coeditor.model/RetrievalEditorModel.predict_on_batch | Modified | temp-1 | a19ca9519caf03745b65f959367091eee884fc98 | - return target lines in service response. | <0>:<add> pred = tokens_to_change(inline_output_tokens(change_tks, out))
| # module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
@torch.autocast("cuda")
def predict_on_batch(
self,
batch: dict,
originals: Sequence[TokenSeq],
dec_args: DecodingArgs,
n_solutions: int = 1,
) -> list[list[PredictedChange]]:
<s>
out_tks = [remove_pad_ids(x) for x in out_tks]
assert isinstance(out_tks, list)
logging.debug("Max out length:", max(len(x) for x in out_tks))
assert_eq(len(out_tks), len(originals) * N)
originals = join_list([[x] * N for x in originals])
if (pred_scores := gen_out.get("sequences_scores", None)) is None:
pred_scores = [0.0] * len(out_tks)
if use_sampling:
pred_weights = [1.0 / N] * len(out_tks)
else:
pred_weights = [math.exp(x) for x in pred_scores]
with timed("assemble changes"):
pred_changes = list[Modified[str]]()
for change_tks, out in zip(originals, out_tks):
- pred = apply_output_tks_to_change(change_tks, 0, out)
<0> pred_changes.append(pred)
assert_eq(len(pred_changes), len(out_tks), len(pred_scores))
solutions = list[list[PredictedChange]]()
for i in range(0, len(pred_changes), N):
sols = marginalize_preds(
pred_changes[i : i + N],
out_tks[i : i + N],
pred_weights[i : i + N],
pred_scores[i : i + N],
)
solutions.append(sols[:n_solutions])
return solutions
| ===========above chunk 0===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
@torch.autocast("cuda")
def predict_on_batch(
self,
batch: dict,
originals: Sequence[TokenSeq],
dec_args: DecodingArgs,
n_solutions: int = 1,
) -> list[list[PredictedChange]]:
# offset: -1
<s>edChange(
preds[g[0]], out_tks[g[0]], sum(weights[i] for i in g), len(g)
)
for g in groups
]
use_sampling = dec_args.marginalize_samples > 1
if use_sampling:
assert_eq(dec_args.do_sample, True)
assert_eq(dec_args.num_beams, 1)
N = dec_args.marginalize_samples
else:
N = dec_args.num_beams or 1
gen_args = dec_args.to_model_args()
input_ids = batch["input_ids"]
if not isinstance(input_ids, torch.LongTensor):
input_ids = torch.LongTensor(input_ids)
with timed("model.generate"), tqdm(total=dec_args.max_output_tks) as pbar:
gen_out = self.generate(
input_ids.to(self.device),
references=batch["references"],
query_ref_list=batch["query_ref_list"],
num_return_sequences=N,
return_dict_in_generate=True,
output_scores=True,
**gen_args,
tqdm=pbar,
)
assert not isinstance(gen_out, torch.LongTensor)
out_tks = gen_out["sequences"]
if isinstance(out_tks, torch.Tensor):
out_tks = out_tks.tolist()
out_tks = [remove_pad_ids(x) for x in out_tks]
assert isinstance(out</s>
===========above chunk 1===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
@torch.autocast("cuda")
def predict_on_batch(
self,
batch: dict,
originals: Sequence[TokenSeq],
dec_args: DecodingArgs,
n_solutions: int = 1,
) -> list[list[PredictedChange]]:
# offset: -2
"""
Returns nested list of shape `(batch_size, n_solutions)`.
"""
timed = self.tlogger.timed
def marginalize_preds(
preds: Sequence[Modified[str]],
out_tks: Sequence[TokenSeq],
weights: Sequence[float],
scores: Sequence[float],
) -> list[PredictedChange]:
"""For sampling techniques, all sample should have equal weights 1/N. For
search-based techniques, the `weights` should equal to the solutions' probabilities."""
assert preds
groups = groupby(
range(len(preds)),
keyfunc=lambda i: normalize_code_by_ast(preds[i].after),
)
groups = list(groups.values())
for group in groups:
# within each group, sort by score
group.sort(key=lambda i: scores[i], reverse=True)
groups.sort(
key=lambda g: (sum(weights[i] for i in g), scores[g[0]]), reverse=True
)
return [
PredictedChange(
preds[g[0]], out_tks[g[0]], sum(weights[i] for i</s>
===========unchanged ref 0===========
at: coeditor._utils
groupby(iterable: Iterable[T1], keyfunc: Callable[[T1], T2]) -> dict[T2, list[T1]]
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
timed(self, name: str)
at: coeditor.change
Modified(before: E1, after: E1, unchanged: bool=False)
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
at: coeditor.common
TokenSeq = list[Token]
join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
normalize_code_by_ast(code: str, sort_keyargs: bool=True, remove_doc_string: bool=True) -> str
at: coeditor.encoding
tokens_to_change(tokens: TokenSeq) -> Modified[str]
inline_output_tokens(input: TokenSeq, output: TokenSeq, leave_unpredicted=False) -> TokenSeq
at: coeditor.model
DecodingArgs(max_output_tks: int=512, do_sample: bool=False, top_p: float=0.9, num_beams: Optional[int]=1, length_penalty: float=0.0, marginalize_samples: int=1)
remove_pad_ids(ids: TokenSeq) -> TokenSeq
PredictedChange(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
at: coeditor.model.DecodingArgs
max_output_tks: int = 512
do_sample: bool = False
top_p: float = 0.9
num_beams: Optional[int] = 1
length_penalty: float = 0.0
===========unchanged ref 1===========
marginalize_samples: int = 1
to_model_args() -> dict
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
save(self, save_dir: Path, is_main_process: bool=True, state_dict: Optional[dict]=None, save_function: Callable=torch.save, push_to_hub: bool=False, max_shard_size: Union[int, str]="10GB", safe_serialization: bool=False, variant: Optional[str]=None, /, *, is_main_process: bool=True, state_dict: Optional[dict]=None, save_function: Callable=torch.save, push_to_hub: bool=False, max_shard_size: Union[int, str]="10GB", safe_serialization: bool=False, variant: Optional[str]=None, private: Optional[bool]=None, use_auth_token: Optional[Union[bool, str]]=None, repo_url: Optional[str]=None, organization: Optional[str]=None)
at: coeditor.model.RetrievalEditorModel.__init__
self.tlogger = TimeLogger()
at: logging
debug(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
at: math
exp(x: SupportsFloat, /) -> float
at: pathlib
Path()
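===========sketch: marginalizing samples===========
`marginalize_preds` above merges samples that decode to equivalent programs before ranking; a runnable toy version where whitespace removal stands in for `normalize_code_by_ast`:

from collections import defaultdict

def marginalize(preds: list[str], weights: list[float], scores: list[float]):
    """Group equivalent predictions, sum their weights, and rank groups by
    total weight (ties broken by best member score)."""
    groups: dict[str, list[int]] = defaultdict(list)
    for i, p in enumerate(preds):
        groups[p.replace(" ", "")].append(i)  # toy normalizer
    ranked = sorted(
        groups.values(),
        key=lambda g: (sum(weights[i] for i in g), max(scores[i] for i in g)),
        reverse=True,
    )
    result = []
    for g in ranked:
        rep = max(g, key=lambda i: scores[i])  # best-scoring member represents the group
        result.append((preds[rep], sum(weights[i] for i in g), len(g)))
    return result

print(marginalize(["x=1", "x = 1", "x=2"], [1 / 3, 1 / 3, 1 / 3], [-0.2, -0.1, -0.05]))
# [('x = 1', 0.666..., 2), ('x=2', 0.333..., 1)]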
|
coeditor.service/ServiceResponse.print | Modified | temp-1 | a19ca9519caf03745b65f959367091eee884fc98 | - return target lines in service response. | <0>:<add> print(f"Target lines: {self.target_lines}", file=file)
| # module: coeditor.service
@dataclass
class ServiceResponse:
def print(self, file=sys.stdout):
print(f"Target file: {self.target_file}", file=file)
print(f"Edit range: {self.edit_start} - {self.edit_end}", file=file)
<0> for i, s in enumerate(self.suggestions):
print(
f"\t--------------- Suggestion {i} (score: {s.score:.3g}) ---------------",
file=file,
)
print(textwrap.indent(s.change_preview, "\t"), file=file)
print(f"Input code:", file=file)
print(self.input_code, file=file)
| ===========unchanged ref 0===========
at: coeditor.service.EditSuggestion
score: float
change_preview: str
new_code: str
at: coeditor.service.ServiceResponse
target_file: str
edit_start: tuple[int, int]
edit_end: tuple[int, int]
target_lines: Sequence[int]
input_code: str
suggestions: list[EditSuggestion]
at: sys
stdout: TextIO
at: textwrap
indent(text: str, prefix: str, predicate: Optional[Callable[[str], bool]]=...) -> str
===========changed ref 0===========
# module: coeditor.service
@dataclass
class ServiceResponse:
target_file: str
edit_start: tuple[int, int]
edit_end: tuple[int, int]
+ target_lines: Sequence[int]
input_code: str
suggestions: list[EditSuggestion]
===========changed ref 1===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class TkC3Problem(TokenizedEdit):
@property
def input_tks(self) -> TokenSeq:
+ return self.header.tolist() + self.main_input.tolist()
- return self.input.tolist()
===========changed ref 2===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class TkC3Problem(TokenizedEdit):
"Tokenized contextual code change prediction problem."
+ main_input: TkArray
- input: TkArray
+ header: TkArray
output: TkArray
path: ProjectPath
change_type: Change[None]
# most relevant to least relevant
named_references: Sequence[tuple[str, TkArray]]
project: str
commit: CommitInfo | None
===========changed ref 3===========
# module: coeditor.encoding
- def apply_output_tks_to_change(
- change_tks: TokenSeq,
- respect_lines: int,
- out_tks: TokenSeq,
- ) -> Modified[str]:
- (input_tks, _), context = change_tks_to_query_context(change_tks, respect_lines)
- change_tks = (
- context
- + [Newline_id]
- + inline_output_tokens(input_tks, out_tks, leave_unpredicted=False)
- )
- return tokens_to_change(change_tks)
-
===========changed ref 4===========
# module: coeditor.encoding
+ def input_lines_from_tks(input_tks: TokenSeq) -> list[int]:
+ """compute the lines in the input to be edited"""
+ input_lines = list[int]()
+ offset = 0
+ for line in split_list(input_tks, Newline_id):
+ if line and line[0] == Del_id:
+ continue
+ if line and is_extra_id(line[0]):
+ input_lines.append(offset)
+ offset += 1
+ return input_lines
+
===========changed ref 5===========
# module: coeditor.encoding
- def change_tks_to_query_context(change_tks: TokenSeq, respect_lines: int):
- lines = split_list(change_tks, Newline_id)
- spliter = 0
- result_lines = 0
- for i, l in enumerate(lines):
- if l and l[0] == Del_id:
- pass
- else:
- result_lines += 1
- if result_lines <= respect_lines:
- spliter = i + 1
-
- context = join_list(lines[:spliter], Newline_id)
- query = change_tks_to_input_output(join_list(lines[spliter:], Newline_id))
- return query, context
-
===========changed ref 6===========
# module: coeditor.model
@dataclass
class RetrievalDecodingResult:
@classmethod
def show_prediction(cls, prob: C3Problem, pred: RetrievalModelPrediction) -> str:
span = prob.span
tk_prob = TkC3Problem(
+ main_input=TkArray.new(pred["input_ids"]),
- input=TkArray.new(pred["input_ids"]),
+ header=TkArray.new([]),
output=TkArray.new(pred["labels"]),
path=span.headers[-1].path,
change_type=prob.change_type,
named_references=[
(f"reference-{i}", TkArray.new(ref))
for i, ref in enumerate(pred["references"])
],
project=prob.src_info["project"],
commit=prob.src_info["commit"],
)
return tk_prob.show(pred["output_ids"])
===========changed ref 7===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
@staticmethod
+ def from_output_tks(
+ lines: Sequence[int], tks: TokenSeq, allow_truncated_tks: bool = True
+ ) -> "TkDelta":
- def from_output_tks(lines: Sequence[int], tks: TokenSeq) -> "TkDelta":
ad_tks = (Add_id, Del_id)
def remove_newline(seg: TokenSeq):
if seg and seg[-1] == Newline_id:
del seg[-1]
return seg
def seg_to_tuple(seg: TokenSeq) -> tuple[TokenSeq]:
result = list[TokenSeq]()
ptr = 0
for i, x in enumerate(seg):
if i > 0 and x in ad_tks:
if seg[ptr] in ad_tks:
result.append(seg[ptr:i])
ptr = i
if ptr < len(seg) and seg[ptr] in ad_tks:
result.append(seg[ptr:])
return tuple(remove_newline(x) for x in result)
segs = output_ids_as_seqs(tks)
+ if not allow_truncated_tks:
+ assert_eq(len(segs), len(lines))
- assert_eq(len(segs), len(lines))
deltas = {l: seg_to_tuple(seg) for l, seg in zip(lines, segs.values()) if seg}
return TkDelta(deltas)
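===========sketch: output tokens to delta===========
A toy end-to-end pass over the `from_output_tks` diff shown above: `output_ids_as_seqs` is re-sketched inline for self-containment (the real version lives in `coeditor.encoding`), and the per-segment splitting is elided:

ADD, NL = 1001, 10                 # hypothetical token ids
EXTRA = [2000, 2001]               # stand-ins for <extra_id_0>, <extra_id_1>

def output_ids_as_seqs(tks: list[int]) -> dict[int, list[int]]:
    """Group the tokens that follow each sentinel."""
    segs: dict[int, list[int]] = {}
    cur = None
    for t in tks:
        if t in EXTRA:
            segs[t] = []
            cur = t
        elif cur is not None:
            segs[cur].append(t)
    return segs

out = [EXTRA[0], ADD, 42, NL, EXTRA[1]]       # the model only edits the first line
segs = output_ids_as_seqs(out)
deltas = {l: (seg,) for l, seg in zip([3, 7], segs.values()) if seg}
assert list(deltas) == [3]  # empty segments (untouched lines) are dropped,
                            # matching the `if seg` filter above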
|
coeditor.service/EditPredictionService.suggest_edit | Modified | temp-1 | a19ca9519caf03745b65f959367091eee884fc98 | - return target lines in service response. | <0>:<add> target_lines=target_lines,
| # module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
<s>_dir}")
for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt").open("w") as f:
pred_tks = pred.out_tks
score = pred.score
print(f"{problem.edit_lines=}", file=f)
print(f"{len(input_tks)=}", file=f)
print(f"{len(references)=}", file=f)
print(f"Solution score: {score:.3g}", file=f)
print(f"Marginalized samples:", pred.n_samples, file=f)
pred = RetrievalModelPrediction(
input_ids=input_tks,
output_ids=pred_tks,
labels=output_truth,
references=references,
)
pred_str = RetrievalDecodingResult.show_prediction(problem, pred)
print(pred_str, file=f)
suggestions = list[EditSuggestion]()
for pred in predictions:
suggested_change, preview = self.apply_edit_to_elem(
problem,
pred.out_tks,
)
suggestion = EditSuggestion(
score=pred.score,
change_preview=preview,
new_code=suggested_change.after,
)
suggestions.append(suggestion)
span = problem.span
old_code = tokens_to_change(span.original.tolist()).after
return ServiceResponse(
target_file=file.as_posix(),
edit_start=(span.line_range[0], 0),
edit_end=(span.line_range[1], 0),
<0> input_code=old_code,
suggestions=suggestions,
)
| ===========above chunk 0===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
# offset: -1
timed = self.tlogger.timed
project = self.project
if file.is_absolute():
file = file.relative_to(project)
file = to_rel_path(file)
with timed("get c3 problem"):
problem = self.detector.get_problem(file, edit_lines)
with timed("tokenize c3 problem"):
tk_prob = self.c3_tkn.tokenize_problem(problem)
+ target_begin = problem.span.line_range[0]
+ target_lines = input_lines_from_tks(tk_prob.main_input.tolist())
+ target_lines = [target_begin + l for l in target_lines]
batch = C3DataLoader.pack_batch([tk_prob])
original = problem.span.original.tolist()
with timed("run model"), torch.autocast("cuda"):
predictions = self.model.predict_on_batch(
batch, [original], self.dec_args, self.show_max_solutions
)
assert_eq(len(predictions), 1)
predictions = predictions[0]
assert predictions
if log_dir is not None:
log_dir.mkdir(exist_ok=True)
input_tks = batch["input_ids"][0]
references = batch["references"]
output_truth = batch["labels"][0]
print(f"Writing logs to: {log_dir}")
for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt</s>
===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
timed(self, name: str)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.7"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 16
ref_chunk_overlap: int = 32
disable_builtin_defs: bool = True
disable_unchanged_refs: bool = False
current_code_only: bool = False
tokenize_problem(problem: C3Problem) -> TkC3Problem
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.c3problem.TkC3Problem
main_input: TkArray
header: TkArray
output: TkArray
path: ProjectPath
change_type: Change[None]
named_references: Sequence[tuple[str, TkArray]]
project: str
commit: CommitInfo | None
truncated: bool
at: coeditor.change.Modified
before: E1
===========unchanged ref 1===========
after: E1
unchanged: bool = False
at: coeditor.common
to_rel_path(path: os.PathLike | str) -> RelPath
at: coeditor.encoding
tokens_to_change(tokens: TokenSeq) -> Modified[str]
at: coeditor.model
RetrievalModelPrediction(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
RetrievalModelPrediction(**kwargs: _VT)
RetrievalModelPrediction(map: Mapping[_KT, _VT], **kwargs: _VT)
C3DataLoader(all_probs: Sequence[C3Problem], transform: C3ProblemTransform | None, tokenizer: C3ProblemTokenizer, batch_args: BatchArgs, shuffle: bool, desc: str, tqdm_args: dict | None=None, chunk_size: int=1000, workers: int=DefaultWorkers)
at: coeditor.model.C3DataLoader
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
tqdm_args: dict | None = None
chunk_size: int = 1000
workers: int = DefaultWorkers
pack_batch(probs: Sequence[TkC3Problem])
at: coeditor.model.PredictedChange
change: Modified[str]
out_tks: TokenSeq
score: float
n_samples: int
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
decorate_autocast(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1)
predict_on_batch(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1) -> list[list[PredictedChange]]
===========unchanged ref 2===========
at: coeditor.model.RetrievalEditorModel.__init__
self.tlogger = TimeLogger()
at: coeditor.service
_tlogger = TimeLogger()
EditSuggestion(score: float, change_preview: str, new_code: str)
ServiceResponse(target_file: str, edit_start: tuple[int, int], edit_end: tuple[int, int], target_lines: Sequence[int], input_code: str, suggestions: list[EditSuggestion])
at: coeditor.service.ChangeDetector
project: Path
untracked_as_additions: bool = True
ignore_dirs: Collection[str] = field(default_factory=lambda: DefaultIgnoreDirs)
get_problem(target_file: RelPath, target_lines: Sequence[int] | int) -> C3Problem
at: coeditor.service.EditPredictionService
apply_edit_to_elem(problem: C3Problem, out_tks: TokenSeq) -> tuple[Modified[str], str]
at: coeditor.service.EditPredictionService.__init__
self.project = detector.project
self.detector = detector
self.model = model
self.c3_tkn = c3_tkn
self.dec_args = dec_args
self.show_max_solutions = 3
self.tlogger = _tlogger
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
at: pathlib
Path()
at: pathlib.Path
__slots__ = ()
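===========sketch: relative to absolute lines===========
`suggest_edit` above shifts the tokenizer's span-relative line offsets back to absolute file lines with `target_begin + l`; as a tiny hypothetical helper:

def absolute_target_lines(target_begin: int, rel_lines: list[int]) -> list[int]:
    """Span-relative offsets -> absolute file line numbers."""
    return [target_begin + l for l in rel_lines]

# the edited span starts at file line 120; offsets 0, 1 and 4 were kept
assert absolute_target_lines(120, [0, 1, 4]) == [120, 121, 124]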
|
coeditor.c3problem/C3ProblemTokenizer.__post_init__ | Modified | temp-1 | dcf432c49b5076a6d10b6299451cc66978201ea6 | Remove unused cachetools. | <0>:<add> self._offset_cache = dict[int, TkArray]()
| # module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def __post_init__(self):
- self._offset_cache = LRUCache[int, TkArray](maxsize=100)
<0>
| ===========unchanged ref 0===========
at: coeditor.tk_array
TkArray()
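===========sketch: dict as cache===========
The commit above replaces an LRU cache with a plain dict; a tiny memoization sketch (hypothetical helper, tuples standing in for `TkArray`) shows why a dict suffices when the key space stays small and eviction is unnecessary:

_offset_cache: dict[int, tuple[int, ...]] = {}

def offset_tks(offset: int) -> tuple[int, ...]:
    if offset not in _offset_cache:
        _offset_cache[offset] = (offset,)  # stand-in for TkArray.new(...)
    return _offset_cache[offset]

assert offset_tks(3) is offset_tks(3)  # second call hits the cache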
|
coeditor.c3problem/C3Problem.print | Modified | temp-1 | ae76b72f59bc67bc17f0d8c134675a4f97cad381 | Better edit range control: support editing below. | <0>:<add> ("edit_line_ids", str(self.edit_line_ids)),
| # module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
def print(self):
main_change = self.span.delta.apply_to_change(self.span.original.tolist())
print_sections(
("summary", self.summary()),
("main change", decode_tokens(main_change)),
- ("edit_lines", str(self.edit_lines)),
<0> )
| ===========unchanged ref 0===========
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Sequence[ChangedCodeSpan]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
summary() -> str
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.common
print_sections(*, sep: str=SEP, file: TextIO=sys.stdout) -> None
at: coeditor.encoding
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
apply_to_change(change: TokenSeq) -> TokenSeq
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
===========changed ref 0===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
"Contextual code change prediction problem."
span: ChangedCodeSpan
+ # The line ids in the change tks that should be edited
    -    # the lines to be edited, relative to the start of the span.
+ edit_line_ids: Sequence[int]
- edit_lines: Sequence[int]
# most relevant to least relevant
relevant_changes: Sequence[ChangedCodeSpan]
# most relevant to least relevant
relevant_unchanged: Sequence[ChangedCodeSpan]
# some optional information about how the problem was generated
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
===========changed ref 1===========
# module: coeditor.encoding
- def input_lines_from_tks(input_tks: TokenSeq) -> list[int]:
- """compute the lines in the input to be edited"""
- input_lines = list[int]()
- offset = 0
- for line in split_list(input_tks, Newline_id):
- if line and line[0] == Del_id:
- continue
- if line and is_extra_id(line[0]):
- input_lines.append(offset)
- offset += 1
- return input_lines
- |
coeditor.c3problem/C3GeneratorCache.create_problem | Modified | temp-1 | ae76b72f59bc67bc17f0d8c134675a4f97cad381 | Better edit range control: support editing below. | <0>:<add> line_ids,
| # module: coeditor.c3problem
class C3GeneratorCache:
def create_problem(
self,
target: ChangedSpan,
+ target_lines: Sequence[int],
- edit_lines: Sequence[int] | None,
changed: Mapping[ModuleName, JModuleChange],
target_usages: LineUsageAnalysis,
src_info: SrcInfo,
) -> C3Problem:
<s>.to_code_span(cspan))
code_span = self.to_code_span(target)
changed_code = code_span.delta.apply_to_change(code_span.original.tolist())
+ target_set = set(target_lines)
- if edit_lines is None:
+ line_ids = list[int]()
- edit_lines = list[int]()
+ input_l = target.line_range[0]
+ for i, tks in enumerate(split_list(changed_code, Newline_id)):
- for i, tks in enumerate(split_list(changed_code, Newline_id)):
+ if tks and tks[0] == Del_id:
- if tks and tks[0] == Del_id:
+ continue
- continue
+ if input_l in target_set:
+ line_ids.append(i)
- edit_lines.append(i)
+ input_l += 1
code_span = dataclasses.replace(
code_span, original=TkArray.new(changed_code), delta=TkDelta.empty()
)
relevant_unchanged = self.get_relevant_unchanged(
code_span, relevant_changes, target_usages
)
prob = C3Problem(
code_span,
- edit_lines, # one additional line for appending
<0> relevant_changes=relevant_changes,
relevant_unchanged=relevant_unchanged,
change_type=target.change.map(lambda _: None),
src_info=src_info,
)
return prob
| ===========above chunk 0===========
# module: coeditor.c3problem
class C3GeneratorCache:
def create_problem(
self,
target: ChangedSpan,
+ target_lines: Sequence[int],
- edit_lines: Sequence[int] | None,
changed: Mapping[ModuleName, JModuleChange],
target_usages: LineUsageAnalysis,
src_info: SrcInfo,
) -> C3Problem:
# offset: -1
relevant_changes = list[ChangedCodeSpan]()
changed = dict(changed)
module = target.module
target_mc = changed.pop(module)
all_mc = [target_mc] + list(changed.values())
for mc in all_mc:
is_target_mc = mc.module_change.earlier.mname == module
for cspan in mc.changed:
if not is_target_mc or cspan.line_range != target.line_range:
relevant_changes.append(self.to_code_span(cspan))
code_span = self.to_code_span(target)
changed</s>
===========unchanged ref 0===========
at: coeditor.c3problem
ChangedHeader(change_tks: TkArray, type: str, line_range: LineRange, path: ProjectPath)
ChangedCodeSpan(headers: Sequence[ChangedHeader], original: TkArray, delta: TkDelta, line_range: LineRange, module: ModuleName)
SrcInfo(map: Mapping[_KT, _VT], **kwargs: _VT)
SrcInfo(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
SrcInfo(**kwargs: _VT)
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
PyDefinition(full_name: PyFullName, start_pos: tuple[int, int], end_pos: tuple[int, int])
LineUsageAnalysis(line2usages: Mapping[int, set[PyDefinition]])
ModuleHierarchy()
at: coeditor.c3problem.C3GeneratorCache
to_code_span(span: ChangedSpan)
at: coeditor.c3problem.C3ProblemGenerator.process_change
processed_cspans = list[ChangedCodeSpan]()
problems = list[C3Problem]()
code_span = cache.to_code_span(span)
src_info: SrcInfo = {
"project": pchange.project_name,
"commit": pchange.commit_info,
}
prob = C3Problem(
code_span,
range(0, n_lines + 1), # one additional line for appending
relevant_changes=relevant_changes,
relevant_unchanged=relevant_unchanged,
change_type=span.change.map(lambda _: None),
src_info=src_info,
)
at: coeditor.c3problem.ChangedCodeSpan
original: TkArray
===========unchanged ref 1===========
delta: TkDelta
at: coeditor.c3problem.ModuleHierarchy
from_modules(modules: Iterable[str]) -> "ModuleHierarchy"
at: coeditor.change.Added
after: E1
map(f: Callable[[E1], T2]) -> "Added[T2]"
at: coeditor.change.Deleted
before: E1
map(f: Callable[[E1], T2]) -> "Deleted[T2]"
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
map(f: Callable[[E1], T2]) -> "Modified[T2]"
at: coeditor.common
split_list(lst: list[T1], sep: T1) -> list[list[T1]]
ModuleName = str
ProjectPath(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
at: coeditor.encoding
Del_id = get_tk_id(Del)
Newline_id = get_tk_id("\n")
at: coeditor.encoding.TkDelta
apply_to_change(change: TokenSeq) -> TokenSeq
at: coeditor.scoped_changes
ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange)
JModule(mname: ModuleName, tree: ptree.Module)
JModuleChange(module_change: Change[JModule], changed: Sequence[ChangedSpan])
at: coeditor.scoped_changes.ChangedSpan
change: Change[str]
parent_scopes: Sequence[Change[ChangeScope]]
line_range: LineRange
at: coeditor.scoped_changes.JModule
mname: ModuleName
tree: ptree.Module
at: coeditor.scoped_changes.JModuleChange
module_change: Change[JModule]
changed: Sequence[ChangedSpan]
===========unchanged ref 2===========
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
at: typing
Mapping = _alias(collections.abc.Mapping, 2)
Sequence = _alias(collections.abc.Sequence, 1)
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
===========changed ref 0===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
"Contextual code change prediction problem."
span: ChangedCodeSpan
+ # The line ids in the change tks that should be edited
    -    # the lines to be edited, relative to the start of the span.
+ edit_line_ids: Sequence[int]
- edit_lines: Sequence[int]
# most relevant to least relevant
relevant_changes: Sequence[ChangedCodeSpan]
# most relevant to least relevant
relevant_unchanged: Sequence[ChangedCodeSpan]
# some optional information about how the problem was generated
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
===========changed ref 1===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
def print(self):
main_change = self.span.delta.apply_to_change(self.span.original.tolist())
print_sections(
("summary", self.summary()),
("main change", decode_tokens(main_change)),
+ ("edit_line_ids", str(self.edit_line_ids)),
- ("edit_lines", str(self.edit_lines)),
)
===========changed ref 2===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
+ def line_ids_to_input_lines(self, line_ids: Sequence[int]) -> Sequence[int]:
+ """Convert the edit lines (which are line ids including deleted lines) into
+ normal line numbers that do not include deleted lines."""
+ change_tks = self.span.delta.apply_to_change(self.span.original.tolist())
+ input_l = self.span.line_range[0]
+ input_lines = list[int]()
+ for i, tks in enumerate(split_list(change_tks, Newline_id)):
+ if tks and tks[0] == Del_id:
+ continue
+ if i in line_ids:
+ input_lines.append(input_l)
+ input_l += 1
+
+ return input_lines
+ |
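===========sketch: line ids vs. input lines===========
A standalone sketch of the id-to-line conversion added above, using plain strings with a leading "-" to mark deleted lines (the real code checks `Del_id` on token lines):

def line_ids_to_input_lines(change_lines: list[str], line_ids: set[int],
                            range_start: int) -> list[int]:
    """Line ids count every diff line (deletions included); input line numbers
    count only lines that survive in the current file."""
    input_l = range_start
    out: list[int] = []
    for i, line in enumerate(change_lines):
        if line.startswith("-"):    # a deleted line consumes an id but no file line
            continue
        if i in line_ids:
            out.append(input_l)
        input_l += 1
    return out

diff = ["def f():", "-    old()", "+    new()", "    return 1"]
# id 1 is a deletion, so id 2 maps to file line 11 rather than 12
assert line_ids_to_input_lines(diff, {0, 2}, 10) == [10, 11]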
coeditor.c3problem/C3ProblemSimpleSplit.transform | Modified | temp-1 | ae76b72f59bc67bc17f0d8c134675a4f97cad381 | Better edit range control: support editing below. | <0>:<add> l_range = prob.edit_line_ids
| # module: coeditor.c3problem
@dataclass
class C3ProblemSimpleSplit(C3ProblemTransform):
def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
delta = prob.span.delta
- l_range = prob.edit_lines
<0> assert isinstance(l_range, range)
start, stop = l_range.start, l_range.stop
problems = list[C3Problem]()
new_trans = prob.transformations + ("split",)
for i in range(start, stop, self.max_lines_to_edit):
j = min(i + self.max_lines_to_edit, stop)
sub_delta = delta.for_input_range((i, j))
if sub_delta.num_changes() > 0:
sub_prob = dataclasses.replace(
prob, edit_lines=range(i, j), transformations=new_trans
)
problems.append(sub_prob)
if len(problems) >= self.max_split_factor:
break
return problems
| ===========unchanged ref 0===========
at: abc
abstractmethod(callable: _FuncT) -> _FuncT
ABC()
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
at: coeditor.scoped_changes.ChangedSpan
line_range: LineRange
at: dataclasses
dataclass(_cls: Type[_T]) -> Type[_T]
dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]]
dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
"Contextual code change prediction problem."
span: ChangedCodeSpan
+ # The line ids in the change tks that should be edited
    -    # the lines to be edited, relative to the start of the span.
+ edit_line_ids: Sequence[int]
- edit_lines: Sequence[int]
# most relevant to least relevant
relevant_changes: Sequence[ChangedCodeSpan]
# most relevant to least relevant
relevant_unchanged: Sequence[ChangedCodeSpan]
# some optional information about how the problem was generated
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
===========changed ref 1===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
def print(self):
main_change = self.span.delta.apply_to_change(self.span.original.tolist())
print_sections(
("summary", self.summary()),
("main change", decode_tokens(main_change)),
+ ("edit_line_ids", str(self.edit_line_ids)),
- ("edit_lines", str(self.edit_lines)),
)
===========changed ref 2===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
+ def line_ids_to_input_lines(self, line_ids: Sequence[int]) -> Sequence[int]:
+ """Convert the edit lines (which are line ids including deleted lines) into
+ normal line numbers that do not include deleted lines."""
+ change_tks = self.span.delta.apply_to_change(self.span.original.tolist())
+ input_l = self.span.line_range[0]
+ input_lines = list[int]()
+ for i, tks in enumerate(split_list(change_tks, Newline_id)):
+ if tks and tks[0] == Del_id:
+ continue
+ if i in line_ids:
+ input_lines.append(input_l)
+ input_l += 1
+
+ return input_lines
+
===========changed ref 3===========
# module: coeditor.c3problem
class C3GeneratorCache:
def create_problem(
self,
target: ChangedSpan,
+ target_lines: Sequence[int],
- edit_lines: Sequence[int] | None,
changed: Mapping[ModuleName, JModuleChange],
target_usages: LineUsageAnalysis,
src_info: SrcInfo,
) -> C3Problem:
relevant_changes = list[ChangedCodeSpan]()
changed = dict(changed)
module = target.module
target_mc = changed.pop(module)
all_mc = [target_mc] + list(changed.values())
for mc in all_mc:
is_target_mc = mc.module_change.earlier.mname == module
for cspan in mc.changed:
if not is_target_mc or cspan.line_range != target.line_range:
relevant_changes.append(self.to_code_span(cspan))
code_span = self.to_code_span(target)
changed_code = code_span.delta.apply_to_change(code_span.original.tolist())
+ target_set = set(target_lines)
- if edit_lines is None:
+ line_ids = list[int]()
- edit_lines = list[int]()
+ input_l = target.line_range[0]
+ for i, tks in enumerate(split_list(changed_code, Newline_id)):
- for i, tks in enumerate(split_list(changed_code, Newline_id)):
+ if tks and tks[0] == Del_id:
- if tks and tks[0] == Del_id:
+ continue
- continue
+ if input_l in target_set:
+ line_ids.append(i)
- edit_lines.append(i)
+ input_l += 1
code_span = dataclasses.replace(
code_span, original=TkArray.new(changed_code), delta=T</s>
===========changed ref 4===========
# module: coeditor.c3problem
class C3GeneratorCache:
def create_problem(
self,
target: ChangedSpan,
+ target_lines: Sequence[int],
- edit_lines: Sequence[int] | None,
changed: Mapping[ModuleName, JModuleChange],
target_usages: LineUsageAnalysis,
src_info: SrcInfo,
) -> C3Problem:
# offset: 1
<s> code_span = dataclasses.replace(
code_span, original=TkArray.new(changed_code), delta=TkDelta.empty()
)
relevant_unchanged = self.get_relevant_unchanged(
code_span, relevant_changes, target_usages
)
prob = C3Problem(
code_span,
+ line_ids,
- edit_lines, # one additional line for appending
relevant_changes=relevant_changes,
relevant_unchanged=relevant_unchanged,
change_type=target.change.map(lambda _: None),
src_info=src_info,
)
return prob
===========changed ref 5===========
# module: coeditor.encoding
- def input_lines_from_tks(input_tks: TokenSeq) -> list[int]:
- """compute the lines in the input to be edited"""
- input_lines = list[int]()
- offset = 0
- for line in split_list(input_tks, Newline_id):
- if line and line[0] == Del_id:
- continue
- if line and is_extra_id(line[0]):
- input_lines.append(offset)
- offset += 1
- return input_lines
- |
coeditor.c3problem/C3ProblemChangeDropout.transform | Modified | temp-1 | ae76b72f59bc67bc17f0d8c134675a4f97cad381 | Better edit range control: support editing below. | <0>:<add> l_range = prob.edit_line_ids
| # module: coeditor.c3problem
@dataclass
class C3ProblemChangeDropout(C3ProblemTransform):
def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
original = prob.span.original
delta = prob.span.delta
- l_range = prob.edit_lines
<0> assert isinstance(l_range, range)
start, stop = l_range.start, l_range.stop
grouped_keys = delta.change_groups()
should_dropout = len(grouped_keys) >= 2
if should_dropout:
n_to_drop = int(
len(grouped_keys) * random.random() * self.max_dropout_ratio
)
assert n_to_drop < len(grouped_keys)
keys_to_drop = join_list(
random_subset(grouped_keys, n_to_drop, rng=self._rng)
)
else:
keys_to_drop = []
if keys_to_drop:
delta1, delta2 = delta.decompose_for_change(keys_to_drop)
if random.random() < self._test_prob:
result1 = delta2.apply_to_change(
delta1.apply_to_change(original.tolist())
)
result2 = delta.apply_to_change(original.tolist())
code1 = tokens_to_change(result1).after
code2 = tokens_to_change(result2).after
if code1 != code2:
print_sections(
("result1", decode_tokens(result1)),
("result2", decode_tokens(result2)),
("delta", str(delta)),
("keys_to_drop", str(keys_to_drop)),
("delta1", str(delta1)),
("delta2", str(delta2)),
)
raise AssertionError("decompose_for_change failed.")
delta2_groups = delta2.change_groups()
if not delta2_groups:
print_err(f"{delta</s> | ===========below chunk 0===========
# module: coeditor.c3problem
@dataclass
class C3ProblemChangeDropout(C3ProblemTransform):
def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
# offset: 1
<s> delta2_groups = delta2.change_groups()
if not delta2_groups:
print_err(f"{delta=}, {keys_to_drop=}, {delta1=}")
raise AssertionError("Empty delta2_groups")
new_original = TkArray.new(delta1.apply_to_change(original.tolist()))
new_trans = prob.transformations + ("split", "dropout")
new_span = dataclasses.replace(
prob.span, original=new_original, delta=delta2
)
else:
new_trans = prob.transformations + ("split",)
new_span = prob.span
delta1 = None
delta2_groups = delta.change_groups()
prob_and_n = list[tuple[C3Problem, int]]()
for i in range(start, stop, self.max_lines_to_edit):
j = min(i + self.max_lines_to_edit, stop)
edit_lines = range(i, j)
if delta1 is not None:
edit_lines = delta1.get_new_target_lines(edit_lines)
line_set = set(edit_lines)
n_groups = sum(any(key[0] in line_set for key in g) for g in delta2_groups)
if n_groups > 0:
sub_prob = dataclasses.replace(
prob,
span=new_span,
edit_lines=edit_lines,
transformations=new_trans,
)
prob_and_n.append((sub_prob, n_groups))
# return the problems with the most changes
prob_and_n.sort(key=lambda p: p[1], reverse=True)
probs = [p[0] for p in prob_and_n]
return probs[: self.max_split_</s>
===========below chunk 1===========
# module: coeditor.c3problem
@dataclass
class C3ProblemChangeDropout(C3ProblemTransform):
def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
# offset: 2
<s>)
probs = [p[0] for p in prob_and_n]
return probs[: self.max_split_factor]
===========unchanged ref 0===========
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.C3ProblemTransform
transform(self, prob: C3Problem) -> Sequence[C3Problem]
at: coeditor.c3problem.ChangedCodeSpan
original: TkArray
delta: TkDelta
at: coeditor.change.Modified
after: E1
at: coeditor.common
join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
print_sections(*, sep: str=SEP, file: TextIO=sys.stdout) -> None
random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2]
random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1]
print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None
at: coeditor.encoding
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
tokens_to_change(tokens: TokenSeq) -> Modified[str]
at: coeditor.encoding.TkDelta
apply_to_change(change: TokenSeq) -> TokenSeq
decompose_for_change(first_keys: Collection[DeltaKey]) -> tuple[Self, Self]
change_groups() -> Sequence[tuple[DeltaKey, ...]]
===========unchanged ref 1===========
at: coeditor.tk_array
TkArray()
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
new(tks: Sequence[int]) -> "TkArray"
at: dataclasses
replace(obj: _T, **changes: Any) -> _T
at: random
Random(x: Any=...)
random = _inst.random
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
"Contextual code change prediction problem."
span: ChangedCodeSpan
+ # The line ids in the change tks that should be edited
- # the lines to be edited, reletive to the start of the span.
+ edit_line_ids: Sequence[int]
- edit_lines: Sequence[int]
# most relevant to least relevant
relevant_changes: Sequence[ChangedCodeSpan]
# most relevant to least relevant
relevant_unchanged: Sequence[ChangedCodeSpan]
# some optional information about how the problem was generated
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
===========changed ref 1===========
# module: coeditor.c3problem
@dataclass
class C3ProblemSimpleSplit(C3ProblemTransform):
def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
delta = prob.span.delta
+ l_range = prob.edit_line_ids
- l_range = prob.edit_lines
assert isinstance(l_range, range)
start, stop = l_range.start, l_range.stop
problems = list[C3Problem]()
new_trans = prob.transformations + ("split",)
for i in range(start, stop, self.max_lines_to_edit):
j = min(i + self.max_lines_to_edit, stop)
sub_delta = delta.for_input_range((i, j))
if sub_delta.num_changes() > 0:
sub_prob = dataclasses.replace(
prob, edit_lines=range(i, j), transformations=new_trans
)
problems.append(sub_prob)
if len(problems) >= self.max_split_factor:
break
return problems
===========changed ref 2===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
def print(self):
main_change = self.span.delta.apply_to_change(self.span.original.tolist())
print_sections(
("summary", self.summary()),
("main change", decode_tokens(main_change)),
+ ("edit_line_ids", str(self.edit_line_ids)),
- ("edit_lines", str(self.edit_lines)),
)
|
coeditor.c3problem/C3ProblemTokenizer.tokenize_problem | Modified | temp-1 | ae76b72f59bc67bc17f0d8c134675a4f97cad381 | Better edit range control: support editing below. | <0>:<add> for i, l in enumerate(problem.edit_line_ids):
| # module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
span = problem.span
original: TokenSeq = span.original.tolist()
tk_delta: TkDelta = span.delta
origin_lines = split_list(original, Newline_id)
+ edit_start = problem.edit_line_ids[0]
- edit_start = problem.edit_lines[0]
scope_tks = self._encode_headers(span.headers, 0)
input_limit = self.max_query_tks - len(scope_tks)
chunk_input = TokenSeq()
chunk_output = TokenSeq()
last_line = edit_start
- for i, l in enumerate(problem.edit_lines):
<0> for line in origin_lines[last_line + 1 : l]:
chunk_input.extend(line)
chunk_input.append(Newline_id)
chunk_input.append(get_extra_id(i))
if l < len(origin_lines):
chunk_input.extend(origin_lines[l])
chunk_input.append(Newline_id)
last_line = l
line_change = join_list(tk_delta.get_line_change(l), Newline_id)
chunk_output.append(get_extra_id(i))
chunk_output.extend(line_change)
if line_change and line_change[-1] != Del_id:
chunk_output.append(Newline_id)
if len(chunk_input) > input_limit:
break
edit_stop = last_line + 1
# limit the input size if it's too long
chunk_input = truncate_section(
chunk_input, TruncateAt.Right, input_limit, inplace=True
)
chunk_output = truncate_output_tks(chunk_input, chunk_output)
# try move some prev_change_tks</s> | ===========below chunk 0===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: 1
<s>output = truncate_output_tks(chunk_input, chunk_output)
# try move some prev_change_tks into the input
above_tks = join_list(origin_lines[:edit_start] + [TokenSeq()], Newline_id)
above_tks = tk_delta.for_input_range((0, edit_start)).apply_to_change(above_tks)
below_tks = join_list(origin_lines[edit_stop:] + [TokenSeq()], Newline_id)
chunk_input, above_tks, below_tks = self._inline_some_context(
chunk_input, above_tks, below_tks, input_limit
)
chunk_output = truncate_section(
chunk_output,
TruncateAt.Right,
self.max_output_tks,
add_bos=False,
inplace=True,
)
above_chunks = break_into_chunks(
above_tks,
lambda i: self._encode_headers(span.headers, -1 - i),
chunk_size=self.max_ref_tks,
overlap=self.ref_chunk_overlap,
right_to_left=True,
)
if not below_tks:
below_chunks = []
else:
below_chunks = break_into_chunks(
below_tks,
lambda i: self._encode_headers(span.headers, i + 1),
chunk_size=self.max_ref_tks,
overlap=self.ref_chunk_overlap,
)
above_chunks = [
(f"above chunk {i}", TkArray.new(chunk))
for i, chunk in enumerate(above_chunks)
]
below_chunks = [
(f"below chunk {i}",</s>
===========below chunk 1===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: 2
<s>, chunk in enumerate(above_chunks)
]
below_chunks = [
(f"below chunk {i}", TkArray.new(chunk))
for i, chunk in enumerate(below_chunks)
]
all_refs = above_chunks + below_chunks
ref_size_sum = sum(len(ref) for _, ref in all_refs)
# compute the references that are relevant to this span
if ref_size_sum < self.max_ref_tks_sum:
changed = self._group_encode_changed_refs(problem.relevant_changes)
for i, chunk in enumerate(changed):
all_refs.append((f"changed ref {i}", TkArray.new(chunk)))
ref_size_sum += sum(len(x) for x in changed)
if ref_size_sum < self.max_ref_tks_sum:
unchanged = self._group_encode_unchanged_refs(problem.relevant_unchanged)
for i, chunk in enumerate(unchanged):
all_refs.append((f"unchanged ref {i}", TkArray.new(chunk)))
# take until we hit the limit
ref_size_sum = 0
kept_refs = list[tuple[str, TkArray]]()
for (name, ref) in all_refs:
if ref_size_sum + len(ref) > self.max_ref_tks_sum:
continue
ref_size_sum += len(ref)
kept_refs.append((name, ref))
return TkC3Problem(
TkArray.new(chunk_input),
TkArray.new(scope_tks),
TkArray.new(chunk_output</s>
===========below chunk 2===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: 3
<s> path=span.headers[-1].path,
change_type=problem.change_type,
named_references=kept_refs,
project=problem.src_info["project"],
commit=problem.src_info["commit"],
)
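===========example sketch===========
A toy illustration (strings instead of token ids) of the query layout that
tokenize_problem builds: each editable line gets an <extra_id_i> sentinel in
the input, and the output pairs each sentinel with that line's change.
def build_query(lines: list[str], edit_line_ids: list[int], changes: dict[int, str]) -> tuple[str, str]:
    chunk_input, chunk_output = [], []
    for i, l in enumerate(edit_line_ids):
        chunk_input.append(f"<extra_id_{i}>{lines[l]}")
        chunk_output.append(f"<extra_id_{i}>{changes.get(l, '')}")
    return "\n".join(chunk_input), "\n".join(chunk_output)

inp, out = build_query(["x = 1", "y = 2"], [0, 1], {1: "+ y = 3"})
assert inp == "<extra_id_0>x = 1\n<extra_id_1>y = 2"
assert out == "<extra_id_0>\n<extra_id_1>+ y = 3"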
===========unchanged ref 0===========
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
TkC3Problem(main_input: TkArray, header: TkArray, output: TkArray, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TkArray]], project: str, commit: CommitInfo | None)
C3TokenizerArgs(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
C3TokenizerArgs(map: Mapping[_KT, _VT], **kwargs: _VT)
C3TokenizerArgs(**kwargs: _VT)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Sequence[ChangedCodeSpan]
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.3"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 12
_encode_headers(scope_changes: Sequence[ChangedHeader], offset: int) -> TokenSeq
_inline_some_context(input: TokenSeq, above_ctx: TokenSeq, below_ctx: TokenSeq, size_limit: int) -> tuple[TokenSeq, TokenSeq, TokenSeq]
_group_encode_unchanged_refs(elems: Sequence[ChangedCodeSpan]) -> Sequence[TokenSeq]
_group_encode_changed_refs(changes: Sequence[ChangedCodeSpan]) -> Sequence[TokenSeq]
|
coeditor.model/RetrievalDecodingResult.exact_match_accuracy | Modified | temp-1 | ae76b72f59bc67bc17f0d8c134675a4f97cad381 | Better edit range control: support editing below. | <0>:<add> line_shift = prob.edit_line_ids[0]
| # module: coeditor.model
@dataclass
class RetrievalDecodingResult:
def exact_match_accuracy(self) -> tuple[CountedSum, dict[int, bool]]:
ex2correct = dict[int, bool]()
bad_probs = list[C3Problem]()
for i, mp in enumerate(self.predictions):
prob = self.problems[i]
original = prob.span.original.tolist()
+ pred_delta = TkDelta.from_output_tks(prob.edit_line_ids, mp["output_ids"])
- pred_delta = TkDelta.from_output_tks(prob.edit_lines, mp["output_ids"])
+ label_delta = TkDelta.from_output_tks(prob.edit_line_ids, mp["labels"])
- label_delta = TkDelta.from_output_tks(prob.edit_lines, mp["labels"])
+ if not prob.edit_line_ids:
- if not prob.edit_lines:
bad_probs.append(prob)
continue
- line_shift = prob.edit_lines[0]
<0> pred_change = pred_delta.shifted(line_shift).apply_to_change(original)
label_change = label_delta.shifted(line_shift).apply_to_change(original)
pred_code = tokens_to_change(pred_change).after
label_code = tokens_to_change(label_change).after
ex2correct[i] = code_equal(pred_code, label_code)
correct_count = CountedSum(sum(ex2correct.values()), len(ex2correct))
if bad_probs:
cprint("yellow", "Number of problems with no edits:", len(bad_probs))
for prob in bad_probs[:5]:
print(prob.summary())
return correct_count, ex2correct
| ===========unchanged ref 0===========
at: coeditor._utils
cprint(color: str, *elems, sep: Optional[str]=..., end: Optional[str]=..., file: Optional[SupportsWrite[str]]=..., flush: bool=...)
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
summary() -> str
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
at: coeditor.common
CountedSum = WeightedSum[int, int]
code_equal(code1: str, code2: str) -> bool
at: coeditor.encoding
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
tokens_to_change(tokens: TokenSeq) -> Modified[str]
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
apply_to_change(change: TokenSeq) -> TokenSeq
shifted(shift_lines: int) -> Self
===========unchanged ref 1===========
from_output_tks(edit_line_ids: Sequence[int], tks: TokenSeq, allow_truncated_tks: bool=True) -> "TkDelta"
at: coeditor.model.RetrievalDecodingResult
eval_args: dict
problems: Sequence[C3Problem]
predictions: Sequence[RetrievalModelPrediction]
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
===========changed ref 0===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
"Contextual code change prediction problem."
span: ChangedCodeSpan
+ # The line ids in the change tks that should be edited
- # the lines to be edited, reletive to the start of the span.
+ edit_line_ids: Sequence[int]
- edit_lines: Sequence[int]
# most relevant to least relevant
relevant_changes: Sequence[ChangedCodeSpan]
# most relevant to least relevant
relevant_unchanged: Sequence[ChangedCodeSpan]
# some optional information about how the problem was generated
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
===========changed ref 1===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
def print(self):
main_change = self.span.delta.apply_to_change(self.span.original.tolist())
print_sections(
("summary", self.summary()),
("main change", decode_tokens(main_change)),
+ ("edit_line_ids", str(self.edit_line_ids)),
- ("edit_lines", str(self.edit_lines)),
)
===========changed ref 2===========
# module: coeditor.encoding
- def input_lines_from_tks(input_tks: TokenSeq) -> list[int]:
- """compute the lines in the input to be edited"""
- input_lines = list[int]()
- offset = 0
- for line in split_list(input_tks, Newline_id):
- if line and line[0] == Del_id:
- continue
- if line and is_extra_id(line[0]):
- input_lines.append(offset)
- offset += 1
- return input_lines
-
===========changed ref 3===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
+ def line_ids_to_input_lines(self, line_ids: Sequence[int]) -> Sequence[int]:
+ """Convert the edit lines (which are line ids including deleted lines) into
+ normal line numbers that do not include deleted lines."""
+ change_tks = self.span.delta.apply_to_change(self.span.original.tolist())
+ input_l = self.span.line_range[0]
+ input_lines = list[int]()
+ for i, tks in enumerate(split_list(change_tks, Newline_id)):
+ if tks and tks[0] == Del_id:
+ continue
+ if i in line_ids:
+ input_lines.append(input_l)
+ input_l += 1
+
+ return input_lines
+
===========changed ref 4===========
# module: coeditor.c3problem
@dataclass
class C3ProblemSimpleSplit(C3ProblemTransform):
def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
delta = prob.span.delta
+ l_range = prob.edit_line_ids
- l_range = prob.edit_lines
assert isinstance(l_range, range)
start, stop = l_range.start, l_range.stop
problems = list[C3Problem]()
new_trans = prob.transformations + ("split",)
for i in range(start, stop, self.max_lines_to_edit):
j = min(i + self.max_lines_to_edit, stop)
sub_delta = delta.for_input_range((i, j))
if sub_delta.num_changes() > 0:
sub_prob = dataclasses.replace(
prob, edit_lines=range(i, j), transformations=new_trans
)
problems.append(sub_prob)
if len(problems) >= self.max_split_factor:
break
return problems
|
coeditor.service/ChangeDetector.get_problem | Modified | temp-1 | ae76b72f59bc67bc17f0d8c134675a4f97cad381 | Better edit range control: support editing below. | <0>:<add> cspan, target_lines, changed, target_usages, src_info
| # module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
) -> C3Problem:
<s> len(cspans) != 1:
# Create a trivial change for the target module if it wasn't changed.
print(f"Target span has not changed. Creating a trivial change.")
parents = [Modified.from_unchanged(s) for s in span.scope.ancestors()]
cspan = ChangedSpan(
Modified.from_unchanged(span.code), parents, span.line_range
)
else:
+ if len(cspans) > 1:
+ warnings.warn(
+ f"Multiple spans at line {first_line}. Using only the first one."
+ )
cspan = cspans[0]
with _tlogger.timed("usage analysis"):
script = jedi.Script(path=self.project / target_file)
lines_to_analyze = set(cspan.line_range.to_range())
lines_to_analyze.update(cspan.header_line_range.to_range())
target_usages = self.analyzer.get_line_usages(
script, lines_to_analyze, silent=True
)
src_info = SrcInfo(project=str(self.project), commit=None)
changed = {m: c.inverse() for m, c in rev_changed.items()}
cspan = cspan.inverse()
+ if isinstance(target_lines, int):
+ edit_start = first_line
+ edit_stop = edit_start + min(
+ self.max_lines_to_edit, len(cspan.line_range.to_range()) + 1
+ )
+ target_lines = range(edit_start, edit_stop)
+
prob = gcache.create_problem(
- cspan, edit_lines, changed, target_usages, src_info
<0> )
return prob
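===========example sketch===========
A condensed sketch of the `git status --porcelain` parsing done at the top of
get_problem. The ("added", path) tuples are simplified stand-ins for
coeditor's Added/Deleted/Modified change classes:
def parse_porcelain(lines: list[str]) -> set[tuple[str, str]]:
    changes: set[tuple[str, str]] = set()
    for line in lines:
        if not line:
            continue
        tag, rest = line[:2], line[3:]
        if "R" in tag:  # porcelain prints renames as "R  old -> new"
            old, new = rest.split(" -> ")
            changes.add(("deleted", old))
            changes.add(("added", new))
        elif "A" in tag or tag == "??":
            changes.add(("added", rest))
        elif "D" in tag:
            changes.add(("deleted", rest))
        elif "M" in tag:
            changes.add(("modified", rest))
    return changes

assert parse_porcelain(["?? new.py", " M old.py"]) == {("added", "new.py"), ("modified", "old.py")}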
| ===========above chunk 0===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
) -> C3Problem:
# offset: -1
<s> case Deleted():
mod = self._get_index_module(rel_path)
rev_changed[mod.mname] = JModuleChange.from_modules(
Added(mod), only_ast_changes=False
)
case Modified(path1, path2):
assert path1 == path2
mod_old = self._get_index_module(rel_path)
mod_new = self.get_current_module(rel_path)
rev_changed[mod_new.mname] = JModuleChange.from_modules(
Modified(mod_new, mod_old), only_ast_changes=False
)
modules = self.get_current_modules()
gcache = C3GeneratorCache({m.mname: m for m in modules.values()})
target_mod = self.get_current_module(target_file)
span = target_mod.as_scope.search_span_by_line(first_line)
if span is None:
print_err("Target scope:")
print_err(target_mod.as_scope)
raise ValueError(f"Could not find a statement span at line {first_line}.")
if target_mod.mname not in rev_changed:
print(f"Target module '{target_mod.mname}' has not changed.")
rev_changed[target_mod.mname] = JModuleChange(
Modified.from_unchanged(target_mod), []
)
cspans = [
c
for c in rev_changed[target_mod.mname].changed
if first_line in c.line_range
]
+ if len(cspans) == 0:
- if len(cspans) != 1:
# Create a trivial change for the target module if it wasn't changed.
print(</s>
===========above chunk 1===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
) -> C3Problem:
# offset: -2
<s> == "??" and not self.untracked_as_additions:
continue
if tag.endswith("A"):
path_changes.add(Added(path))
elif tag.endswith("D"):
path_changes.add(Deleted(path))
if tag.endswith("M"):
path_changes.add(Modified(path, path))
else:
tag, path1, path2 = change_line.split(" ")
assert tag.startswith("R")
if is_src(path1):
path_changes.add(Deleted(path1))
if is_src(path2):
path_changes.add(Added(path2))
# use inverse changes so that we can locate spans using their current locations
rev_changed = dict[ModuleName, JModuleChange]()
for path_change in path_changes:
path = self.project / path_change.earlier
rel_path = to_rel_path(path.relative_to(self.project))
if not isinstance(path_change, Added) and not path.exists():
warnings.warn(f"File missing: {rel_path}")
if isinstance(path_change, Deleted):
continue
elif isinstance(path_change, Modified):
path_change = Added(path_change.after)
match path_change:
case Added():
mod = self.get_current_module(rel_path)
rev_changed[mod.mname] = JModuleChange.from_modules(
Deleted(mod), only_ast_changes=False
)
case Deleted():
mod = self._get_index_module(rel_path)
rev_changed[mod.mname</s>
===========above chunk 2===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
) -> C3Problem:
# offset: -3
def is_src(path_s: str) -> bool:
path = Path(path_s)
return path.suffix == ".py" and all(
p not in self.ignore_dirs for p in path.parts
)
if isinstance(target_lines, int):
first_line = target_lines
- edit_lines = None
else:
first_line = target_lines[0]
- edit_lines = target_lines
changed_files = run_command(
["git", "status", "--porcelain"], cwd=self.project
).splitlines()
path_changes = set[Change[str]]()
for change_line in changed_files:
if not change_line:
continue
if change_line[2] == " ":
tag = change_line[:2]
path = change_line[3:]
if not is_src(path):
continue
if tag.endswith("M") or tag.endswith("A") or tag == "??":
</s>
===========unchanged ref 0===========
at: _warnings
warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
at: coeditor.c3problem
SrcInfo(map: Mapping[_KT, _VT], **kwargs: _VT)
SrcInfo(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
SrcInfo(**kwargs: _VT)
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
C3GeneratorCache(pre_module_map: Mapping[ModuleName, JModule])
at: coeditor.change
Added(after: E1)
Deleted(before: E1)
Modified(before: E1, after: E1, unchanged: bool=False)
Change = Added[E1] | Deleted[E1] | Modified[E1]
at: coeditor.change.Added
after: E1
at: coeditor.change.Deleted
before: E1
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
from_unchanged(v: T1) -> "Modified[T1]"
at: coeditor.common
RelPath = NewType("RelPath", Path)
to_rel_path(path: os.PathLike | str) -> RelPath
run_command(args: Sequence[str], cwd: str | Path) -> str
print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None
ModuleName = str
|
coeditor.service/EditPredictionService.suggest_edit | Modified | temp-1 | ae76b72f59bc67bc17f0d8c134675a4f97cad381 | Better edit range control: support editing below. | <0>:<add> print(f"{problem.edit_line_ids=}", file=f)
| # module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
<s>.predict_on_batch(
batch, [original], self.dec_args, self.show_max_solutions
)
assert_eq(len(predictions), 1)
predictions = predictions[0]
assert predictions
if log_dir is not None:
log_dir.mkdir(exist_ok=True)
input_tks = batch["input_ids"][0]
references = batch["references"]
output_truth = batch["labels"][0]
print(f"Writing logs to: {log_dir}")
for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt").open("w") as f:
pred_tks = pred.out_tks
score = pred.score
- print(f"{problem.edit_lines=}", file=f)
<0> print(f"{len(input_tks)=}", file=f)
print(f"{len(references)=}", file=f)
print(f"Solution score: {score:.3g}", file=f)
print(f"Marginalized samples:", pred.n_samples, file=f)
pred = RetrievalModelPrediction(
input_ids=input_tks,
output_ids=pred_tks,
labels=output_truth,
references=references,
)
pred_str = RetrievalDecodingResult.show_prediction(problem, pred)
print(pred_str, file=f)
suggestions = list[EditSuggestion]()
for pred in predictions:
suggested_change, preview = self.apply_edit_to_elem(
problem,
pred.out_tks,
)
suggestion = EditSuggestion(
score=pred.score,
</s> | ===========above chunk 0===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
# offset: -1
timed = self.tlogger.timed
project = self.project
if file.is_absolute():
file = file.relative_to(project)
file = to_rel_path(file)
with timed("get c3 problem"):
problem = self.detector.get_problem(file, edit_lines)
with timed("tokenize c3 problem"):
tk_prob = self.c3_tkn.tokenize_problem(problem)
+ n_out_segs = sum(1 for tk in tk_prob.output_tks if is_extra_id(tk))
+ target_lines = problem.line_ids_to_input_lines(
+ problem.edit_line_ids[:n_out_segs]
+ )
- target_begin = problem.span.line_range[0]
- target_lines = input_lines_from_tks(tk_prob.main_input.tolist())
- target_lines = [target_begin + l for l in target_lines]
batch = C3DataLoader.pack_batch([tk_prob])
original = problem.span.original.tolist()
with timed("run model"), torch.autocast("cuda"):
predictions = self.model.predict_on_batch(
batch, [original], self.dec_args, self.show_max_solutions
</s>
===========below chunk 0===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
# offset: 1
<s>,
pred.out_tks,
)
suggestion = EditSuggestion(
score=pred.score,
change_preview=preview,
new_code=suggested_change.after,
)
suggestions.append(suggestion)
span = problem.span
old_code = tokens_to_change(span.original.tolist()).after
return ServiceResponse(
target_file=file.as_posix(),
edit_start=(span.line_range[0], 0),
edit_end=(span.line_range[1], 0),
target_lines=target_lines,
input_code=old_code,
suggestions=suggestions,
)
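===========example sketch===========
The n_out_segs computation above just counts sentinel tokens in the model
output to learn how many target lines actually received edits. A small
stand-in with a hypothetical string version of is_extra_id:
def is_extra_id(tk: str) -> bool:
    return tk.startswith("<extra_id_")

output_tks = ["<extra_id_0>", "+x", "<extra_id_1>", "-y", "+z"]
n_out_segs = sum(1 for tk in output_tks if is_extra_id(tk))
assert n_out_segs == 2  # edits were produced for the first two target lines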
===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
timed(self, name: str)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.7"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 16
ref_chunk_overlap: int = 32
disable_builtin_defs: bool = True
disable_unchanged_refs: bool = False
current_code_only: bool = False
tokenize_problem(problem: C3Problem) -> TkC3Problem
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.c3problem.TkC3Problem
main_input: TkArray
header: TkArray
output: TkArray
path: ProjectPath
change_type: Change[None]
named_references: Sequence[tuple[str, TkArray]]
project: str
commit: CommitInfo | None
truncated: bool
at: coeditor.change.Modified
after: E1
===========unchanged ref 1===========
at: coeditor.common
to_rel_path(path: os.PathLike | str) -> RelPath
at: coeditor.encoding
tokens_to_change(tokens: TokenSeq) -> Modified[str]
at: coeditor.model
RetrievalModelPrediction(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
RetrievalModelPrediction(**kwargs: _VT)
RetrievalModelPrediction(map: Mapping[_KT, _VT], **kwargs: _VT)
C3DataLoader(all_probs: Sequence[C3Problem], transform: C3ProblemTransform | None, tokenizer: C3ProblemTokenizer, batch_args: BatchArgs, shuffle: bool, desc: str, tqdm_args: dict | None=None, chunk_size: int=1000, workers: int=DefaultWorkers)
at: coeditor.model.C3DataLoader
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
tqdm_args: dict | None = None
chunk_size: int = 1000
workers: int = DefaultWorkers
pack_batch(probs: Sequence[TkC3Problem])
at: coeditor.model.PredictedChange
change: Modified[str]
out_tks: TokenSeq
score: float
n_samples: int
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
decorate_autocast(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1)
predict_on_batch(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1) -> list[list[PredictedChange]]
at: coeditor.service
EditSuggestion(score: float, change_preview: str, new_code: str)
===========unchanged ref 2===========
ServiceResponse(target_file: str, edit_start: tuple[int, int], edit_end: tuple[int, int], target_lines: Sequence[int], input_code: str, suggestions: list[EditSuggestion])
at: coeditor.service.ChangeDetector
get_problem(self, target_file: RelPath, target_lines: Sequence[int] | int) -> C3Problem
get_problem(target_file: RelPath, target_lines: Sequence[int] | int) -> C3Problem
at: coeditor.service.EditPredictionService
apply_edit_to_elem(problem: C3Problem, out_tks: TokenSeq) -> tuple[Modified[str], str]
at: coeditor.service.EditPredictionService.__init__
self.project = detector.project
self.detector = detector
self.model = model
self.c3_tkn = c3_tkn
self.dec_args = dec_args
self.show_max_solutions = 3
self.tlogger = _tlogger
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
at: pathlib
Path()
|
coeditor.service/EditPredictionService.apply_edit_to_elem | Modified | temp-1 | ae76b72f59bc67bc17f0d8c134675a4f97cad381 | Better edit range control: support editing below. | <0>:<add> delta = TkDelta.from_output_tks(problem.edit_line_ids, out_tks)
| # module: coeditor.service
@dataclass
class EditPredictionService:
@staticmethod
def apply_edit_to_elem(
problem: C3Problem,
out_tks: TokenSeq,
) -> tuple[Modified[str], str]:
change_tks = problem.span.original.tolist()
- delta = TkDelta.from_output_tks(problem.edit_lines, out_tks)
<0> new_change_tks = delta.apply_to_change(change_tks)
new_change = tokens_to_change(new_change_tks)
current_code = tokens_to_change(change_tks).after
preview = default_show_diff(current_code, new_change.after)
return new_change, preview
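===========example sketch===========
default_show_diff comes from coeditor.change; a unified diff from the
standard library gives the same flavor of before/after preview:
import difflib

def show_diff(before: str, after: str) -> str:
    lines = difflib.unified_diff(before.splitlines(), after.splitlines(), lineterm="")
    return "\n".join(lines)

print(show_diff("x = 1\ny = 2", "x = 1\ny = 3"))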
| ===========unchanged ref 0===========
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
at: coeditor.c3problem.ChangedCodeSpan
original: TkArray
at: coeditor.change
Modified(before: E1, after: E1, unchanged: bool=False)
default_show_diff(before: Any | None, after: Any | None, max_ctx: int | None=6) -> str
at: coeditor.change.Modified
after: E1
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.encoding
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
tokens_to_change(tokens: TokenSeq) -> Modified[str]
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
apply_to_change(change: TokenSeq) -> TokenSeq
from_output_tks(edit_line_ids: Sequence[int], tks: TokenSeq, allow_truncated_tks: bool=True) -> "TkDelta"
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
===========changed ref 0===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
"Contextual code change prediction problem."
span: ChangedCodeSpan
+ # The line ids in the change tks that should be edited
- # the lines to be edited, reletive to the start of the span.
+ edit_line_ids: Sequence[int]
- edit_lines: Sequence[int]
# most relevant to least relevant
relevant_changes: Sequence[ChangedCodeSpan]
# most relevant to least relevant
relevant_unchanged: Sequence[ChangedCodeSpan]
# some optional information about how the problem was generated
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
===========changed ref 1===========
# module: coeditor.service
@dataclass
class ServiceResponse:
def print(self, file=sys.stdout):
print(f"Target file: {self.target_file}", file=file)
print(f"Edit range: {self.edit_start} - {self.edit_end}", file=file)
+ target_lines = self.target_lines
+ if target_lines:
+ target_lines = f"{target_lines[0]}--{target_lines[-1]}"
+ print(f"Target lines: {target_lines}", file=file)
- print(f"Target lines: {self.target_lines}", file=file)
for i, s in enumerate(self.suggestions):
print(
f"\t--------------- Suggestion {i} (score: {s.score:.3g}) ---------------",
file=file,
)
print(textwrap.indent(s.change_preview, "\t"), file=file)
- print(f"Input code:", file=file)
- print(self.input_code, file=file)
===========changed ref 2===========
# module: coeditor.service
@dataclass
class ChangeDetector:
project: Path
untracked_as_additions: bool = True
ignore_dirs: Collection[str] = field(default_factory=lambda: DefaultIgnoreDirs)
+ # if only the first target line is specified, how many following lines to edit.
+ max_lines_to_edit: int = 25
===========changed ref 3===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
timed = self.tlogger.timed
project = self.project
if file.is_absolute():
file = file.relative_to(project)
file = to_rel_path(file)
with timed("get c3 problem"):
problem = self.detector.get_problem(file, edit_lines)
with timed("tokenize c3 problem"):
tk_prob = self.c3_tkn.tokenize_problem(problem)
+ n_out_segs = sum(1 for tk in tk_prob.output_tks if is_extra_id(tk))
+ target_lines = problem.line_ids_to_input_lines(
+ problem.edit_line_ids[:n_out_segs]
+ )
- target_begin = problem.span.line_range[0]
- target_lines = input_lines_from_tks(tk_prob.main_input.tolist())
- target_lines = [target_begin + l for l in target_lines]
batch = C3DataLoader.pack_batch([tk_prob])
original = problem.span.original.tolist()
with timed("run model"), torch.autocast("cuda"):
predictions = self.model.predict_on_batch(
batch, [original], self.dec_args, self.show_max_solutions
)
assert_eq(len(predictions), 1)
predictions = predictions[0]
assert predictions
if log_dir is not None:
log_dir.mkdir(exist_ok=True)
input_tks = batch["input_ids"][0]
references = batch["references"]
output_truth = batch["labels"][0]
print(f"Writing logs to:</s>
===========changed ref 4===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
# offset: 1
<s> references = batch["references"]
output_truth = batch["labels"][0]
print(f"Writing logs to: {log_dir}")
for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt").open("w") as f:
pred_tks = pred.out_tks
score = pred.score
+ print(f"{problem.edit_line_ids=}", file=f)
- print(f"{problem.edit_lines=}", file=f)
print(f"{len(input_tks)=}", file=f)
print(f"{len(references)=}", file=f)
print(f"Solution score: {score:.3g}", file=f)
print(f"Marginalized samples:", pred.n_samples, file=f)
pred = RetrievalModelPrediction(
input_ids=input_tks,
output_ids=pred_tks,
labels=output_truth,
references=references,
)
pred_str = RetrievalDecodingResult.show_prediction(problem, pred)
print(pred_str, file=f)
suggestions = list[EditSuggestion]()
for pred in predictions:
suggested_change, preview = self.apply_edit_to_elem(
problem,
pred.out_tks,
)
suggestion = EditSuggestion(
score=pred.score,
change_preview=preview,
new_code=suggested_change.after,
)
suggestions.append(suggestion)
span = problem.span
</s> |
scripts.start_server/start_server | Modified | temp-1 | ae76b72f59bc67bc17f0d8c134675a4f97cad381 | Better edit range control: support editing below. | <0>:<add> print(f"Starting suggestion server at localhost:{port}")
| # module: scripts.start_server
- def start_server(
- device, port: int, drop_comments: bool = False, print_stats: bool = True
- ):
+ def start_server(device, port: int, print_stats: bool = True):
<s> model.to(device)
print(f"Model '{model_path}' loaded on device:", device)
batch_args = BatchArgs.service_default()
dec_args = DecodingArgs(do_sample=False, num_beams=4, length_penalty=0.0)
services = dict[Path, EditPredictionService]()
@method
def suggestEdits(project: str, file: str, line: int, writeLogs: bool):
target_dir = Path(project).resolve()
if (service := services.get(target_dir)) is None:
detector = ChangeDetector(target_dir)
service = EditPredictionService(
detector,
model,
batch_args=batch_args,
dec_args=dec_args,
)
print(f"Service created for project: {target_dir}")
services[target_dir] = service
print(f"Suggesting edit for line {line} in {file}")
path = Path(file)
if not Path.is_absolute(path):
path = target_dir / path
try:
service.tlogger.clear()
model.tlogger = service.tlogger
log_dir = service.project / ".coeditor_logs" if writeLogs else None
response = service.suggest_edit(path, line, log_dir)
if print_stats:
print("Runtime stats:")
display(service.tlogger.as_dataframe())
return Success(response.to_json())
except Exception as e:
print("Failed with exception:")
traceback.print_exception(e)
return Error(code=1, message=repr(e))
- print(f"Starting suggestion server ({drop_comments=}) at localhost:{port}")
<0> serve("localhost", port)
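===========example sketch===========
For reference, the jsonrpcserver pattern used above in its smallest form; the
ping method is a made-up example, while serve's signature matches the stub
listed in the references below:
from jsonrpcserver import Success, method, serve

@method
def ping():
    return Success("pong")  # handlers return Success(...) or Error(...)

if __name__ == "__main__":
    serve("localhost", 5555)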
| ===========above chunk 0===========
# module: scripts.start_server
- def start_server(
- device, port: int, drop_comments: bool = False, print_stats: bool = True
- ):
+ def start_server(device, port: int, print_stats: bool = True):
# offset: -1
# this newer model is trained with comments
model_path = "MrVPlusOne/coeditor-xl-c3-dropout-v1.4"
model = RetrievalEditorModel.load(model_path)
model.to(device)
print(f"Model '{model_path}' loaded on device:", device)
batch_args</s>
===========unchanged ref 0===========
at: IPython.core.display_functions
display(*, include=None, exclude=None, metadata=None, transient=None, display_id=None, raw=False, clear=False, source=_sentinel, **kwargs)
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
as_dataframe()
clear()
at: coeditor.model
DecodingArgs(max_output_tks: int=512, do_sample: bool=False, top_p: float=0.9, num_beams: Optional[int]=1, length_penalty: float=0.0, marginalize_samples: int=1)
RetrievalEditorModel(config: T5Config)
at: coeditor.model.DecodingArgs
max_output_tks: int = 512
do_sample: bool = False
top_p: float = 0.9
num_beams: Optional[int] = 1
length_penalty: float = 0.0
marginalize_samples: int = 1
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
load(save_dir: Path | str) -> "RetrievalEditorModel"
at: coeditor.model.RetrievalEditorModel.__init__
self.tlogger = TimeLogger()
at: coeditor.service
ChangeDetector(project: Path, untracked_as_additions: bool=True, ignore_dirs: Collection[str]=field(default_factory=lambda: DefaultIgnoreDirs), max_lines_to_edit: int=30)
EditPredictionService()
at: coeditor.service.EditPredictionService
suggest_edit(file: RelPath, edit_lines: Sequence[int] | int, log_dir: Path | None=Path(".coeditor_logs")) -> ServiceResponse
at: coeditor.service.EditPredictionService.__init__
self.project = detector.project
self.tlogger = _tlogger
===========unchanged ref 1===========
at: coeditor.service.ServiceResponse
target_file: str
target_project: str
edit_start: tuple[int, int]
edit_end: tuple[int, int]
target_lines: Sequence[int]
input_code: str
suggestions: list[EditSuggestion]
to_json()
at: jsonrpcserver.methods
method(f: Optional[Method]=None, name: Optional[str]=None) -> Callable[..., Any]
at: jsonrpcserver.result
Success(fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any) -> Either[ErrorResult, SuccessResult]
Error(fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any) -> Either[ErrorResult, SuccessResult]
at: jsonrpcserver.server
serve(name: str="", port: int=5000) -> None
at: pathlib
Path()
at: pathlib.Path
__slots__ = ()
resolve(strict: bool=...) -> _P
at: pathlib.PurePath
__slots__ = (
'_drv', '_root', '_parts',
'_str', '_hash', '_pparts', '_cached_cparts',
)
drive = property(attrgetter('_drv'),
doc="""The drive prefix (letter or UNC path), if any.""")
root = property(attrgetter('_root'),
doc="""The root of the path, if any.""")
is_absolute(self) -> bool
at: traceback
print_exception(etype: Optional[Type[BaseException]], value: Optional[BaseException], tb: Optional[TracebackType], limit: Optional[int]=..., file: Optional[IO[str]]=..., chain: bool=...) -> None
at: transformers.modeling_utils.PreTrainedModel
config_class = None
base_model_prefix = ""
main_input_name = "input_ids"
_auto_class = None
_no_split_modules = None
===========unchanged ref 2===========
_skip_keys_device_placement = None
_keep_in_fp32_modules = None
_keys_to_ignore_on_load_missing = None
_keys_to_ignore_on_load_unexpected = None
_keys_to_ignore_on_save = None
_tied_weights_keys = None
is_parallelizable = False
supports_gradient_checkpointing = False
to(device: Optional[Union[int, device]]=..., dtype: Optional[Union[dtype, str]]=..., non_blocking: bool=..., /, *, device: Optional[Union[int, device]]=..., dtype: Optional[Union[dtype, str]]=..., non_blocking: bool=..., tensor: Tensor)
at: typing.Mapping
get(key: _KT) -> Optional[_VT_co]
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
===========changed ref 0===========
# module: coeditor.service
@dataclass
class ChangeDetector:
project: Path
untracked_as_additions: bool = True
ignore_dirs: Collection[str] = field(default_factory=lambda: DefaultIgnoreDirs)
+ # if only the first target line is specified, how many following lines to edit.
+ max_lines_to_edit: int = 25
|
coeditor.service/ChangeDetector.get_problem | Modified | temp-1 | f36e2f042ab0df36e0ad78e6db345ea669d32d3d | Improve apply_edit_to_elem. | <0>:<add> return prob, span
| # module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
+ ) -> tuple[C3Problem, StatementSpan]:
- ) -> C3Problem:
<s>) == 0:
# Create a trivial change for the target module if it wasn't changed.
print(f"Target span has not changed. Creating a trivial change.")
parents = [Modified.from_unchanged(s) for s in span.scope.ancestors()]
cspan = ChangedSpan(
Modified.from_unchanged(span.code), parents, span.line_range
)
else:
if len(cspans) > 1:
warnings.warn(
f"Multiple spans at line {first_line}. Using only the first one."
)
cspan = cspans[0]
with _tlogger.timed("usage analysis"):
script = jedi.Script(path=self.project / target_file)
lines_to_analyze = set(cspan.line_range.to_range())
lines_to_analyze.update(cspan.header_line_range.to_range())
target_usages = self.analyzer.get_line_usages(
script, lines_to_analyze, silent=True
)
src_info = SrcInfo(project=str(self.project), commit=None)
changed = {m: c.inverse() for m, c in rev_changed.items()}
cspan = cspan.inverse()
if isinstance(target_lines, int):
edit_start = first_line
edit_stop = edit_start + min(
self.max_lines_to_edit, len(cspan.line_range.to_range()) + 1
)
target_lines = range(edit_start, edit_stop)
prob = gcache.create_problem(
cspan, target_lines, changed, target_usages, src_info
)
- return prob
<0>
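===========example sketch===========
When only the first target line is given, the fallback above widens it into a
range. A plain-integer sketch of that computation (the default of 25 comes
from ChangeDetector.max_lines_to_edit):
def default_target_range(first_line: int, span_len: int, max_lines_to_edit: int = 25) -> range:
    # edit at most max_lines_to_edit lines, but never more than one line past the span
    edit_stop = first_line + min(max_lines_to_edit, span_len + 1)
    return range(first_line, edit_stop)

assert default_target_range(10, 3) == range(10, 14)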
| ===========above chunk 0===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
+ ) -> tuple[C3Problem, StatementSpan]:
- ) -> C3Problem:
# offset: -1
<s> mod = self._get_index_module(rel_path)
rev_changed[mod.mname] = JModuleChange.from_modules(
Added(mod), only_ast_changes=False
)
case Modified(path1, path2):
assert path1 == path2
mod_old = self._get_index_module(rel_path)
mod_new = self.get_current_module(rel_path)
rev_changed[mod_new.mname] = JModuleChange.from_modules(
Modified(mod_new, mod_old), only_ast_changes=False
)
modules = self.get_current_modules()
gcache = C3GeneratorCache({m.mname: m for m in modules.values()})
target_mod = self.get_current_module(target_file)
span = target_mod.as_scope.search_span_by_line(first_line)
if span is None:
print_err("Target scope:")
print_err(target_mod.as_scope)
raise ValueError(f"Could not find a statement span at line {first_line}.")
if target_mod.mname not in rev_changed:
print(f"Target module '{target_mod.mname}' has not changed.")
rev_changed[target_mod.mname] = JModuleChange(
Modified.from_unchanged(target_mod), []
)
cspans = [
c
for c in rev_changed[target_mod.mname].changed
if first_line in c.line_range
]
if len(cspans) == 0:
# Create a trivial change for the target module if it wasn't changed.
print(f"Target span</s>
===========above chunk 1===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
+ ) -> tuple[C3Problem, StatementSpan]:
- ) -> C3Problem:
# offset: -2
<s> tag.endswith("A"):
path_changes.add(Added(path))
elif tag.endswith("D"):
path_changes.add(Deleted(path))
if tag.endswith("M"):
path_changes.add(Modified(path, path))
else:
tag, path1, path2 = change_line.split(" ")
assert tag.startswith("R")
if is_src(path1):
path_changes.add(Deleted(path1))
if is_src(path2):
path_changes.add(Added(path2))
# use inverse changes so that we can locate spans using their current locations
rev_changed = dict[ModuleName, JModuleChange]()
for path_change in path_changes:
path = self.project / path_change.earlier
rel_path = to_rel_path(path.relative_to(self.project))
if not isinstance(path_change, Added) and not path.exists():
warnings.warn(f"File missing: {rel_path}")
if isinstance(path_change, Deleted):
continue
elif isinstance(path_change, Modified):
path_change = Added(path_change.after)
match path_change:
case Added():
mod = self.get_current_module(rel_path)
rev_changed[mod.mname] = JModuleChange.from_modules(
Deleted(mod), only_ast_changes=False
)
case Deleted():
mod = self._get_index_module(rel_path)
rev_changed[mod.mname] = JModuleChange.</s>
===========above chunk 2===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
+ ) -> tuple[C3Problem, StatementSpan]:
- ) -> C3Problem:
# offset: -3
def is_src(path_s: str) -> bool:
path = Path(path_s)
return path.suffix == ".py" and all(
p not in self.ignore_dirs for p in path.parts
)
if isinstance(target_lines, int):
first_line = target_lines
else:
first_line = target_lines[0]
changed_files = run_command(
["git", "status", "--porcelain"], cwd=self.project
).splitlines()
path_changes = set[Change[str]]()
for change_line in changed_files:
if not change_line:
continue
if change_line[2] == " ":
tag = change_line[:2]
path = change_line[3:]
if not is_src(path):
continue
if tag.endswith("M") or tag.endswith("A") or tag == "??":
if tag == "??" and not self.untracked_as_additions:
continue
</s>
===========unchanged ref 0===========
at: _warnings
warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
at: coeditor.c3problem
SrcInfo(map: Mapping[_KT, _VT], **kwargs: _VT)
SrcInfo(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
SrcInfo(**kwargs: _VT)
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
C3GeneratorCache(pre_module_map: Mapping[ModuleName, JModule])
at: coeditor.change
Added(after: E1)
Deleted(before: E1)
Modified(before: E1, after: E1, unchanged: bool=False)
Change = Added[E1] | Deleted[E1] | Modified[E1]
at: coeditor.change.Added
after: E1
at: coeditor.change.Deleted
before: E1
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
from_unchanged(v: T1) -> "Modified[T1]"
at: coeditor.common
RelPath = NewType("RelPath", Path)
to_rel_path(path: os.PathLike | str) -> RelPath
run_command(args: Sequence[str], cwd: str | Path) -> str
print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None
ModuleName = str
|
coeditor.service/ServiceResponse.to_json | Modified | temp-1 | f36e2f042ab0df36e0ad78e6db345ea669d32d3d | Improve apply_edit_to_elem. | <0>:<add> "target_lines": list(self.target_lines),
| # module: coeditor.service
@dataclass
class ServiceResponse:
def to_json(self):
return {
"target_file": self.target_file,
"edit_start": self.edit_start,
"edit_end": self.edit_end,
"old_code": self.input_code,
"suggestions": [s.to_json() for s in self.suggestions],
<0> }
| ===========unchanged ref 0===========
at: coeditor.service.EditSuggestion
score: float
change_preview: str
new_code: str
to_json()
at: coeditor.service.ServiceResponse
target_file: str
edit_start: tuple[int, int]
edit_end: tuple[int, int]
target_lines: Sequence[int]
input_code: str
suggestions: list[EditSuggestion]
===========changed ref 0===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
+ ) -> tuple[C3Problem, StatementSpan]:
- ) -> C3Problem:
def is_src(path_s: str) -> bool:
path = Path(path_s)
return path.suffix == ".py" and all(
p not in self.ignore_dirs for p in path.parts
)
if isinstance(target_lines, int):
first_line = target_lines
else:
first_line = target_lines[0]
changed_files = run_command(
["git", "status", "--porcelain"], cwd=self.project
).splitlines()
path_changes = set[Change[str]]()
for change_line in changed_files:
if not change_line:
continue
if change_line[2] == " ":
tag = change_line[:2]
path = change_line[3:]
if not is_src(path):
continue
if tag.endswith("M") or tag.endswith("A") or tag == "??":
if tag == "??" and not self.untracked_as_additions:
continue
if tag.endswith("A"):
path_changes.add(Added(path))
elif tag.endswith("D"):
path_changes.add(Deleted(path))
if tag.endswith("M"):
path_changes.add(Modified(path, path))
else:
tag, path1, path2 = change_line.split(" ")
assert tag.startswith("R")
if is_src(path1):
path_changes.add(Deleted(path1))
if is_src(path2):
path_changes.add(Added(path2))
# use inverse changes so that we can locate spans using their current locations
rev_changed = dict[ModuleName, JModuleChange]()</s>
===========changed ref 1===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
+ ) -> tuple[C3Problem, StatementSpan]:
- ) -> C3Problem:
# offset: 1
<s> # use inverse changes so that we can locate spans using their current locations
rev_changed = dict[ModuleName, JModuleChange]()
for path_change in path_changes:
path = self.project / path_change.earlier
rel_path = to_rel_path(path.relative_to(self.project))
if not isinstance(path_change, Added) and not path.exists():
warnings.warn(f"File missing: {rel_path}")
if isinstance(path_change, Deleted):
continue
elif isinstance(path_change, Modified):
path_change = Added(path_change.after)
match path_change:
case Added():
mod = self.get_current_module(rel_path)
rev_changed[mod.mname] = JModuleChange.from_modules(
Deleted(mod), only_ast_changes=False
)
case Deleted():
mod = self._get_index_module(rel_path)
rev_changed[mod.mname] = JModuleChange.from_modules(
Added(mod), only_ast_changes=False
)
case Modified(path1, path2):
assert path1 == path2
mod_old = self._get_index_module(rel_path)
mod_new = self.get_current_module(rel_path)
rev_changed[mod_new.mname] = JModuleChange.from_modules(
Modified(mod_new, mod_old), only_ast_changes=False
)
modules = self.get_current_modules()
gcache = C3GeneratorCache({m.mname: m for</s>
===========changed ref 2===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
+ ) -> tuple[C3Problem, StatementSpan]:
- ) -> C3Problem:
# offset: 2
<s> modules.values()})
target_mod = self.get_current_module(target_file)
span = target_mod.as_scope.search_span_by_line(first_line)
if span is None:
print_err("Target scope:")
print_err(target_mod.as_scope)
raise ValueError(f"Could not find a statement span at line {first_line}.")
if target_mod.mname not in rev_changed:
print(f"Target module '{target_mod.mname}' has not changed.")
rev_changed[target_mod.mname] = JModuleChange(
Modified.from_unchanged(target_mod), []
)
cspans = [
c
for c in rev_changed[target_mod.mname].changed
if first_line in c.line_range
]
if len(cspans) == 0:
# Create a trivial change for the target module if it wasn't changed.
print(f"Target span has not changed. Creating a trivial change.")
parents = [Modified.from_unchanged(s) for s in span.scope.ancestors()]
cspan = ChangedSpan(
Modified.from_unchanged(span.code), parents, span.line_range
)
else:
if len(cspans) > 1:
warnings.warn(
f"Multiple spans at line {first_line}. Using only the first one."
)
cspan = cspans[0]
with _tlogger.timed("usage analysis"):
script = jedi.Script(path=self.project</s>
===========changed ref 3===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
+ ) -> tuple[C3Problem, StatementSpan]:
- ) -> C3Problem:
# offset: 3
<s>_file)
lines_to_analyze = set(cspan.line_range.to_range())
lines_to_analyze.update(cspan.header_line_range.to_range())
target_usages = self.analyzer.get_line_usages(
script, lines_to_analyze, silent=True
)
src_info = SrcInfo(project=str(self.project), commit=None)
changed = {m: c.inverse() for m, c in rev_changed.items()}
cspan = cspan.inverse()
if isinstance(target_lines, int):
edit_start = first_line
edit_stop = edit_start + min(
self.max_lines_to_edit, len(cspan.line_range.to_range()) + 1
)
target_lines = range(edit_start, edit_stop)
prob = gcache.create_problem(
cspan, target_lines, changed, target_usages, src_info
)
+ return prob, span
- return prob
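A minimal consumer-side sketch of the payload shape after this change; the values below are made up, only the keys follow the diff above (note the newly serialized "target_lines"):

response = {
    "target_file": "/repo/src/module.py",
    "edit_start": [10, 0],
    "edit_end": [15, 0],
    "old_code": "def f():\n    pass\n",
    "suggestions": [],
    "target_lines": [10, 11, 12],  # field added by this commit
}
for line in response["target_lines"]:
    print(f"the model may edit line {line}")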
|
coeditor.service/EditPredictionService.suggest_edit | Modified | temp-1 | f36e2f042ab0df36e0ad78e6db345ea669d32d3d | Improve apply_edit_to_elem. | <0>:<add> target_file=str(self.project / file),
| # module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
<s> print(f"{len(input_tks)=}", file=f)
print(f"{len(references)=}", file=f)
print(f"Solution score: {score:.3g}", file=f)
print(f"Marginalized samples:", pred.n_samples, file=f)
pred = RetrievalModelPrediction(
input_ids=input_tks,
output_ids=pred_tks,
labels=output_truth,
references=references,
)
pred_str = RetrievalDecodingResult.show_prediction(problem, pred)
print(pred_str, file=f)
suggestions = list[EditSuggestion]()
for pred in predictions:
+ new_code, preview = self.apply_edit_to_elem(
- suggested_change, preview = self.apply_edit_to_elem(
+ span.code,
problem,
pred.out_tks,
)
suggestion = EditSuggestion(
score=pred.score,
change_preview=preview,
+ new_code=new_code,
- new_code=suggested_change.after,
)
suggestions.append(suggestion)
+ old_code = span.code
+ print("old code:", repr(old_code))
- span = problem.span
- old_code = tokens_to_change(span.original.tolist()).after
return ServiceResponse(
- target_file=file.as_posix(),
<0> edit_start=(span.line_range[0], 0),
edit_end=(span.line_range[1], 0),
target_lines=target_lines,
input_code=old_code,
suggestions=suggestions,
)
| ===========above chunk 0===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
# offset: -1
<s>.get_problem(file, edit_lines)
- problem = self.detector.get_problem(file, edit_lines)
with timed("tokenize c3 problem"):
tk_prob = self.c3_tkn.tokenize_problem(problem)
n_out_segs = sum(1 for tk in tk_prob.output_tks if is_extra_id(tk))
target_lines = problem.line_ids_to_input_lines(
problem.edit_line_ids[:n_out_segs]
)
batch = C3DataLoader.pack_batch([tk_prob])
original = problem.span.original.tolist()
with timed("run model"), torch.autocast("cuda"):
predictions = self.model.predict_on_batch(
batch, [original], self.dec_args, self.show_max_solutions
)
assert_eq(len(predictions), 1)
predictions = predictions[0]
assert predictions
if log_dir is not None:
log_dir.mkdir(exist_ok=True)
input_tks = batch["input_ids"][0]
references = batch["references"]
output_truth = batch["labels"][0]
print(f"Writing logs to: {log_dir}")
for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt").open("w") as f:
pred_tks = pred.out_tks
score = pred.score
print(f"{problem.edit_line_ids=}", file=f)
print(f"{len(input_tks)=}", file=f)
print(f"{len(references)=</s>
===========above chunk 1===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
# offset: -2
timed = self.tlogger.timed
project = self.project
if file.is_absolute():
file = file.relative_to(project)
file = to_rel_path(file)
with timed("get c3 problem"):
+ problem, span = self.detector.get_problem(file, edit_lines)
- problem = self.detector.get_problem(file, edit_lines</s>
===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
timed(self, name: str)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
line_ids_to_input_lines(line_ids: Sequence[int]) -> Sequence[int]
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.7"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 16
ref_chunk_overlap: int = 32
disable_builtin_defs: bool = True
disable_unchanged_refs: bool = False
current_code_only: bool = False
tokenize_problem(problem: C3Problem) -> TkC3Problem
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.c3problem.TkC3Problem
main_input: TkArray
header: TkArray
output: TkArray
path: ProjectPath
change_type: Change[None]
named_references: Sequence[tuple[str, TkArray]]
project: str
commit: CommitInfo | None
===========unchanged ref 1===========
truncated: bool
at: coeditor.common
to_rel_path(path: os.PathLike | str) -> RelPath
at: coeditor.encoding
is_extra_id(tk: int) -> bool
at: coeditor.model
RetrievalModelPrediction(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
RetrievalModelPrediction(**kwargs: _VT)
RetrievalModelPrediction(map: Mapping[_KT, _VT], **kwargs: _VT)
C3DataLoader(all_probs: Sequence[C3Problem], transform: C3ProblemTransform | None, tokenizer: C3ProblemTokenizer, batch_args: BatchArgs, shuffle: bool, desc: str, tqdm_args: dict | None=None, chunk_size: int=1000, workers: int=DefaultWorkers)
at: coeditor.model.C3DataLoader
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
tqdm_args: dict | None = None
chunk_size: int = 1000
workers: int = DefaultWorkers
pack_batch(probs: Sequence[TkC3Problem])
at: coeditor.model.PredictedChange
change: Modified[str]
out_tks: TokenSeq
score: float
n_samples: int
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
decorate_autocast(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1)
predict_on_batch(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1) -> list[list[PredictedChange]]
at: coeditor.model.RetrievalEditorModel.__init__
self.tlogger = TimeLogger()
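A hedged usage sketch of the updated entry point; `service` is assumed to be an already-constructed EditPredictionService, and the path and line range are illustrative:

from pathlib import Path

resp = service.suggest_edit(Path("coeditor/model.py"), edit_lines=range(100, 106))
for s in resp.suggestions:
    print(f"score={s.score:.3g}")
    print(s.change_preview)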
|
coeditor.service/EditPredictionService.apply_edit_to_elem | Modified | temp-1 | f36e2f042ab0df36e0ad78e6db345ea669d32d3d | Improve apply_edit_to_elem. | <0>:<add> return new_code, change_preview
| # module: coeditor.service
@dataclass
class EditPredictionService:
@staticmethod
def apply_edit_to_elem(
+ current_code: str,
problem: C3Problem,
out_tks: TokenSeq,
+ ) -> tuple[str, str]:
- ) -> tuple[Modified[str], str]:
- change_tks = problem.span.original.tolist()
delta = TkDelta.from_output_tks(problem.edit_line_ids, out_tks)
+ change1_tks = problem.span.original.tolist()
+ change1 = tokens_to_change(change1_tks)
+ change2_tks = delta.apply_to_change(change1_tks)
- new_change_tks = delta.apply_to_change(change_tks)
+ change2 = tokens_to_change(change2_tks)
- new_change = tokens_to_change(new_change_tks)
+ # change2 is supposed to be the change we want. However, the tokenizer
+ # sometimes does not perfectly encode the input, hence we extract the
+ # delta and directly apply it to the current code to avoid unnecessary
+ # tokenization.
- current_code = tokens_to_change(change_tks).after
+ change_preview = default_show_diff(change1.after, change2.after)
- preview = default_show_diff(current_code, new_change.after)
+ _, delta2 = StrDelta.from_change(Modified(change1.after, change2.after))
+ new_code = delta2.apply_to_input(current_code)
- return new_change, preview
<0>
| ===========unchanged ref 0===========
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
at: coeditor.c3problem.ChangedCodeSpan
original: TkArray
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.encoding
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
tokens_to_change(tokens: TokenSeq) -> Modified[str]
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
from_output_tks(edit_line_ids: Sequence[int], tks: TokenSeq, allow_truncated_tks: bool=True) -> "TkDelta"
at: coeditor.service.EditPredictionService.suggest_edit
suggestions = list[EditSuggestion]()
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
===========changed ref 0===========
# module: coeditor.service
@dataclass
class ServiceResponse:
def to_json(self):
return {
"target_file": self.target_file,
"edit_start": self.edit_start,
"edit_end": self.edit_end,
"old_code": self.input_code,
"suggestions": [s.to_json() for s in self.suggestions],
+ "target_lines": list(self.target_lines),
}
===========changed ref 1===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
timed = self.tlogger.timed
project = self.project
if file.is_absolute():
file = file.relative_to(project)
file = to_rel_path(file)
with timed("get c3 problem"):
+ problem, span = self.detector.get_problem(file, edit_lines)
- problem = self.detector.get_problem(file, edit_lines)
with timed("tokenize c3 problem"):
tk_prob = self.c3_tkn.tokenize_problem(problem)
n_out_segs = sum(1 for tk in tk_prob.output_tks if is_extra_id(tk))
target_lines = problem.line_ids_to_input_lines(
problem.edit_line_ids[:n_out_segs]
)
batch = C3DataLoader.pack_batch([tk_prob])
original = problem.span.original.tolist()
with timed("run model"), torch.autocast("cuda"):
predictions = self.model.predict_on_batch(
batch, [original], self.dec_args, self.show_max_solutions
)
assert_eq(len(predictions), 1)
predictions = predictions[0]
assert predictions
if log_dir is not None:
log_dir.mkdir(exist_ok=True)
input_tks = batch["input_ids"][0]
references = batch["references"]
output_truth = batch["labels"][0]
print(f"Writing logs to: {log_dir}")
for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt").open("w") as f:
pred_t</s>
===========changed ref 2===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
# offset: 1
<s>predictions):
with (log_dir / f"solution-{i}.txt").open("w") as f:
pred_tks = pred.out_tks
score = pred.score
print(f"{problem.edit_line_ids=}", file=f)
print(f"{len(input_tks)=}", file=f)
print(f"{len(references)=}", file=f)
print(f"Solution score: {score:.3g}", file=f)
print(f"Marginalized samples:", pred.n_samples, file=f)
pred = RetrievalModelPrediction(
input_ids=input_tks,
output_ids=pred_tks,
labels=output_truth,
references=references,
)
pred_str = RetrievalDecodingResult.show_prediction(problem, pred)
print(pred_str, file=f)
suggestions = list[EditSuggestion]()
for pred in predictions:
+ new_code, preview = self.apply_edit_to_elem(
- suggested_change, preview = self.apply_edit_to_elem(
+ span.code,
problem,
pred.out_tks,
)
suggestion = EditSuggestion(
score=pred.score,
change_preview=preview,
+ new_code=new_code,
- new_code=suggested_change.after,
)
suggestions.append(suggestion)
+ old_code = span.code
+ print("old code:", repr(old_code))
- span = problem.span
- </s>
===========changed ref 3===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
# offset: 2
<s>_code = tokens_to_change(span.original.tolist()).after
return ServiceResponse(
+ target_file=str(self.project / file),
- target_file=file.as_posix(),
edit_start=(span.line_range[0], 0),
edit_end=(span.line_range[1], 0),
target_lines=target_lines,
input_code=old_code,
suggestions=suggestions,
)
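The preview built above comes from `default_show_diff`; a rough stdlib stand-in (not coeditor's actual implementation) conveys the idea:

import difflib

def show_diff(before: str, after: str) -> str:
    # unified diff of two code strings, similar in spirit to default_show_diff
    return "".join(
        difflib.unified_diff(
            before.splitlines(keepends=True),
            after.splitlines(keepends=True),
            fromfile="before",
            tofile="after",
        )
    )

print(show_diff("x = 1\ny = 2\n", "x = 1\ny = 3\n"))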
|
scripts.start_server/start_server | Modified | temp-1 | 17324431420bcb39712641ef584f624739828518 | Update start_server api. | <0>:<add> response = service.suggest_edit(path, lines, log_dir)
| # module: scripts.start_server
def start_server(device, port: int, print_stats: bool = True):
<s> on device:", device)
batch_args = BatchArgs.service_default()
dec_args = DecodingArgs(do_sample=False, num_beams=4, length_penalty=0.0)
services = dict[Path, EditPredictionService]()
@method
+ def suggestEdits(
+ project: str, file: str, lines: Sequence[int] | int, writeLogs: bool
+ ):
- def suggestEdits(project: str, file: str, line: int, writeLogs: bool):
target_dir = Path(project).resolve()
if (service := services.get(target_dir)) is None:
detector = ChangeDetector(target_dir)
service = EditPredictionService(
detector,
model,
batch_args=batch_args,
dec_args=dec_args,
)
print(f"Service created for project: {target_dir}")
services[target_dir] = service
+ print(f"Suggesting edit for lines {lines} in {file}")
- print(f"Suggesting edit for line {line} in {file}")
path = Path(file)
if not Path.is_absolute(path):
path = target_dir / path
try:
service.tlogger.clear()
model.tlogger = service.tlogger
log_dir = service.project / ".coeditor_logs" if writeLogs else None
- response = service.suggest_edit(path, line, log_dir)
<0> if print_stats:
print("Runtime stats:")
display(service.tlogger.as_dataframe())
return Success(response.to_json())
except Exception as e:
print("Failed with exception:")
traceback.print_exception(e)
return Error(code=1, message=repr(e))
print(f"Starting suggestion server at localhost:{port}")
serve("localhost", port)
| ===========above chunk 0===========
# module: scripts.start_server
def start_server(device, port: int, print_stats: bool = True):
# offset: -1
# this newer model is trained with comments
model_path = "MrVPlusOne/coeditor-xl-c3-dropout-v1.4"
model = RetrievalEditorModel.load(model_path)
model.to(device)
print(f"Model '{model_path}' loaded on device:", device)
batch_args = BatchArgs.service_default()
dec_args = DecodingArgs(do</s>
===========unchanged ref 0===========
at: IPython.core.display_functions
display(*, include=None, exclude=None, metadata=None, transient=None, display_id=None, raw=False, clear=False, source=_sentinel, **kwargs)
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
as_dataframe()
clear()
at: coeditor.model
DecodingArgs(max_output_tks: int=512, do_sample: bool=False, top_p: float=0.9, num_beams: Optional[int]=1, length_penalty: float=0.0, marginalize_samples: int=1)
RetrievalEditorModel(config: T5Config)
at: coeditor.model.DecodingArgs
max_output_tks: int = 512
do_sample: bool = False
top_p: float = 0.9
num_beams: Optional[int] = 1
length_penalty: float = 0.0
marginalize_samples: int = 1
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
load(save_dir: Path | str) -> "RetrievalEditorModel"
at: coeditor.model.RetrievalEditorModel.__init__
self.tlogger = TimeLogger()
at: coeditor.service
ChangeDetector(project: Path, untracked_as_additions: bool=True, ignore_dirs: Collection[str]=field(default_factory=lambda: DefaultIgnoreDirs), max_lines_to_edit: int=30)
EditPredictionService()
at: coeditor.service.EditPredictionService
suggest_edit(file: RelPath, edit_lines: Sequence[int] | int, log_dir: Path | None=Path(".coeditor_logs")) -> ServiceResponse
at: coeditor.service.EditPredictionService.__init__
self.project = detector.project
self.tlogger = _tlogger
===========unchanged ref 1===========
at: coeditor.service.ServiceResponse
target_file: str
target_project: str
edit_start: tuple[int, int]
edit_end: tuple[int, int]
target_lines: Sequence[int]
input_code: str
suggestions: list[EditSuggestion]
to_json()
at: jsonrpcserver.methods
method(f: Optional[Method]=None, name: Optional[str]=None) -> Callable[..., Any]
at: jsonrpcserver.result
Success(fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any) -> Either[ErrorResult, SuccessResult]
Error(fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any) -> Either[ErrorResult, SuccessResult]
at: pathlib
Path()
at: pathlib.Path
__slots__ = ()
resolve(strict: bool=...) -> _P
at: pathlib.PurePath
__slots__ = (
'_drv', '_root', '_parts',
'_str', '_hash', '_pparts', '_cached_cparts',
)
drive = property(attrgetter('_drv'),
doc="""The drive prefix (letter or UNC path), if any.""")
root = property(attrgetter('_root'),
doc="""The root of the path, if any.""")
is_absolute(self) -> bool
at: traceback
print_exception(etype: Optional[Type[BaseException]], value: Optional[BaseException], tb: Optional[TracebackType], limit: Optional[int]=..., file: Optional[IO[str]]=..., chain: bool=...) -> None
at: transformers.modeling_utils.PreTrainedModel
config_class = None
base_model_prefix = ""
main_input_name = "input_ids"
_auto_class = None
_no_split_modules = None
_skip_keys_device_placement = None
_keep_in_fp32_modules = None
===========unchanged ref 2===========
_keys_to_ignore_on_load_missing = None
_keys_to_ignore_on_load_unexpected = None
_keys_to_ignore_on_save = None
_tied_weights_keys = None
is_parallelizable = False
supports_gradient_checkpointing = False
to(device: Optional[Union[int, device]]=..., dtype: Optional[Union[dtype, str]]=..., non_blocking: bool=..., /, *, device: Optional[Union[int, device]]=..., dtype: Optional[Union[dtype, str]]=..., non_blocking: bool=..., tensor: Tensor)
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
at: typing.Mapping
get(key: _KT) -> Optional[_VT_co]
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
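A client-side sketch of the new `suggestEdits` signature, which now takes a list of `lines` instead of a single `line`. It assumes the server above is reachable over plain HTTP (jsonrpcserver's `serve` runs a simple HTTP server); the port, project, and file values are placeholders:

import requests

payload = {
    "jsonrpc": "2.0",
    "method": "suggestEdits",
    "params": {
        "project": "/path/to/repo",
        "file": "src/module.py",
        "lines": [42, 43],  # previously a single `line` argument
        "writeLogs": False,
    },
    "id": 1,
}
reply = requests.post("http://localhost:5588", json=payload).json()
print(reply.get("result") or reply.get("error"))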
|
coeditor.c3problem/C3GeneratorCache.__init__ | Modified | temp-1 | ee631b00e147dd9af4b1fd9cbb7e3c71bb944333 | Speed up ChangeDetector.get_problem via caching. | <0>:<add> self._mod_hier = ModuleHierarchy.from_modules(pre_module_map)
| # module: coeditor.c3problem
class C3GeneratorCache:
def __init__(self, pre_module_map: Mapping[ModuleName, JModule]):
+ # stores the changed headers
+ self._header_cache = dict[ProjectPath, ChangedHeader]()
- self.header_cache = dict[ProjectPath, ChangedHeader]()
+ # stores the definitions pre-edit
+ self._pre_def_cache = dict[ProjectPath, list[ChangedCodeSpan]]()
- self.cspan_cache = dict[PyDefinition, list[ChangedCodeSpan]]()
+ # stores the changed code spans
+ self._cspan_cache = dict[tuple[ModuleName, LineRange], ChangedCodeSpan]()
+ self._module_map = pre_module_map
- self.module_map = pre_module_map
- self.mod_hier = ModuleHierarchy.from_modules(pre_module_map)
<0>
| ===========unchanged ref 0===========
at: coeditor.c3problem
ChangedHeader(change_tks: TkArray, type: str, line_range: LineRange, path: ProjectPath)
ChangedCodeSpan(headers: Sequence[ChangedHeader], original: TkArray, delta: TkDelta, line_range: LineRange, module: ModuleName)
at: coeditor.common
ModuleName = str
ProjectPath(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
at: coeditor.scoped_changes
JModule(mname: ModuleName, tree: ptree.Module)
at: typing
Mapping = _alias(collections.abc.Mapping, 2)
===========changed ref 0===========
# module: coeditor.scoped_changes
- def _parse_module_script(project: jedi.Project, path: Path):
- assert path.is_absolute(), f"Path is not absolute: {path=}"
- script = jedi.Script(path=path, project=project)
- mcontext = script._get_module_context()
- assert isinstance(mcontext, ModuleContext)
- mname = cast(str, mcontext.py__name__())
- if mname.startswith("src."):
- e = ValueError(f"Bad module name: {mname}")
- files = list(project.path.iterdir())
- print_err(f"project: {project.path}", file=sys.stderr)
- print_err(f"files in root: {files}", file=sys.stderr)
- raise e
- m = script._module_node
- assert isinstance(m, ptree.Module)
- # mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))
- # m = parso.parse(path.read_text())
- jmod = JModule(mname, m)
- return jmod, script
-
===========changed ref 1===========
# module: coeditor.scoped_changes
- def _parse_module_script(project: jedi.Project, path: Path):
- assert path.is_absolute(), f"Path is not absolute: {path=}"
- script = jedi.Script(path=path, project=project)
- mcontext = script._get_module_context()
- assert isinstance(mcontext, ModuleContext)
- mname = cast(str, mcontext.py__name__())
- if mname.startswith("src."):
- e = ValueError(f"Bad module name: {mname}")
- files = list(project.path.iterdir())
- print_err(f"project: {project.path}", file=sys.stderr)
- print_err(f"files in root: {files}", file=sys.stderr)
- raise e
- m = script._module_node
- assert isinstance(m, ptree.Module)
- # mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))
- # m = parso.parse(path.read_text())
- jmod = JModule(mname, m)
- return jmod, script
-
===========changed ref 2===========
# module: coeditor.scoped_changes
def _edits_from_commit_history(
project: Path,
history: Sequence[CommitInfo],
change_processor: ProjectChangeProcessor[TProb],
ignore_dirs: set[str],
silent: bool,
time_limit: _Second | None,
) -> Sequence[TProb]:
start_time = time.time()
scripts = dict[RelPath, jedi.Script]()
results = list[TProb]()
def has_timeouted():
if time_limit and (time.time() - start_time > time_limit):
warnings.warn(
f"_edits_from_commit_history timed out for {project}. ({time_limit=}) "
f"Partial results ({len(results)}/{len(history)-1}) will be returned."
)
return True
else:
return False
def parse_module(path: Path):
with _tlogger.timed("parse_module"):
+ m, s = parse_module_script(proj, path)
- m, s = _parse_module_script(proj, path)
scripts[to_rel_path(path.relative_to(proj._path))] = s
return m
def checkout_commit(commit_hash: str):
with _tlogger.timed("checkout"):
subprocess.run(
["git", "checkout", "-f", commit_hash],
cwd=project,
capture_output=True,
check=True,
)
# to make sure we are not accidentally overriding real code changes
if list(project.iterdir()) != [project / ".git"]:
raise FileExistsError(f"Directory '{project}' should contain only '.git'.")
# checkout to the first commit
commit_now = history[-1]
checkout_commit(commit_now.hash)
proj = jedi.Project(path=project, added_sys_path=[project / "src"])
pstate = ProjectState(proj, scripts)
# now we can get the first project</s>
===========changed ref 3===========
# module: coeditor.scoped_changes
def _edits_from_commit_history(
project: Path,
history: Sequence[CommitInfo],
change_processor: ProjectChangeProcessor[TProb],
ignore_dirs: set[str],
silent: bool,
time_limit: _Second | None,
) -> Sequence[TProb]:
# offset: 1
<s>=[project / "src"])
pstate = ProjectState(proj, scripts)
# now we can get the first project state, although this is not needed for now
# but we'll use it later for pre-edit analysis
init_srcs = [
to_rel_path(f.relative_to(project))
for f in rec_iter_files(project, dir_filter=lambda d: d.name not in ignore_dirs)
if f.suffix == ".py"
]
path2module = {
f: parse_module(project / f)
for f in tqdm(init_srcs, desc="building initial project", disable=silent)
}
def is_src(path_s: str) -> bool:
path = Path(path_s)
return path.suffix == ".py" and all(p not in ignore_dirs for p in path.parts)
future_commits = list(reversed(history[:-1]))
for commit_next in tqdm(
future_commits, smoothing=0, desc="processing commits", disable=silent
):
if has_timeouted():
return results
# get changed files
changed_files = run_command(
[
"git",
"diff",
"--no-renames",
"--name-status",
commit_now.hash,
commit_next.hash,
],
cwd=project,
).splitlines()
path_changes = set[Change[str]]()
for line in changed_files:
segs = line.split("\t")
</s> |
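The caching added to `to_code_span` above is plain memoization keyed by `(module, line_range)`; stripped of coeditor's types, the pattern is:

_cspan_cache: dict[tuple[str, tuple[int, int]], str] = {}

def to_code_span(module: str, line_range: tuple[int, int]) -> str:
    key = (module, line_range)
    if (cached := _cspan_cache.get(key)) is not None:
        return cached
    result = f"expensive conversion for {module}:{line_range}"  # stand-in work
    _cspan_cache[key] = result
    return result

assert to_code_span("pkg.mod", (3, 9)) is to_code_span("pkg.mod", (3, 9))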
coeditor.c3problem/C3GeneratorCache.get_pre_spans | Modified | temp-1 | ee631b00e147dd9af4b1fd9cbb7e3c71bb944333 | Speed up ChangeDetector.get_problem via caching. | <0>:<add> def_cache[path] = cspans
| # module: coeditor.c3problem
class C3GeneratorCache:
def get_pre_spans(self, used: PyDefinition) -> list[ChangedCodeSpan]:
<s>tree, ptree.Function)
)
case StatementSpan():
stmt_spans.append(elem)
# add collapsed functions
for f_scope in func_scopes:
ancestors = f_scope.ancestors()
stmts = f_scope.spans[-1].statements
body_code = stmts[-1].get_code()
if len(stmts) > 1:
ellipsis = " " * (len(ancestors) - 1) + "# ...\n"
body_code = ellipsis + body_code
cspan = ChangedCodeSpan(
[self.to_header(Modified.from_unchanged(s)) for s in ancestors],
TkArray.new(encode_lines_join(body_code)),
TkDelta.empty(),
f_scope.spans[-1].line_range,
f_scope.path.module,
)
cspans.append(cspan)
# add statement spans
for stmt_span in stmt_spans:
ancestors = stmt_span.scope.ancestors()
stmts = stmt_span.statements
match stmts:
case [
ptree.PythonNode(
type="simple_stmt",
children=[ptree.String(), ptree.Newline()],
),
*rest,
]:
if not rest:
continue
stmts = rest
body_code = "".join(s.get_code() for s in stmts).lstrip("\n")
cspan = ChangedCodeSpan(
[self.to_header(Modified.from_unchanged(s)) for s in ancestors],
TkArray.new(encode_lines_join(body_code)),
TkDelta.empty(),
stmt_span.line_range,
stmt_span.scope.path.module,
)
cspans.append(cspan)
- cspan_cache[used] = cspans
<0> return cspans
| ===========above chunk 0===========
# module: coeditor.c3problem
class C3GeneratorCache:
def get_pre_spans(self, used: PyDefinition) -> list[ChangedCodeSpan]:
# offset: -1
"Get the (pre-edit) spans for the given definition."
+ def_cache = self._pre_def_cache
- cspan_cache = self.cspan_cache
- if used.full_name in cspan_cache:
- return cspan_cache[used.full_name]
+ path = self._mod_hier.resolve_path(split_dots(used.full_name))
- path = self.mod_hier.resolve_path(split_dots(used.full_name))
cspans = list[ChangedCodeSpan]()
+ if path is None or (jmod := self._module_map.get(path.module)) is None:
- if path is None or (jmod := self.module_map.get(path.module)) is None:
- cspan_cache[used] = cspans
return cspans
+ if path in def_cache:
+ return def_cache[path]
scope = jmod.as_scope
elem = scope._search(path.path, used.start_pos[0])
func_scopes = list[ChangeScope]()
stmt_spans = list[StatementSpan]()
match elem:
case ChangeScope(tree=ptree.Function()):
func_scopes.append(elem)
case ChangeScope(tree=ptree.Class()):
# add all attrs and methods
stmt_spans.extend(elem.spans)
func_scopes.extend(
s
for s in elem.subscopes.values()
if isinstance(s.tree, ptree.Function)
)
case StatementSpan():
stmt_spans.append(elem)
#</s>
===========unchanged ref 0===========
at: coeditor._utils
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
at: coeditor.c3problem
ChangedCodeSpan(headers: Sequence[ChangedHeader], original: TkArray, delta: TkDelta, line_range: LineRange, module: ModuleName)
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Sequence[ChangedCodeSpan], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
PyDefinition(full_name: PyFullName, start_pos: tuple[int, int], end_pos: tuple[int, int])
at: coeditor.c3problem.C3GeneratorCache
get_relevant_unchanged(this_change: ChangedCodeSpan, other_changes: Collection[ChangedCodeSpan], line_usages: LineUsageAnalysis)
to_header(self, cs: Change[ChangeScope]) -> ChangedHeader
to_header(cs: Change[ChangeScope]) -> ChangedHeader
at: coeditor.c3problem.C3GeneratorCache.__init__
self._pre_def_cache = dict[ProjectPath, list[ChangedCodeSpan]]()
self._module_map = pre_module_map
self._mod_hier = ModuleHierarchy.from_modules(pre_module_map)
at: coeditor.c3problem.C3GeneratorCache.create_problem
relevant_changes = list[ChangedCodeSpan]()
changed_code = code_span.delta.apply_to_change(code_span.original.tolist())
target_set = set(target_lines)
line_ids = list[int]()
at: coeditor.c3problem.C3GeneratorCache.set_module_map
self._module_map = pre_module_map
self._mod_hier = ModuleHierarchy.from_modules(pre_module_map)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
===========unchanged ref 1===========
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Sequence[ChangedCodeSpan]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.ModuleHierarchy
resolve_path(segs: Sequence[str]) -> ProjectPath | None
at: coeditor.c3problem.PyDefinition
full_name: PyFullName
start_pos: tuple[int, int]
end_pos: tuple[int, int]
at: coeditor.change
Modified(before: E1, after: E1, unchanged: bool=False)
at: coeditor.change.Added
after: E1
map(f: Callable[[E1], T2]) -> "Added[T2]"
at: coeditor.change.Deleted
before: E1
map(f: Callable[[E1], T2]) -> "Deleted[T2]"
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
map(f: Callable[[E1], T2]) -> "Modified[T2]"
from_unchanged(v: T1) -> "Modified[T1]"
at: coeditor.common.ProjectPath
module: ModuleName
path: ElemPath
at: coeditor.encoding
Del_id = get_tk_id(Del)
encode_lines_join(text: str) -> TokenSeq
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
empty() -> "TkDelta"
at: coeditor.scoped_changes
ChangeScope(path: ProjectPath, tree: ScopeTree, spans: Sequence["StatementSpan"], subscopes: Mapping[str, Self], parent_scope: "ChangeScope | None")
===========unchanged ref 2===========
StatementSpan(nth_in_parent: int, statements: Sequence[PyNode], scope: ChangeScope)
at: coeditor.scoped_changes.ChangeScope
path: ProjectPath
tree: ScopeTree
spans: Sequence["StatementSpan"]
subscopes: Mapping[str, Self]
parent_scope: "ChangeScope | None"
ancestors() -> list[Self]
_search(path: ElemPath, line: int) -> Self | "StatementSpan"
at: coeditor.scoped_changes.ChangedSpan
change: Change[str]
parent_scopes: Sequence[Change[ChangeScope]]
line_range: LineRange
at: coeditor.scoped_changes.JModule
mname: ModuleName
tree: ptree.Module
at: coeditor.scoped_changes.StatementSpan
nth_in_parent: int
statements: Sequence[PyNode]
scope: ChangeScope
at: coeditor.scoped_changes.StatementSpan.__post_init__
self.line_range: LineRange = line_range(start, end)
at: coeditor.tk_array
TkArray()
at: coeditor.tk_array.TkArray
new(tks: Sequence[int]) -> "TkArray"
at: dataclasses
replace(obj: _T, **changes: Any) -> _T
at: parso.python.tree
Class(children)
Function(children)
at: parso.tree.BaseNode
__slots__ = ('children',)
get_code(include_prefix=True)
at: typing.Mapping
get(key: _KT) -> Optional[_VT_co]
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
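`get_pre_spans` above collapses a multi-statement function to its last statement, marking the elided part with an ellipsis comment; the core trick in isolation:

def collapse_body(statements: list[str], indent: int) -> str:
    # keep only the last statement; flag dropped ones with an ellipsis comment
    body = statements[-1]
    if len(statements) > 1:
        body = " " * indent + "# ...\n" + body
    return body

print(collapse_body(["    x = 1\n", "    return x\n"], indent=4))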
|
coeditor.c3problem/C3GeneratorCache.to_header | Modified | temp-1 | ee631b00e147dd9af4b1fd9cbb7e3c71bb944333 | Speed up ChangeDetector.get_problem via caching. | <0>:<add> self._header_cache[path] = ch
| # module: coeditor.c3problem
class C3GeneratorCache:
def to_header(self, cs: Change[ChangeScope]) -> ChangedHeader:
path = cs.earlier.path
+ if (ch := self._header_cache.get(path)) is None:
- if (ch := self.header_cache.get(path)) is None:
header_change = cs.map(lambda s: s.header_code.strip("\n"))
ch = ChangedHeader(
TkArray.new(change_to_tokens(header_change)),
cs.earlier.tree.type,
cs.earlier.header_line_range,
cs.earlier.path,
)
- self.header_cache[path] = ch
<0> return ch
| ===========unchanged ref 0===========
at: coeditor.c3problem.C3GeneratorCache.get_relevant_unchanged
sorted_defs = list(reversed(parent_defs))
used_defs = set(sorted_defs)
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.common
ModuleName = str
at: coeditor.scoped_changes
LineRange(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
===========changed ref 0===========
# module: coeditor.c3problem
class C3GeneratorCache:
+ def set_module_map(self, pre_module_map: Mapping[ModuleName, JModule]):
+ self._module_map = pre_module_map
+ self._mod_hier = ModuleHierarchy.from_modules(pre_module_map)
+
===========changed ref 1===========
# module: coeditor.c3problem
class C3GeneratorCache:
+ def clear_caches(
+ self, pre_changed: set[ModuleName], post_changed: set[ModuleName]
+ ) -> None:
+ "Clear outdated caches."
+ for k in tuple(self._header_cache):
+ if k.module in pre_changed or k.module in post_changed:
+ del self._header_cache[k]
+ for k in tuple(self._pre_def_cache):
+ if k.module in pre_changed:
+ del self._pre_def_cache[k]
+ for k in tuple(self._cspan_cache):
+ if k[0] in pre_changed or k[0] in post_changed:
+ del self._cspan_cache[k]
+
===========changed ref 2===========
# module: coeditor.c3problem
class C3GeneratorCache:
def __init__(self, pre_module_map: Mapping[ModuleName, JModule]):
+ # stores the changed headers
+ self._header_cache = dict[ProjectPath, ChangedHeader]()
- self.header_cache = dict[ProjectPath, ChangedHeader]()
+ # stores the definitions pre-edit
+ self._pre_def_cache = dict[ProjectPath, list[ChangedCodeSpan]]()
- self.cspan_cache = dict[PyDefinition, list[ChangedCodeSpan]]()
+ # stores the changed code spans
+ self._cspan_cache = dict[tuple[ModuleName, LineRange], ChangedCodeSpan]()
+ self._module_map = pre_module_map
- self.module_map = pre_module_map
+ self._mod_hier = ModuleHierarchy.from_modules(pre_module_map)
- self.mod_hier = ModuleHierarchy.from_modules(pre_module_map)
===========changed ref 3===========
# module: coeditor.c3problem
class C3GeneratorCache:
def get_pre_spans(self, used: PyDefinition) -> list[ChangedCodeSpan]:
"Get the (pre-edit) spans for the given definition."
+ def_cache = self._pre_def_cache
- cspan_cache = self.cspan_cache
- if used.full_name in cspan_cache:
- return cspan_cache[used.full_name]
+ path = self._mod_hier.resolve_path(split_dots(used.full_name))
- path = self.mod_hier.resolve_path(split_dots(used.full_name))
cspans = list[ChangedCodeSpan]()
+ if path is None or (jmod := self._module_map.get(path.module)) is None:
- if path is None or (jmod := self.module_map.get(path.module)) is None:
- cspan_cache[used] = cspans
return cspans
+ if path in def_cache:
+ return def_cache[path]
scope = jmod.as_scope
elem = scope._search(path.path, used.start_pos[0])
func_scopes = list[ChangeScope]()
stmt_spans = list[StatementSpan]()
match elem:
case ChangeScope(tree=ptree.Function()):
func_scopes.append(elem)
case ChangeScope(tree=ptree.Class()):
# add all attrs and methods
stmt_spans.extend(elem.spans)
func_scopes.extend(
s
for s in elem.subscopes.values()
if isinstance(s.tree, ptree.Function)
)
case StatementSpan():
stmt_spans.append(elem)
# add collapsed functions
for f_scope in func_scopes:
ancestors = f_scope.ancestors()
stmts = f_scope.spans[-1].statements
body_code = stmts[-1].get_code()
if len(stmts) > 1:
</s>
===========changed ref 4===========
# module: coeditor.c3problem
class C3GeneratorCache:
def get_pre_spans(self, used: PyDefinition) -> list[ChangedCodeSpan]:
# offset: 1
<s>spans[-1].statements
body_code = stmts[-1].get_code()
if len(stmts) > 1:
ellipsis = " " * (len(ancestors) - 1) + "# ...\n"
body_code = ellipsis + body_code
cspan = ChangedCodeSpan(
[self.to_header(Modified.from_unchanged(s)) for s in ancestors],
TkArray.new(encode_lines_join(body_code)),
TkDelta.empty(),
f_scope.spans[-1].line_range,
f_scope.path.module,
)
cspans.append(cspan)
# add statement spans
for stmt_span in stmt_spans:
ancestors = stmt_span.scope.ancestors()
stmts = stmt_span.statements
match stmts:
case [
ptree.PythonNode(
type="simple_stmt",
children=[ptree.String(), ptree.Newline()],
),
*rest,
]:
if not rest:
continue
stmts = rest
body_code = "".join(s.get_code() for s in stmts).lstrip("\n")
cspan = ChangedCodeSpan(
[self.to_header(Modified.from_unchanged(s)) for s in ancestors],
TkArray.new(encode_lines_join(body_code)),
TkDelta.empty(),
stmt_span.line_range,
stmt_span.scope.path.module,
)
cspans.append(cspan)
+ def_cache[path] = cspans
- cspan_cache[used] = cspans
return cspans
===========changed ref 5===========
# module: coeditor.scoped_changes
- def _parse_module_script(project: jedi.Project, path: Path):
- assert path.is_absolute(), f"Path is not absolute: {path=}"
- script = jedi.Script(path=path, project=project)
- mcontext = script._get_module_context()
- assert isinstance(mcontext, ModuleContext)
- mname = cast(str, mcontext.py__name__())
- if mname.startswith("src."):
- e = ValueError(f"Bad module name: {mname}")
- files = list(project.path.iterdir())
- print_err(f"project: {project.path}", file=sys.stderr)
- print_err(f"files in root: {files}", file=sys.stderr)
- raise e
- m = script._module_node
- assert isinstance(m, ptree.Module)
- # mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))
- # m = parso.parse(path.read_text())
- jmod = JModule(mname, m)
- return jmod, script
- |
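The `clear_caches` method introduced above drops every cached entry whose key touches a changed module; the same invalidation pattern with generic names:

def invalidate(cache: dict, changed_modules: set[str], module_of) -> None:
    for key in tuple(cache):  # tuple() lets us delete while iterating
        if module_of(key) in changed_modules:
            del cache[key]

cache = {("mod_a", 1): "x", ("mod_b", 2): "y"}
invalidate(cache, {"mod_a"}, module_of=lambda k: k[0])
assert cache == {("mod_b", 2): "y"}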
coeditor.service/ChangeDetector.__post_init__ | Modified | temp-1 | ee631b00e147dd9af4b1fd9cbb7e3c71bb944333 | Speed up ChangeDetector.get_problem via caching. | <0>:<add> self.jproj = jedi.Project(path=proj, added_sys_path=[proj / "src"])
| # module: coeditor.service
@dataclass
class ChangeDetector:
def __post_init__(self):
self.script_cache = TimedCache()
self.analyzer = JediUsageAnalyzer()
self._index_cache = TimedCache[RelPath, JModule, CommitHash]()
+ self._now_cache = TimedCache[RelPath, tuple[JModule, jedi.Script], SysTime]()
- self._now_cache = TimedCache[RelPath, JModule, SysTime]()
+ proj = self.project
<0>
| ===========unchanged ref 0===========
at: coeditor.c3problem
JediUsageAnalyzer(include_parent_usages: bool=True, include_builtins: bool=False)
at: coeditor.common
RelPath = NewType("RelPath", Path)
TimedCache()
at: coeditor.scoped_changes
JModule(mname: ModuleName, tree: ptree.Module)
at: coeditor.service
CommitHash = str
at: coeditor.service.ChangeDetector
project: Path
untracked_as_additions: bool = True
ignore_dirs: Collection[str] = field(default_factory=lambda: DefaultIgnoreDirs)
max_lines_to_edit: int = 25
===========changed ref 0===========
# module: coeditor.c3problem
class C3GeneratorCache:
+ def set_module_map(self, pre_module_map: Mapping[ModuleName, JModule]):
+ self._module_map = pre_module_map
+ self._mod_hier = ModuleHierarchy.from_modules(pre_module_map)
+
===========changed ref 1===========
# module: coeditor.c3problem
class C3GeneratorCache:
+ def clear_caches(
+ self, pre_changed: set[ModuleName], post_changed: set[ModuleName]
+ ) -> None:
+ "Clear outdated caches."
+ for k in tuple(self._header_cache):
+ if k.module in pre_changed or k.module in post_changed:
+ del self._header_cache[k]
+ for k in tuple(self._pre_def_cache):
+ if k.module in pre_changed:
+ del self._pre_def_cache[k]
+ for k in tuple(self._cspan_cache):
+ if k[0] in pre_changed or k[0] in post_changed:
+ del self._cspan_cache[k]
+
===========changed ref 2===========
# module: coeditor.c3problem
class C3GeneratorCache:
def to_header(self, cs: Change[ChangeScope]) -> ChangedHeader:
path = cs.earlier.path
+ if (ch := self._header_cache.get(path)) is None:
- if (ch := self.header_cache.get(path)) is None:
header_change = cs.map(lambda s: s.header_code.strip("\n"))
ch = ChangedHeader(
TkArray.new(change_to_tokens(header_change)),
cs.earlier.tree.type,
cs.earlier.header_line_range,
cs.earlier.path,
)
+ self._header_cache[path] = ch
- self.header_cache[path] = ch
return ch
===========changed ref 3===========
# module: coeditor.c3problem
class C3GeneratorCache:
def to_code_span(self, span: ChangedSpan):
+ mod = span.parent_scopes[0].later.path.module
+ key = (mod, span.line_range)
+ if (cs := self._cspan_cache.get(key)) is not None:
+ return cs
original, delta = line_diffs_to_original_delta(
change_to_line_diffs(span.change)
)
+ result = ChangedCodeSpan(
- return ChangedCodeSpan(
headers=[self.to_header(cs) for cs in span.parent_scopes],
original=TkArray.new(encode_lines_join(original)),
delta=delta.to_tk_delta(),
line_range=span.line_range,
module=span.module,
)
+ self._cspan_cache[key] = result
+ return result
===========changed ref 4===========
# module: coeditor.c3problem
class C3GeneratorCache:
def __init__(self, pre_module_map: Mapping[ModuleName, JModule]):
+ # stores the changed headers
+ self._header_cache = dict[ProjectPath, ChangedHeader]()
- self.header_cache = dict[ProjectPath, ChangedHeader]()
+ # stores the definitions pre-edit
+ self._pre_def_cache = dict[ProjectPath, list[ChangedCodeSpan]]()
- self.cspan_cache = dict[PyDefinition, list[ChangedCodeSpan]]()
+ # stores the changed code spans
+ self._cspan_cache = dict[tuple[ModuleName, LineRange], ChangedCodeSpan]()
+ self._module_map = pre_module_map
- self.module_map = pre_module_map
+ self._mod_hier = ModuleHierarchy.from_modules(pre_module_map)
- self.mod_hier = ModuleHierarchy.from_modules(pre_module_map)
===========changed ref 5===========
# module: coeditor.scoped_changes
- def _parse_module_script(project: jedi.Project, path: Path):
- assert path.is_absolute(), f"Path is not absolute: {path=}"
- script = jedi.Script(path=path, project=project)
- mcontext = script._get_module_context()
- assert isinstance(mcontext, ModuleContext)
- mname = cast(str, mcontext.py__name__())
- if mname.startswith("src."):
- e = ValueError(f"Bad module name: {mname}")
- files = list(project.path.iterdir())
- print_err(f"project: {project.path}", file=sys.stderr)
- print_err(f"files in root: {files}", file=sys.stderr)
- raise e
- m = script._module_node
- assert isinstance(m, ptree.Module)
- # mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))
- # m = parso.parse(path.read_text())
- jmod = JModule(mname, m)
- return jmod, script
-
===========changed ref 6===========
# module: coeditor.scoped_changes
- def _parse_module_script(project: jedi.Project, path: Path):
- assert path.is_absolute(), f"Path is not absolute: {path=}"
- script = jedi.Script(path=path, project=project)
- mcontext = script._get_module_context()
- assert isinstance(mcontext, ModuleContext)
- mname = cast(str, mcontext.py__name__())
- if mname.startswith("src."):
- e = ValueError(f"Bad module name: {mname}")
- files = list(project.path.iterdir())
- print_err(f"project: {project.path}", file=sys.stderr)
- print_err(f"files in root: {files}", file=sys.stderr)
- raise e
- m = script._module_node
- assert isinstance(m, ptree.Module)
- # mname = PythonProject.rel_path_to_module_name(path.relative_to(proj.path))
- # m = parso.parse(path.read_text())
- jmod = JModule(mname, m)
- return jmod, script
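The `TimedCache[..., SysTime]` field above caches parsed modules against a file timestamp (coeditor's own interface is the `cached(key, stamp, f)` method); a dict-based sketch of the same stamp idea:

import os

_cache: dict[str, tuple[float, str]] = {}

def read_cached(path: str) -> str:
    stamp = os.stat(path).st_mtime  # entry goes stale once the file changes
    hit = _cache.get(path)
    if hit is not None and hit[0] == stamp:
        return hit[1]
    with open(path) as f:
        text = f.read()
    _cache[path] = (stamp, text)
    return text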
- |
coeditor.service/ChangeDetector._parse_index_module | Modified | temp-1 | ee631b00e147dd9af4b1fd9cbb7e3c71bb944333 | Speed up ChangeDetector.get_problem via caching. | <0>:<add> self._updated_index_modules.add(mname)
| # module: coeditor.service
@dataclass
class ChangeDetector:
def _parse_index_module(self, path: RelPath) -> JModule:
+ code = file_content_from_commit(self.project, "", path.as_posix())
- code = self._get_index_content(path)
mod = parso.parse(code)
assert isinstance(mod, ptree.Module)
mname = path_to_module_name(path)
<0> return JModule(mname, mod)
| ===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor.common
RelPath = NewType("RelPath", Path)
at: coeditor.service
SysTime = float
at: coeditor.service.ChangeDetector
project: Path
at: coeditor.service.ChangeDetector._get_index_hash
hash = out.split(" ")[1]
at: os
stat(path: _FdOrAnyPath, *, dir_fd: Optional[int]=..., follow_symlinks: bool=...) -> stat_result
at: os.stat_result
st_mode: int # protection bits,
st_ino: int # inode number,
st_dev: int # device,
st_nlink: int # number of hard links,
st_uid: int # user id of owner,
st_gid: int # group id of owner,
st_size: int # size of file, in bytes,
st_atime: float # time of most recent access,
st_mtime: float # time of most recent content modification,
st_ctime: float # platform dependent (time of most recent metadata change on Unix, or the time of creation on Windows)
st_atime_ns: int # time of most recent access, in nanoseconds
st_mtime_ns: int # time of most recent content modification in nanoseconds
st_ctime_ns: int # platform dependent (time of most recent metadata change on Unix, or the time of creation on Windows) in nanoseconds
st_reparse_tag: int
st_file_attributes: int
st_blocks: int # number of blocks allocated for file
st_blksize: int # filesystem blocksize
st_rdev: int # type of device if an inode device
st_flags: int # user defined flags for file
st_gen: int # file generation number
===========unchanged ref 1===========
st_birthtime: int # time of file creation
st_rsize: int
st_creator: int
st_type: int
===========changed ref 0===========
# module: coeditor.service
@dataclass
class ChangeDetector:
- def _get_index_content(self, path: RelPath):
- return file_content_from_commit(self.project, "", path.as_posix())
-
===========changed ref 1===========
# module: coeditor.service
@dataclass
class ChangeDetector:
+ def _get_index_hash(self, path: RelPath) -> CommitHash:
+ out = run_command(["git", "ls-files", "-s", path.as_posix()], cwd=self.project)
+ hash = out.split(" ")[1]
+ assert_eq(len(hash), 40)
+ return hash
+
===========changed ref 2===========
# module: coeditor.service
@dataclass
class ChangeDetector:
- def _get_index_stamp(self, path: RelPath) -> CommitHash:
- out = run_command(["git", "ls-files", "-s", path.as_posix()], cwd=self.project)
- hash = out.split(" ")[1]
- assert_eq(len(hash), 40)
- return hash
-
===========changed ref 3===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def __post_init__(self):
self.script_cache = TimedCache()
self.analyzer = JediUsageAnalyzer()
self._index_cache = TimedCache[RelPath, JModule, CommitHash]()
+ self._now_cache = TimedCache[RelPath, tuple[JModule, jedi.Script], SysTime]()
- self._now_cache = TimedCache[RelPath, JModule, SysTime]()
+ proj = self.project
+ self.jproj = jedi.Project(path=proj, added_sys_path=[proj / "src"])
===========changed ref 4===========
# module: coeditor.c3problem
class C3GeneratorCache:
+ def set_module_map(self, pre_module_map: Mapping[ModuleName, JModule]):
+ self._module_map = pre_module_map
+ self._mod_hier = ModuleHierarchy.from_modules(pre_module_map)
+
===========changed ref 5===========
# module: coeditor.c3problem
class C3GeneratorCache:
+ def clear_caches(
+ self, pre_changed: set[ModuleName], post_changed: set[ModuleName]
+ ) -> None:
+ "Clear outdated caches."
+ for k in tuple(self._header_cache):
+ if k.module in pre_changed or k.module in post_changed:
+ del self._header_cache[k]
+ for k in tuple(self._pre_def_cache):
+ if k.module in pre_changed:
+ del self._pre_def_cache[k]
+ for k in tuple(self._cspan_cache):
+ if k[0] in pre_changed or k[0] in post_changed:
+ del self._cspan_cache[k]
+
===========changed ref 6===========
# module: coeditor.c3problem
class C3GeneratorCache:
def to_header(self, cs: Change[ChangeScope]) -> ChangedHeader:
path = cs.earlier.path
+ if (ch := self._header_cache.get(path)) is None:
- if (ch := self.header_cache.get(path)) is None:
header_change = cs.map(lambda s: s.header_code.strip("\n"))
ch = ChangedHeader(
TkArray.new(change_to_tokens(header_change)),
cs.earlier.tree.type,
cs.earlier.header_line_range,
cs.earlier.path,
)
+ self._header_cache[path] = ch
- self.header_cache[path] = ch
return ch
===========changed ref 7===========
# module: coeditor.c3problem
class C3GeneratorCache:
def to_code_span(self, span: ChangedSpan):
+ mod = span.parent_scopes[0].later.path.module
+ key = (mod, span.line_range)
+ if (cs := self._cspan_cache.get(key)) is not None:
+ return cs
original, delta = line_diffs_to_original_delta(
change_to_line_diffs(span.change)
)
+ result = ChangedCodeSpan(
- return ChangedCodeSpan(
headers=[self.to_header(cs) for cs in span.parent_scopes],
original=TkArray.new(encode_lines_join(original)),
delta=delta.to_tk_delta(),
line_range=span.line_range,
module=span.module,
)
+ self._cspan_cache[key] = result
+ return result
===========changed ref 8===========
# module: coeditor.c3problem
class C3GeneratorCache:
def __init__(self, pre_module_map: Mapping[ModuleName, JModule]):
+ # stores the changed headers
+ self._header_cache = dict[ProjectPath, ChangedHeader]()
- self.header_cache = dict[ProjectPath, ChangedHeader]()
+ # stores the definitions pre-edit
+ self._pre_def_cache = dict[ProjectPath, list[ChangedCodeSpan]]()
- self.cspan_cache = dict[PyDefinition, list[ChangedCodeSpan]]()
+ # stores the changed code spans
+ self._cspan_cache = dict[tuple[ModuleName, LineRange], ChangedCodeSpan]()
+ self._module_map = pre_module_map
- self.module_map = pre_module_map
+ self._mod_hier = ModuleHierarchy.from_modules(pre_module_map)
- self.mod_hier = ModuleHierarchy.from_modules(pre_module_map)
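`_get_index_hash` above depends on the `git ls-files -s` output format, "<mode> <blob-hash> <stage>\t<path>"; standalone (run on a tracked file inside any git repo):

import subprocess

out = subprocess.run(
    ["git", "ls-files", "-s", "README.md"],
    cwd=".", capture_output=True, text=True, check=True,
).stdout
blob_hash = out.split(" ")[1]  # second space-separated field is the blob hash
assert len(blob_hash) == 40    # 40 hex chars in SHA-1 repositories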
|
coeditor.service/ChangeDetector.get_problem | Modified | temp-1 | ee631b00e147dd9af4b1fd9cbb7e3c71bb944333 | Speed up ChangeDetector.get_problem via caching. | <0>:<add> prob = self.gcache.create_problem(
| # module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
) -> tuple[C3Problem, StatementSpan]:
<s>) > 1:
warnings.warn(
f"Multiple spans at line {first_line}. Using only the first one."
)
cspan = cspans[0]
with _tlogger.timed("usage analysis"):
+ script = self.get_current_script(target_file)
- script = jedi.Script(path=self.project / target_file)
lines_to_analyze = set(cspan.line_range.to_range())
lines_to_analyze.update(cspan.header_line_range.to_range())
target_usages = self.analyzer.get_line_usages(
script, lines_to_analyze, silent=True
)
src_info = SrcInfo(project=str(self.project), commit=None)
changed = {m: c.inverse() for m, c in rev_changed.items()}
cspan = cspan.inverse()
if isinstance(target_lines, int):
edit_start = first_line
edit_stop = edit_start + min(
self.max_lines_to_edit, len(cspan.line_range.to_range()) + 1
)
target_lines = range(edit_start, edit_stop)
+ modules = self.get_current_modules()
+ self.gcache.set_module_map({m.mname: m for m in modules.values()})
+ self.gcache.clear_caches(self._updated_index_modules, self._updated_now_modules)
+ self._updated_index_modules.clear()
+ self._updated_now_modules.clear()
+
- prob = gcache.create_problem(
<0> cspan, target_lines, changed, target_usages, src_info
)
return prob, span
| ===========above chunk 0===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
) -> tuple[C3Problem, StatementSpan]:
# offset: -1
<s>(rel_path)
rev_changed[mod_new.mname] = JModuleChange.from_modules(
Modified(mod_new, mod_old), only_ast_changes=False
)
- modules = self.get_current_modules()
- gcache = C3GeneratorCache({m.mname: m for m in modules.values()})
target_mod = self.get_current_module(target_file)
span = target_mod.as_scope.search_span_by_line(first_line)
if span is None:
print_err("Target scope:")
print_err(target_mod.as_scope)
raise ValueError(f"Could not find a statement span at line {first_line}.")
if target_mod.mname not in rev_changed:
print(f"Target module '{target_mod.mname}' has not changed.")
rev_changed[target_mod.mname] = JModuleChange(
Modified.from_unchanged(target_mod), []
)
cspans = [
c
for c in rev_changed[target_mod.mname].changed
if first_line in c.line_range
]
if len(cspans) == 0:
# Create a trivial change for the target module if it wasn't changed.
print(f"Target span has not changed. Creating a trivial change.")
parents = [Modified.from_unchanged(s) for s in span.scope.ancestors()]
cspan = ChangedSpan(
Modified.from_unchanged(span.code), parents, span.line_range
)
else:
if len(cspans) > 1:
warnings.warn(
f"Multiple spans at line {first_line}. Using only the first one."
</s>
===========above chunk 1===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
) -> tuple[C3Problem, StatementSpan]:
# offset: -2
<s>(path2):
path_changes.add(Added(path2))
# use inverse changes so that we can locate spans using their current locations
rev_changed = dict[ModuleName, JModuleChange]()
for path_change in path_changes:
path = self.project / path_change.earlier
rel_path = to_rel_path(path.relative_to(self.project))
if not isinstance(path_change, Added) and not path.exists():
warnings.warn(f"File missing: {rel_path}")
if isinstance(path_change, Deleted):
continue
elif isinstance(path_change, Modified):
path_change = Added(path_change.after)
match path_change:
case Added():
mod = self.get_current_module(rel_path)
rev_changed[mod.mname] = JModuleChange.from_modules(
Deleted(mod), only_ast_changes=False
)
case Deleted():
+ mod = self.get_index_module(rel_path)
- mod = self._get_index_module(rel_path)
rev_changed[mod.mname] = JModuleChange.from_modules(
Added(mod), only_ast_changes=False
)
case Modified(path1, path2):
assert path1 == path2
+ mod_old = self.get_index_module(rel_path)
- mod_old = self._get_index_module(rel_path)
mod_new = self.get_current_module(rel_path)
rev_changed[mod_new.mname] = JModuleChange.from_modules(
Modified</s>
===========above chunk 2===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
) -> tuple[C3Problem, StatementSpan]:
# offset: -3
def is_src(path_s: str) -> bool:
path = Path(path_s)
return path.suffix == ".py" and all(
p not in self.ignore_dirs for p in path.parts
)
if isinstance(target_lines, int):
first_line = target_lines
else:
first_line = target_lines[0]
changed_files = run_command(
["git", "status", "--porcelain"], cwd=self.project
).splitlines()
path_changes = set[Change[str]]()
for change_line in changed_files:
if not change_line:
continue
if change_line[2] == " ":
tag = change_line[:2]
path = change_line[3:]
if not is_src(path):
continue
if tag.endswith("M") or tag.endswith("A") or tag == "??":
if tag == "??" and not self.untracked_as_additions:
continue
if tag.endswith("A"):
path_changes.add(Added(path))
elif tag.endswith("D"):
path_changes.add(Deleted(path))
if tag.endswith("M"):
path_changes.add(Modified(path, path))
else:
tag, path1, path2 = change_line.split(" ")
assert tag.startswith("R")
if is_src(path1):
path_changes.add(Deleted(path1))
if is</s>
===========unchanged ref 0===========
at: _warnings
warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
at: coeditor.c3problem
SrcInfo(map: Mapping[_KT, _VT], **kwargs: _VT)
SrcInfo(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
SrcInfo(**kwargs: _VT)
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
at: coeditor.change
Added(after: E1)
Deleted(before: E1)
Modified(before: E1, after: E1, unchanged: bool=False)
Change = Added[E1] | Deleted[E1] | Modified[E1]
at: coeditor.change.Added
after: E1
at: coeditor.change.Deleted
before: E1
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
from_unchanged(v: T1) -> "Modified[T1]"
at: coeditor.common
RelPath = NewType("RelPath", Path)
to_rel_path(path: os.PathLike | str) -> RelPath
run_command(args: Sequence[str], cwd: str | Path) -> str
print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None
ModuleName = str
at: coeditor.common.TimedCache
cached(key: T1, stamp: TStamp, f: Callable[[], T2]) -> T2
|
coeditor.service/EditPredictionService.suggest_edit | Modified | temp-1 | a551a61fd2b4a8731ea5c88e927a3b8f83d0eb9c | Optimize service params. | <0>:<add> input_code=span.code,
| # module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
<s>)
print(f"{len(references)=}", file=f)
+ print("Relevant unchagned:", file=f)
+ for unchanged in problem.relevant_unchanged:
+ print(
+ "\tpath:",
+ unchanged.headers[-1].path,
+ "lines:",
+ unchanged.line_range,
+ file=f,
+ )
print(f"Solution score: {score:.3g}", file=f)
print(f"Marginalized samples:", pred.n_samples, file=f)
pred = RetrievalModelPrediction(
input_ids=input_tks,
output_ids=pred_tks,
labels=output_truth,
references=references,
)
pred_str = RetrievalDecodingResult.show_prediction(problem, pred)
print(pred_str, file=f)
suggestions = list[EditSuggestion]()
for pred in predictions:
new_code, preview = self.apply_edit_to_elem(
span.code,
problem,
pred.out_tks,
)
suggestion = EditSuggestion(
score=pred.score,
change_preview=preview,
new_code=new_code,
)
suggestions.append(suggestion)
- old_code = span.code
- print("old code:", repr(old_code))
-
return ServiceResponse(
target_file=str(self.project / file),
edit_start=(span.line_range[0], 0),
edit_end=(span.line_range[1], 0),
target_lines=target_lines,
- input_code=old_code,
<0> suggestions=suggestions,
)
| ===========above chunk 0===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
# offset: -1
<s>detector.get_problem(file, edit_lines)
with timed("tokenize c3 problem"):
tk_prob = self.c3_tkn.tokenize_problem(problem)
n_out_segs = sum(1 for tk in tk_prob.output_tks if is_extra_id(tk))
target_lines = problem.line_ids_to_input_lines(
problem.edit_line_ids[:n_out_segs]
)
batch = C3DataLoader.pack_batch([tk_prob])
original = problem.span.original.tolist()
with timed("run model"), torch.autocast("cuda"):
predictions = self.model.predict_on_batch(
batch, [original], self.dec_args, self.show_max_solutions
)
assert_eq(len(predictions), 1)
predictions = predictions[0]
assert predictions
if log_dir is not None:
log_dir.mkdir(exist_ok=True)
input_tks = batch["input_ids"][0]
references = batch["references"]
output_truth = batch["labels"][0]
print(f"Writing logs to: {log_dir}")
for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt").open("w") as f:
pred_tks = pred.out_tks
score = pred.score
print(f"{problem.edit_line_ids=}", file=f)
print(f"{len(input_tks)=}", file=f)
print(f"{len(references)=}", file=f)
+ print("Relevant unchagned:"</s>
===========above chunk 1===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
# offset: -2
timed = self.tlogger.timed
project = self.project
if file.is_absolute():
file = file.relative_to(project)
file = to_rel_path(file)
with timed("get c3 problem"):
problem, span = self.detector.get_problem(file, edit_lines)
with timed("tokenize c3 problem"):
tk_</s>
===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
timed(self, name: str)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
line_ids_to_input_lines(line_ids: Sequence[int]) -> Sequence[int]
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.7"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 16
ref_chunk_overlap: int = 32
disable_builtin_defs: bool = True
disable_unchanged_refs: bool = False
current_code_only: bool = False
tokenize_problem(problem: C3Problem) -> TkC3Problem
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.c3problem.TkC3Problem
main_input: TkArray
header: TkArray
output: TkArray
path: ProjectPath
change_type: Change[None]
named_references: Sequence[tuple[str, TkArray]]
project: str
commit: CommitInfo | None
===========unchanged ref 1===========
truncated: bool
at: coeditor.common
to_rel_path(path: os.PathLike | str) -> RelPath
at: coeditor.encoding
is_extra_id(tk: int) -> bool
at: coeditor.model
RetrievalModelPrediction(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
RetrievalModelPrediction(**kwargs: _VT)
RetrievalModelPrediction(map: Mapping[_KT, _VT], **kwargs: _VT)
C3DataLoader(all_probs: Sequence[C3Problem], transform: C3ProblemTransform | None, tokenizer: C3ProblemTokenizer, batch_args: BatchArgs, shuffle: bool, desc: str, tqdm_args: dict | None=None, chunk_size: int=1000, workers: int=DefaultWorkers)
at: coeditor.model.C3DataLoader
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
tqdm_args: dict | None = None
chunk_size: int = 1000
workers: int = DefaultWorkers
pack_batch(probs: Sequence[TkC3Problem])
at: coeditor.model.PredictedChange
change: Modified[str]
out_tks: TokenSeq
score: float
n_samples: int
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
decorate_autocast(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1)
predict_on_batch(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1) -> list[list[PredictedChange]]
at: coeditor.service
EditSuggestion(score: float, change_preview: str, new_code: str)
|
coeditor._utils/compute_line_diffs | Modified | temp-1 | c12caf0fb8dc2e6b2930f770fe911f9ed359960b | Improve suggestion preview. - Now shows changes of the target region only. | <0>:<add> elif tag != "?":
| # module: coeditor._utils
def compute_line_diffs(
before: Sequence[str], after: Sequence[str], keep_explain_lines: bool = False
):
SizeLimit = 8000
if (
sum(len(x) for x in before) > SizeLimit
or sum(len(x) for x in after) > SizeLimit
):
return compute_line_diffs_fast(before, after)
differ = difflib.Differ()
result = []
for line in differ.compare(before, after):
assert len(line) >= 2
tag = line[0]
+ if keep_explain_lines and tag == "?":
- if keep_explain_lines or tag != "?":
+ result.append(tag + line[2:-1]) # remove trailing newline
<0> result.append(tag + line[2:])
return result
| ===========unchanged ref 0===========
at: coeditor._utils
compute_line_diffs_fast(before: Sequence[str], after: Sequence[str])
at: difflib
Differ(linejunk: Optional[_JunkCallback]=..., charjunk: Optional[_JunkCallback]=...)
at: difflib.Differ
compare(a: Sequence[_StrType], b: Sequence[_StrType]) -> Iterator[_StrType]
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
|
coeditor.service/ChangeDetector.get_problem | Modified | temp-1 | c12caf0fb8dc2e6b2930f770fe911f9ed359960b | Improve suggestion preview. - Now shows changes of the target region only. | <0>:<add> edit_stop = min(edit_start + self.max_lines_to_edit, cspan.line_range[1])
| # module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
) -> tuple[C3Problem, StatementSpan]:
<s> first one."
)
cspan = cspans[0]
with _tlogger.timed("usage analysis"):
script = self.get_current_script(target_file)
lines_to_analyze = set(cspan.line_range.to_range())
lines_to_analyze.update(cspan.header_line_range.to_range())
target_usages = self.analyzer.get_line_usages(
script, lines_to_analyze, silent=True
)
src_info = SrcInfo(project=str(self.project), commit=None)
changed = {m: c.inverse() for m, c in rev_changed.items()}
cspan = cspan.inverse()
if isinstance(target_lines, int):
- edit_start = first_line
- edit_stop = edit_start + min(
- self.max_lines_to_edit, len(cspan.line_range.to_range()) + 1
- )
+ n_above = max(1, self.max_lines_to_edit // 2)
+ edit_start = max(cspan.line_range[0], first_line - n_above)
<0> target_lines = range(edit_start, edit_stop)
modules = self.get_current_modules()
self.gcache.set_module_map({m.mname: m for m in modules.values()})
self.gcache.clear_caches(self._updated_index_modules, self._updated_now_modules)
self._updated_index_modules.clear()
self._updated_now_modules.clear()
prob = self.gcache.create_problem(
cspan, target_lines, changed, target_usages, src_info
)
return prob, span
| ===========above chunk 0===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
) -> tuple[C3Problem, StatementSpan]:
# offset: -1
<s> mod_new = self.get_current_module(rel_path)
rev_changed[mod_new.mname] = JModuleChange.from_modules(
Modified(mod_new, mod_old), only_ast_changes=False
)
target_mod = self.get_current_module(target_file)
span = target_mod.as_scope.search_span_by_line(first_line)
if span is None:
print_err("Target scope:")
print_err(target_mod.as_scope)
raise ValueError(f"Could not find a statement span at line {first_line}.")
if target_mod.mname not in rev_changed:
print(f"Target module '{target_mod.mname}' has not changed.")
rev_changed[target_mod.mname] = JModuleChange(
Modified.from_unchanged(target_mod), []
)
cspans = [
c
for c in rev_changed[target_mod.mname].changed
if first_line in c.line_range
]
if len(cspans) == 0:
# Create a trivial change for the target span if it wasn't changed.
print(f"Target span has not changed. Creating a trivial change.")
parents = [Modified.from_unchanged(s) for s in span.scope.ancestors()]
cspan = ChangedSpan(
Modified.from_unchanged(span.code), parents, span.line_range
)
else:
if len(cspans) > 1:
warnings.warn(
f"Multiple spans at line {first_line}. Using only the first one."
)
cspan = cspans[0]
with _tlogger.timed("usage analysis"):</s>
===========above chunk 1===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
) -> tuple[C3Problem, StatementSpan]:
# offset: -2
<s>, path2 = change_line.split(" ")
assert tag.startswith("R")
if is_src(path1):
path_changes.add(Deleted(path1))
if is_src(path2):
path_changes.add(Added(path2))
# use inverse changes so that we can locate spans using their current locations
rev_changed = dict[ModuleName, JModuleChange]()
for path_change in path_changes:
path = self.project / path_change.earlier
rel_path = to_rel_path(path.relative_to(self.project))
if not isinstance(path_change, Added) and not path.exists():
warnings.warn(f"File missing: {rel_path}")
if isinstance(path_change, Deleted):
continue
elif isinstance(path_change, Modified):
path_change = Added(path_change.after)
match path_change:
case Added():
mod = self.get_current_module(rel_path)
rev_changed[mod.mname] = JModuleChange.from_modules(
Deleted(mod), only_ast_changes=False
)
case Deleted():
mod = self.get_index_module(rel_path)
rev_changed[mod.mname] = JModuleChange.from_modules(
Added(mod), only_ast_changes=False
)
case Modified(path1, path2):
assert path1 == path2
mod_old = self.get_index_module(rel_path)
mod_new = self.get_current_module(rel_path)
rev_changed[mod_new.mname]</s>
===========above chunk 2===========
# module: coeditor.service
@dataclass
class ChangeDetector:
def get_problem(
self,
target_file: RelPath,
target_lines: Sequence[int] | int,
) -> tuple[C3Problem, StatementSpan]:
# offset: -3
def is_src(path_s: str) -> bool:
path = Path(path_s)
return path.suffix == ".py" and all(
p not in self.ignore_dirs for p in path.parts
)
if isinstance(target_lines, int):
first_line = target_lines
else:
first_line = target_lines[0]
changed_files = run_command(
["git", "status", "--porcelain"], cwd=self.project
).splitlines()
path_changes = set[Change[str]]()
for change_line in changed_files:
if not change_line:
continue
if change_line[2] == " ":
tag = change_line[:2]
path = change_line[3:]
if not is_src(path):
continue
if tag.endswith("M") or tag.endswith("A") or tag == "??":
if tag == "??" and not self.untracked_as_additions:
continue
if tag.endswith("A"):
path_changes.add(Added(path))
elif tag.endswith("D"):
path_changes.add(Deleted(path))
if tag.endswith("M"):
path_changes.add(Modified(path, path))
else:
tag,</s>
===========unchanged ref 0===========
at: _warnings
warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
at: coeditor.c3problem
SrcInfo(map: Mapping[_KT, _VT], **kwargs: _VT)
SrcInfo(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
SrcInfo(**kwargs: _VT)
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
at: coeditor.change
Added(after: E1)
Deleted(before: E1)
Modified(before: E1, after: E1, unchanged: bool=False)
Change = Added[E1] | Deleted[E1] | Modified[E1]
at: coeditor.change.Added
after: E1
at: coeditor.change.Deleted
before: E1
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
from_unchanged(v: T1) -> "Modified[T1]"
at: coeditor.common
RelPath = NewType("RelPath", Path)
to_rel_path(path: os.PathLike | str) -> RelPath
run_command(args: Sequence[str], cwd: str | Path) -> str
print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None
ModuleName = str
at: coeditor.common.TimedCache
cached(key: T1, stamp: TStamp, f: Callable[[], T2]) -> T2
|
coeditor.service/EditPredictionService.suggest_edit | Modified | temp-1 | c12caf0fb8dc2e6b2930f770fe911f9ed359960b | Improve suggestion preview. - Now shows changes of the target region only. | <0>:<add> input_code=target.current_code,
| # module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
<s>(
input_ids=input_tks,
output_ids=pred_tks,
labels=output_truth,
references=references,
)
pred_str = RetrievalDecodingResult.show_prediction(problem, pred)
print(pred_str, file=f)
+ target = self.get_target_code(span.code, problem, tk_prob)
suggestions = list[EditSuggestion]()
for pred in predictions:
+ pred_change = self.apply_edit_to_elem(
- new_code, preview = self.apply_edit_to_elem(
+ target,
- span.code,
problem,
pred.out_tks,
+ )
+ preview = "\n".join(
+ compute_line_diffs_fast(
+ splitlines(pred_change.before),
+ splitlines(pred_change.after),
+ )
)
suggestion = EditSuggestion(
score=pred.score,
change_preview=preview,
+ new_code=pred_change.after,
- new_code=new_code,
)
suggestions.append(suggestion)
+ target_lines = target.target_lines
+
return ServiceResponse(
target_file=str(self.project / file),
+ edit_start=(target_lines[0], 0),
- edit_start=(span.line_range[0], 0),
+ edit_end=(target_lines[-1] + 1, 0),
- edit_end=(span.line_range[1], 0),
+ target_lines=target.target_lines,
- target_lines=target_lines,
- input_code=span.code,
<0> suggestions=suggestions,
)
| ===========above chunk 0===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
# offset: -1
<s>span.original.tolist()
with timed("run model"), torch.autocast("cuda"):
predictions = self.model.predict_on_batch(
batch, [original], self.dec_args, self.show_max_solutions
)
assert_eq(len(predictions), 1)
predictions = predictions[0]
assert predictions
if log_dir is not None:
log_dir.mkdir(exist_ok=True)
input_tks = batch["input_ids"][0]
references = batch["references"]
output_truth = batch["labels"][0]
print(f"Writing logs to: {log_dir}")
for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt").open("w") as f:
pred_tks = pred.out_tks
score = pred.score
print(f"{problem.edit_line_ids=}", file=f)
print(f"{len(input_tks)=}", file=f)
print(f"{len(references)=}", file=f)
print("Relevant unchagned:", file=f)
for unchanged in problem.relevant_unchanged:
print(
"\tpath:",
unchanged.headers[-1].path,
"lines:",
unchanged.line_range,
file=f,
)
print(f"Solution score: {score:.3g}", file=f)
print(f"Marginalized samples:", pred.n_samples, file=f)
pred = RetrievalModelPrediction(
input_ids=input_tks,
output_ids=pred_tks,
labels=output_tr</s>
===========above chunk 1===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
# offset: -2
timed = self.tlogger.timed
project = self.project
if file.is_absolute():
file = file.relative_to(project)
file = to_rel_path(file)
with timed("get c3 problem"):
problem, span = self.detector.get_problem(file, edit_lines)
with timed("tokenize c3 problem"):
tk_prob = self.c3_tkn.tokenize_problem(problem)
- n_out_segs = sum(1 for tk in tk_prob.output_tks if is_extra_id(tk))
- target_lines = problem.line_ids_to_input_lines(
- problem.edit_line_ids[:n_out_segs]
- )
batch = C3DataLoader.pack_batch([tk_prob])
original = problem.span.original.tolist()
with timed("run model"), torch.autocast("cuda"):
predictions</s>
===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
timed(self, name: str)
at: coeditor.c3problem
C3ProblemTokenizer(max_ref_tks: int=512, max_query_tks: int=512, max_output_tks: int=256, max_scope_tks: int=128, max_ref_tks_sum: int=512 * 16, ref_chunk_overlap: int=32, disable_builtin_defs: bool=True, disable_unchanged_refs: bool=False, current_code_only: bool=False)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.7"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 16
ref_chunk_overlap: int = 32
disable_builtin_defs: bool = True
disable_unchanged_refs: bool = False
current_code_only: bool = False
tokenize_problem(problem: C3Problem) -> TkC3Problem
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
===========unchanged ref 1===========
at: coeditor.common
to_rel_path(path: os.PathLike | str) -> RelPath
at: coeditor.model
DecodingArgs(max_output_tks: int=512, do_sample: bool=False, top_p: float=0.9, num_beams: Optional[int]=1, length_penalty: float=0.0, marginalize_samples: int=1)
RetrievalModelPrediction(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
RetrievalModelPrediction(**kwargs: _VT)
RetrievalModelPrediction(map: Mapping[_KT, _VT], **kwargs: _VT)
RetrievalEditorModel(config: T5Config)
C3DataLoader(all_probs: Sequence[C3Problem], transform: C3ProblemTransform | None, tokenizer: C3ProblemTokenizer, batch_args: BatchArgs, shuffle: bool, desc: str, tqdm_args: dict | None=None, chunk_size: int=1000, workers: int=DefaultWorkers)
at: coeditor.model.C3DataLoader
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
tqdm_args: dict | None = None
chunk_size: int = 1000
workers: int = DefaultWorkers
pack_batch(probs: Sequence[TkC3Problem])
at: coeditor.model.PredictedChange
change: Modified[str]
out_tks: TokenSeq
score: float
n_samples: int
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
|
coeditor.service/ServiceResponse.to_json | Modified | temp-1 | 4d326a6e8e4a96693abfe2110182b60a17001a0b | Support line status visualization. | <0>:<add> "suggestions": [s for s in self.suggestions],
| # module: coeditor.service
@dataclass
class ServiceResponse:
def to_json(self):
return {
"target_file": self.target_file,
"edit_start": self.edit_start,
"edit_end": self.edit_end,
"old_code": self.input_code,
- "suggestions": [s.to_json() for s in self.suggestions],
<0> "target_lines": list(self.target_lines),
}
| ===========unchanged ref 0===========
at: coeditor.service.ServiceResponse
target_file: str
edit_start: tuple[int, int]
edit_end: tuple[int, int]
target_lines: Sequence[int]
input_code: str
suggestions: list[EditSuggestion]
at: sys
stdout: TextIO
===========changed ref 0===========
# module: coeditor.service
- @dataclass
+ class EditSuggestion(TypedDict):
- class EditSuggestion:
score: float
change_preview: str
new_code: str
+ line_status: list[tuple[int, StatusTag]]
===========changed ref 1===========
# module: coeditor.service
- @dataclass
+ class EditSuggestion(TypedDict):
- class EditSuggestion:
- def to_json(self):
- return {
- "score": self.score,
- "change_preview": self.change_preview,
- "new_code": self.new_code,
- }
- |
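The switch from a dataclass to a TypedDict above is what lets to_json() go away: a TypedDict is a plain dict at runtime, so it serializes directly. A small illustration (line_status uses a plain str where the project has StatusTag):

import json
from typing import TypedDict

class EditSuggestion(TypedDict):
    score: float
    change_preview: str
    new_code: str
    line_status: list[tuple[int, str]]

s = EditSuggestion(
    score=0.9,
    change_preview="+x = 2\n-x = 1",
    new_code="x = 2",
    line_status=[(1, "R")],
)
print(json.dumps(s))  # no custom encoder needed; tuples become JSON arrays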
coeditor.service/ServiceResponse.print | Modified | temp-1 | 4d326a6e8e4a96693abfe2110182b60a17001a0b | Support line status visualization. | <0>:<add> print(textwrap.indent(s["change_preview"], "\t"), file=file)
| # module: coeditor.service
@dataclass
class ServiceResponse:
def print(self, file=sys.stdout):
print(f"Target file: {self.target_file}", file=file)
print(f"Edit range: {self.edit_start} - {self.edit_end}", file=file)
target_lines = self.target_lines
if target_lines:
target_lines = f"{target_lines[0]}--{target_lines[-1]}"
print(f"Target lines: {target_lines}", file=file)
for i, s in enumerate(self.suggestions):
print(
+ f"\t--------------- Suggestion {i} (score: {s['score']:.3g}) ---------------",
- f"\t--------------- Suggestion {i} (score: {s.score:.3g}) ---------------",
file=file,
)
- print(textwrap.indent(s.change_preview, "\t"), file=file)
<0>
| ===========unchanged ref 0===========
at: coeditor.service.ServiceResponse
edit_start: tuple[int, int]
edit_end: tuple[int, int]
target_lines: Sequence[int]
suggestions: list[EditSuggestion]
at: textwrap
indent(text: str, prefix: str, predicate: Optional[Callable[[str], bool]]=...) -> str
===========changed ref 0===========
# module: coeditor.service
- @dataclass
+ class EditSuggestion(TypedDict):
- class EditSuggestion:
score: float
change_preview: str
new_code: str
+ line_status: list[tuple[int, StatusTag]]
===========changed ref 1===========
# module: coeditor.service
- @dataclass
+ class EditSuggestion(TypedDict):
- class EditSuggestion:
- def to_json(self):
- return {
- "score": self.score,
- "change_preview": self.change_preview,
- "new_code": self.new_code,
- }
-
===========changed ref 2===========
# module: coeditor.service
@dataclass
class ServiceResponse:
def to_json(self):
return {
"target_file": self.target_file,
"edit_start": self.edit_start,
"edit_end": self.edit_end,
"old_code": self.input_code,
+ "suggestions": [s for s in self.suggestions],
- "suggestions": [s.to_json() for s in self.suggestions],
"target_lines": list(self.target_lines),
}
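A quick standalone demo of the textwrap.indent call adopted above, indenting a multi-line preview under its suggestion header (values illustrative):

import textwrap

preview = "+x = 2\n-x = 1"
print("\t--------------- Suggestion 0 (score: 0.9) ---------------")
print(textwrap.indent(preview, "\t"))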
|
coeditor.c3problem/PyDefinition.from_name | Modified | temp-1 | 499b1221d86c51463671e7dc35f97df30938c479 | New encoding for changed and unchanged refs. | <0>:<add> yield PyDefinition(full_name, start_pos, end_pos, signatures)
| # module: coeditor.c3problem
@dataclass(frozen=True)
class PyDefinition:
@staticmethod
def from_name(name: classes.BaseName) -> Iterable["PyDefinition"]:
if (
not name.in_builtin_module()
and (full_name := name.full_name)
# and (import_module := name.module_name)
- and (start_pos := name.get_definition_start_position())
- and (end_pos := name.get_definition_end_position())
+ and (signatures := name._get_docstring_signature())
):
full_name = PyFullName(full_name)
+ start_pos = name.get_definition_start_position()
+ end_pos = name.get_definition_end_position()
- # if not full_name.startswith(import_module):
- # raise ValueError(f"Inconsistent module: {full_name=}, {import_module=}")
- yield PyDefinition(full_name, start_pos, end_pos)
<0>
| ===========unchanged ref 0===========
at: coeditor.c3problem
PyFullName = NewType("PyFullName", str)
PyDefinition(full_name: PyFullName, start_pos: tuple[int, int], end_pos: tuple[int, int])
at: coeditor.c3problem.PyDefinition
full_name: PyFullName
start_pos: tuple[int, int]
end_pos: tuple[int, int]
at: jedi.api.classes
BaseName(inference_state, name)
at: jedi.api.classes.BaseName
_mapping = {
'posixpath': 'os.path',
'riscospath': 'os.path',
'ntpath': 'os.path',
'os2emxpath': 'os.path',
'macpath': 'os.path',
'genericpath': 'os.path',
'posix': 'os',
'_io': 'io',
'_functools': 'functools',
'_collections': 'collections',
'_socket': 'socket',
'_sqlite3': 'sqlite3',
}
_tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {
'argparse._ActionsContainer': 'argparse.ArgumentParser',
}.items())
in_builtin_module()
get_definition_start_position()
get_definition_end_position()
at: typing
Iterable = _alias(collections.abc.Iterable, 1)
===========changed ref 0===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class PyDefinition:
"""Note that the module and positions can be referring to either the import
statement or the actual definition."""
full_name: PyFullName
+ start_pos: tuple[int, int] | None
- start_pos: tuple[int, int]
+ end_pos: tuple[int, int] | None
- end_pos: tuple[int, int]
+ signatures: str
===========changed ref 1===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
"Contextual code change prediction problem."
span: ChangedCodeSpan
# The line ids in the change tks that should be edited
edit_line_ids: Sequence[int]
# most relevant to least relevant
relevant_changes: Sequence[ChangedCodeSpan]
# most relevant to least relevant
+ relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
- relevant_unchanged: Sequence[ChangedCodeSpan]
# some optional information about how the problem was generated
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
===========changed ref 2===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
+ def change_size(self) -> int:
+ sum = 0
+ for _, acts in self._deltas.items():
+ for act in acts:
+ sum += len(act)
+ return sum
+ |
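A hedged sketch of pulling the same definition facts from jedi's public API; the project calls the private _get_docstring_signature, which the public docstring() approximates by starting with the signature line (requires jedi installed; the snippet analyzed is illustrative):

import jedi

code = "import textwrap\ntextwrap.indent\n"
for name in jedi.Script(code).infer(line=2, column=10):
    if not name.in_builtin_module() and name.full_name:
        print(name.full_name)                        # e.g. textwrap.indent
        print(name.get_definition_start_position())  # (line, column) or None
        print(name.docstring())                      # begins with the signature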
coeditor.c3problem/C3ProblemTokenizer._group_encode_changed_refs | Modified | temp-1 | 499b1221d86c51463671e7dc35f97df30938c479 | New encoding for changed and unchanged refs. | <0>:<add> return join_list(self._encode_changed_ref(x) for x in changes)
| # module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def _group_encode_changed_refs(
self, changes: Sequence[ChangedCodeSpan]
+ ) -> Sequence[TkArray]:
- ) -> Sequence[TokenSeq]:
- module2changes = groupby(changes, lambda c: c.module)
- all_chunks = list[TokenSeq]()
- for change_group in module2changes.values():
- change_group.sort(key=lambda c: c.line_range[0])
- segs = list[TokenSeq]()
- # we'll add module as the chunk header, so we start within the module
- last_scope = change_group[0].headers[:1]
- for c in change_group:
- header_diff = list[ChangedHeader]()
- for i, h in enumerate(c.headers):
- if i >= len(last_scope) or h.path != last_scope[i].path:
- header_diff.append(h)
- if header_diff:
- header_tks = self._encode_headers(header_diff, 0)
- segs.append(header_tks)
- c_tks = c.delta.apply_to_change(c.original.tolist())
- segs.append(c_tks)
- segs.append([Newline_id, Newline_id])
- last_scope = c.headers
- segs.append([Newline_id])
- mod_change = change_group[0].headers[:1]
- mod_chunks = break_into_chunks(
- join_list(segs),
- lambda i: self._encode_headers(mod_change, i),
- self.max_ref_tks,
- overlap=self.ref_chunk_overlap,
- )
- all_chunks.extend(mod_chunks)
- return all_chunks
<0>
| ===========unchanged ref 0===========
at: coeditor._utils
groupby(iterable: Iterable[T1], keyfunc: Callable[[T1], T2]) -> dict[T2, list[T1]]
at: coeditor.c3problem
ChangedHeader(change_tks: TkArray, type: str, line_range: LineRange, path: ProjectPath)
ChangedCodeSpan(headers: Sequence[ChangedHeader], original: TkArray, delta: TkDelta, line_range: LineRange, module: ModuleName)
at: coeditor.c3problem.C3ProblemTokenizer
max_ref_tks: int = 512
ref_chunk_overlap: int = 32
_encode_headers(scope_changes: Sequence[ChangedHeader], offset: int) -> TokenSeq
at: coeditor.common
TokenSeq = list[Token]
join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
at: coeditor.encoding
Newline_id = get_tk_id("\n")
break_into_chunks(tks: TokenSeq, header_f: Callable[[int], TokenSeq], chunk_size: int, overlap: int, right_to_left: bool=False, add_bos: bool=True, max_return_chunks: int | None=None) -> list[TokenSeq]
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
+ def _encode_changed_ref(self, cspan: ChangedCodeSpan) -> Iterable[TkArray]:
+ "encode a single changed reference"
+ cspan_tks = cspan.delta.apply_to_change(cspan.original.tolist())
+ for chunk in break_into_chunks(
+ cspan_tks,
+ lambda i: self._encode_headers(cspan.headers, i),
+ self.max_ref_tks,
+ overlap=self.ref_chunk_overlap,
+ ):
+ yield TkArray.new(chunk)
+
===========changed ref 1===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def _group_encode_unchanged_refs(
+ self, elems: Mapping[PyFullName, PyDefinition]
- self, elems: Sequence[ChangedCodeSpan]
+ ) -> Sequence[TkArray]:
- ) -> Sequence[TokenSeq]:
+ results = list[TkArray]()
+ this_chunk = TokenSeq()
+ for name, defn in elems.items():
+ parent = ".".join(split_dots(name)[:-1])
+ text = f"at: {parent}\n{defn.signatures}\n\n"
+ tks = encode_lines_join(text)
+ tks = truncate_section(
+ tks, TruncateAt.Right, self.max_ref_tks, inplace=True
+ )
+ if len(this_chunk) + len(tks) > self.max_ref_tks:
+ results.append(TkArray.new(this_chunk))
+ this_chunk = tks
+ else:
+ this_chunk.extend(tks)
+ if this_chunk:
+ results.append(TkArray.new(this_chunk))
+ return results
- return self._group_encode_changed_refs(elems)
===========changed ref 2===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
+ """
+ ## Change log
+ - 2.4: Encode each changed reference individually. Encode signatures for unchanged.
+ """
+
+ VERSION = "2.4"
- VERSION = "2.3"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 12
ref_chunk_overlap: int = 32
===========changed ref 3===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class PyDefinition:
"""Note that the module and positions can be referring to either the import
statement or the actual definition."""
full_name: PyFullName
+ start_pos: tuple[int, int] | None
- start_pos: tuple[int, int]
+ end_pos: tuple[int, int] | None
- end_pos: tuple[int, int]
+ signatures: str
===========changed ref 4===========
# module: coeditor.c3problem
class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
"""
### Change log
+ - v2.7: Use new PyDefinition that includes signatures.
- v2.6: fix missing changes in `JModuleChanges`. Rename to edit_line_ids.
- v2.5: fix newline encoding bug.
- v2.4: fix buggy encoding of `Added` and `Deleted` changes.
- v2.3: always generate problems with full editing range and move the problem
splitting logic elsewhere. Also changed the data format of `ChangedCodeSpan`.
"""
+ VERSION = "2.7"
- VERSION = "2.6"
# change spans with more than this many lines will be ignored
max_span_lines: int = 500
===========changed ref 5===========
# module: coeditor.c3problem
class C3GeneratorCache:
def get_relevant_unchanged(
self,
this_change: ChangedCodeSpan,
- other_changes: Collection[ChangedCodeSpan],
line_usages: LineUsageAnalysis,
):
module = this_change.module
# parent defs are also considered as used
- parent_defs = [
- PyDefinition(
- PyFullName(f"{c.path.module}.{c.path.path}"),
- (c.line_range[0], 0),
- (c.line_range[1], 0),
- )
- for c in this_change.headers
- ]
- # immediate parents are more relevant
- sorted_defs = list(reversed(parent_defs))
- used_defs = set(sorted_defs)
+ sorted_defs = dict[PyFullName, PyDefinition]()
all_lines = set(this_change.line_range.to_range())
all_lines.update(this_change.headers[-1].line_range.to_range())
for l in all_lines:
for pydef in line_usages.line2usages.get(l, set()):
if (
pydef.full_name.startswith(module)
+ and pydef.start_pos
and pydef.start_pos[0] in all_lines
):
# skip self references
continue
- if pydef not in used_defs:
- used_defs.add(pydef)
- sorted_defs.append(pydef)
+ sorted_defs.setdefault(pydef.full_name, pydef)
- # return unique cspans
- seen = set[tuple[ModuleName, LineRange]]()
- # we don't need to show the changed parts again
- for cspan in (this_change, *other_changes):
- seen.add((cspan.module, cspan.line_range))
- result = list[ChangedCodeSpan]()
+ return sorted_defs
- for used in</s> |
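A standalone sketch of the chunking-with-overlap pattern that break_into_chunks implements for the reference windows above (the real helper takes a header function of the chunk offset; a fixed header token is used here as a simplifying assumption):

def chunk_with_overlap(tks: list[int], header: list[int],
                       chunk_size: int, overlap: int) -> list[list[int]]:
    body = chunk_size - len(header)
    assert body > overlap >= 0
    chunks, i = [], 0
    while i < len(tks):
        chunks.append(header + tks[i : i + body])
        if i + body >= len(tks):
            break
        i += body - overlap  # re-reads `overlap` tokens at each boundary
    return chunks

print(chunk_with_overlap(list(range(10)), [-1], chunk_size=5, overlap=1))
# [[-1, 0, 1, 2, 3], [-1, 3, 4, 5, 6], [-1, 6, 7, 8, 9]]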
scripts.start_server/start_server | Modified | temp-1 | 499b1221d86c51463671e7dc35f97df30938c479 | New encoding for changed and unchanged refs. | <0>:<add> dec_args = DecodingArgs(do_sample=False, num_beams=4)
| # module: scripts.start_server
def start_server(device, port: int, print_stats: bool = True):
# this newer model is trained with comments
model_path = "MrVPlusOne/coeditor-xl-c3-dropout-v1.4"
model = RetrievalEditorModel.load(model_path)
model.to(device)
print(f"Model '{model_path}' loaded on device:", device)
- dec_args = DecodingArgs(do_sample=False, num_beams=1)
<0>
services = dict[Path, EditPredictionService]()
@method
def suggestEdits(
project: str, file: str, lines: Sequence[int] | int, writeLogs: bool
):
target_dir = Path(project).resolve()
if (service := services.get(target_dir)) is None:
with timed_action(f"Create service for project: {target_dir}"):
detector = ChangeDetector(target_dir)
service = EditPredictionService(
detector,
model,
dec_args=dec_args,
)
services[target_dir] = service
print(f"Suggesting edit for lines {lines} in {file}")
path = Path(file)
if not Path.is_absolute(path):
path = target_dir / path
try:
service.tlogger.clear()
log_dir = service.project / ".coeditor_logs" if writeLogs else None
response = service.suggest_edit(path, lines, log_dir)
if print_stats:
print("Runtime stats:")
display(service.tlogger.as_dataframe())
return Success(response.to_json())
except Exception as e:
print("Failed with exception:")
traceback.print_exception(e)
return Error(code=1, message=repr(e))
print(f"Starting suggestion server at localhost:{port}")
serve("localhost", port)
| ===========unchanged ref 0===========
at: IPython.core.display_functions
display(*, include=None, exclude=None, metadata=None, transient=None, display_id=None, raw=False, clear=False, source=_sentinel, **kwargs)
at: coeditor._utils
timed_action(name: str, silent: bool=False)
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
as_dataframe()
clear()
at: coeditor.model
DecodingArgs(max_output_tks: int=512, do_sample: bool=False, top_p: float=0.9, num_beams: Optional[int]=1, length_penalty: float=0.0, marginalize_samples: int=1)
RetrievalEditorModel(config: T5Config)
at: coeditor.model.DecodingArgs
max_output_tks: int = 512
do_sample: bool = False
top_p: float = 0.9
num_beams: Optional[int] = 1
length_penalty: float = 0.0
marginalize_samples: int = 1
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
load(save_dir: Path | str) -> "RetrievalEditorModel"
at: coeditor.service
ChangeDetector(project: Path, untracked_as_additions: bool=True, ignore_dirs: Collection[str]=field(default_factory=lambda: DefaultIgnoreDirs), max_lines_to_edit: int=30)
EditPredictionService()
at: coeditor.service.EditPredictionService
suggest_edit(file: RelPath, edit_lines: Sequence[int] | int, log_dir: Path | None=Path(".coeditor_logs")) -> ServiceResponse
at: coeditor.service.EditPredictionService.__init__
self.project = detector.project
self.tlogger = _tlogger
===========unchanged ref 1===========
at: coeditor.service.ServiceResponse
target_file: str
target_project: str
edit_start: tuple[int, int]
edit_end: tuple[int, int]
target_lines: Sequence[int]
input_code: str
suggestions: list[EditSuggestion]
to_json()
at: jsonrpcserver.methods
method(f: Optional[Method]=None, name: Optional[str]=None) -> Callable[..., Any]
at: jsonrpcserver.result
Success(fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any) -> Either[ErrorResult, SuccessResult]
Error(fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any) -> Either[ErrorResult, SuccessResult]
at: jsonrpcserver.server
serve(name: str="", port: int=5000) -> None
at: pathlib
Path()
at: pathlib.Path
__slots__ = ()
resolve(strict: bool=...) -> _P
at: pathlib.PurePath
__slots__ = (
'_drv', '_root', '_parts',
'_str', '_hash', '_pparts', '_cached_cparts',
)
drive = property(attrgetter('_drv'),
doc="""The drive prefix (letter or UNC path), if any.""")
root = property(attrgetter('_root'),
doc="""The root of the path, if any.""")
is_absolute(self) -> bool
at: traceback
print_exception(etype: Optional[Type[BaseException]], value: Optional[BaseException], tb: Optional[TracebackType], limit: Optional[int]=..., file: Optional[IO[str]]=..., chain: bool=...) -> None
at: transformers.modeling_utils.PreTrainedModel
config_class = None
base_model_prefix = ""
main_input_name = "input_ids"
_auto_class = None
_no_split_modules = None
===========unchanged ref 2===========
_skip_keys_device_placement = None
_keep_in_fp32_modules = None
_keys_to_ignore_on_load_missing = None
_keys_to_ignore_on_load_unexpected = None
_keys_to_ignore_on_save = None
_tied_weights_keys = None
is_parallelizable = False
supports_gradient_checkpointing = False
to(device: Optional[Union[int, device]]=..., dtype: Optional[Union[dtype, str]]=..., non_blocking: bool=..., /, *, device: Optional[Union[int, device]]=..., dtype: Optional[Union[dtype, str]]=..., non_blocking: bool=..., tensor: Tensor)
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
at: typing.Mapping
get(key: _KT) -> Optional[_VT_co]
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
===========changed ref 0===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def suggest_edit(
self,
file: Path,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> ServiceResponse:
timed = self.tlogger.timed
project = self.project
if file.is_absolute():
file = file.relative_to(project)
file = to_rel_path(file)
with timed("get c3 problem"):
problem, span = self.detector.get_problem(file, edit_lines)
with timed("tokenize c3 problem"):
tk_prob = self.c3_tkn.tokenize_problem(problem)
batch = C3DataLoader.pack_batch([tk_prob])
original = problem.span.original.tolist()
with timed("run model"), torch.autocast("cuda"):
predictions = self.model.predict_on_batch(
batch, [original], self.dec_args, self.show_max_solutions
)
assert_eq(len(predictions), 1)
predictions = predictions[0]
assert predictions
if log_dir is not None:
log_dir.mkdir(exist_ok=True)
input_tks = batch["input_ids"][0]
references = batch["references"]
output_truth = batch["labels"][0]
print(f"Writing logs to: {log_dir}")
for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt").open("w") as f:
pred_tks = pred.out_tks
score = pred.score
print(f"{problem.edit_line_ids=}", file=f)
print(f"{len(input_tks)=}", file=f)
print(f"{len(references)=}", file=f)
- print("Relevant unchagned:", file=f)
- </s> |
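A hypothetical client call against the server above: jsonrpcserver's serve() runs a plain HTTP endpoint, so a standard JSON-RPC 2.0 POST works. The port, project path, and file below are placeholders, not values from the repo:

import json, urllib.request

payload = {
    "jsonrpc": "2.0", "id": 1, "method": "suggestEdits",
    "params": {"project": "/path/to/repo", "file": "src/app.py",
               "lines": 42, "writeLogs": False},
}
req = urllib.request.Request(
    "http://localhost:5042", data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
)
print(json.load(urllib.request.urlopen(req)))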
coeditor._utils/compute_line_diffs | Modified | temp-1 | c1a8d33c4eeaf20faca18a12b759677f80dbda72 | Improve line diffs. - Now for replace, `+` comes before `-`. | <0>:<add> for line in compare(before, after):
| # module: coeditor._utils
def compute_line_diffs(
before: Sequence[str], after: Sequence[str], keep_explain_lines: bool = False
):
SizeLimit = 8000
+ differ = _ModifiedDiffer()
if (
sum(len(x) for x in before) > SizeLimit
or sum(len(x) for x in after) > SizeLimit
):
+ compare = differ.simple_compare
+ else:
+ compare = differ.compare
- return compute_line_diffs_fast(before, after)
- differ = difflib.Differ()
result = []
- for line in differ.compare(before, after):
<0> assert len(line) >= 2
tag = line[0]
if keep_explain_lines and tag == "?":
result.append(tag + line[2:-1]) # remove trailing newline
elif tag != "?":
result.append(tag + line[2:])
return result
| ===========unchanged ref 0===========
at: difflib
SequenceMatcher(isjunk: Optional[Callable[[_T], bool]]=..., a: Sequence[_T]=..., b: Sequence[_T]=..., autojunk: bool=...)
_keep_original_ws(s, tag_s)
Differ(linejunk: Optional[_JunkCallback]=..., charjunk: Optional[_JunkCallback]=...)
at: difflib.Differ
_dump(self, tag, x, lo, hi)
_qformat(self, aline, bline, atags, btags)
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
|
coeditor.encoding/TkDelta.change_groups | Modified | temp-1 | c1a8d33c4eeaf20faca18a12b759677f80dbda72 | Improve line diffs. - Now for replace, `+` comes before `-`. | <0>:<add> while i < len(keys) and is_next(i - 1, i) and is_key_type(i, Del_id):
| # module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def change_groups(self) -> Sequence[Sequence[DeltaKey]]:
<s>]
return l1 == l2 or (l2 == l1 + 1)
+ def is_same_line(key1: int, key2: int):
+ if key2 >= len(keys):
+ return False
+ l1 = keys[key1][0]
+ l2 = keys[key2][0]
+ return l1 == l2
+
groups = list[tuple[DeltaKey, ...]]()
keys = tuple(self.keys())
i = 0
while i < len(keys):
# case 1: <del> immediately followed by <add>
if (
+ is_same_line(i, i + 1)
- is_next(i, i + 1)
- and is_key_type(i, Del_id)
+ and is_key_type(i, Add_id)
- and is_key_type(i + 1, Add_id)
+ and is_key_type(i + 1, Del_id)
):
groups.append((keys[i], keys[i + 1]))
i += 2
continue
# case 2: contiguous <del> blocks
if is_key_type(i, Del_id):
del_block = [keys[i]]
i += 1
- while (
- i < len(keys)
- and is_next(i - 1, i)
- and is_key_type(i, Del_id)
- and not is_key_type(i + 1, Add_id)
- ):
<0> del_block.append(keys[i])
i += 1
if del_block:
groups.append(tuple(del_block))
continue
# case 3: single action
groups.append((keys[i],))
i += 1
assert_eq(join_list(groups), list(keys))
return groups
| ===========above chunk 0===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def change_groups(self) -> Sequence[Sequence[DeltaKey]]:
# offset: -1
"""Group individual changes into logical groups using heuristics.
Currently, this only groups a <del> immediately followed by an <add>,
as well as contiguous <del> blocks."""
def is_key_type(key_id: int, type: Token):
if key_id >= len(keys):
return False
return self[keys[key_id]][0] == type
def is_next(key1: int, key2: int):
if key2 >= len(keys):
return False
l1 = keys[key1][0]
l2 = keys[key2][0]
return l1 == l2 or (l2 == l1 + 1)
+ def is_same_line(</s>
===========unchanged ref 0===========
at: coeditor.common
Token = int
at: coeditor.encoding
Add_id = get_tk_id(Add)
Del_id = get_tk_id(Del)
DeltaKey = NewType("DeltaKey", tuple[int, int])
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
keys() -> Iterable[DeltaKey]
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor._utils
+ class _ModifiedDiffer(difflib.Differ):
+ """Modified Differ that outputs additions before deletions for replacement spans."""
+
+ _dump: Callable
+ linejunk: Any
+
===========changed ref 1===========
# module: coeditor._utils
+ class _ModifiedDiffer(difflib.Differ):
+ # switch + and - order
+ def _qformat(self, aline, bline, atags, btags):
+ atags = _keep_original_ws(aline, atags).rstrip()
+ btags = _keep_original_ws(bline, btags).rstrip()
+
+ yield "+ " + bline
+ if btags:
+ yield f"? {btags}\n"
+
+ yield "- " + aline
+ if atags:
+ yield f"? {atags}\n"
+
===========changed ref 2===========
# module: coeditor._utils
+ class _ModifiedDiffer(difflib.Differ):
+ # switch + and - order
+ def _plain_replace(self, a, alo, ahi, b, blo, bhi):
+ assert alo < ahi and blo < bhi
+ # dump the shorter block first -- reduces the burden on short-term
+ # memory if the blocks are of very different sizes
+ if bhi - blo < ahi - alo:
+ first = self._dump("+", b, blo, bhi)
+ second = self._dump("-", a, alo, ahi)
+ else:
+ first = self._dump("-", a, alo, ahi)
+ second = self._dump("+", b, blo, bhi)
+ # TODO: this changed
+ for g in second, first:
+ yield from g
+
===========changed ref 3===========
# module: coeditor._utils
+ class _ModifiedDiffer(difflib.Differ):
+ # use _plain_replace instead of _fancy_replace
+ def simple_compare(self, a, b):
+ cruncher = SequenceMatcher(self.linejunk, a, b)
+ for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
+ if tag == "replace":
+ g = self._plain_replace(a, alo, ahi, b, blo, bhi)
+ elif tag == "delete":
+ g = self._dump("-", a, alo, ahi)
+ elif tag == "insert":
+ g = self._dump("+", b, blo, bhi)
+ elif tag == "equal":
+ g = self._dump(" ", a, alo, ahi)
+ else:
+ raise ValueError("unknown tag %r" % (tag,))
+
+ yield from g
+
===========changed ref 4===========
# module: coeditor._utils
def compute_line_diffs(
before: Sequence[str], after: Sequence[str], keep_explain_lines: bool = False
):
SizeLimit = 8000
+ differ = _ModifiedDiffer()
if (
sum(len(x) for x in before) > SizeLimit
or sum(len(x) for x in after) > SizeLimit
):
+ compare = differ.simple_compare
+ else:
+ compare = differ.compare
- return compute_line_diffs_fast(before, after)
- differ = difflib.Differ()
result = []
+ for line in compare(before, after):
- for line in differ.compare(before, after):
assert len(line) >= 2
tag = line[0]
if keep_explain_lines and tag == "?":
result.append(tag + line[2:-1]) # remove trailing newline
elif tag != "?":
result.append(tag + line[2:])
return result
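A simplified standalone sketch of the grouping heuristic in change_groups: keys here are (line, op) pairs rather than the real token-level DeltaKeys, but the two cases mirror the logic above -- an add paired with a del on the same line, and contiguous del blocks:

def group_keys(keys: list[tuple[int, str]]) -> list[tuple[tuple[int, str], ...]]:
    groups, i = [], 0
    while i < len(keys):
        line, op = keys[i]
        if op == "add" and i + 1 < len(keys) and keys[i + 1] == (line, "del"):
            groups.append((keys[i], keys[i + 1]))  # replacement pair
            i += 2
        elif op == "del":
            block = [keys[i]]
            i += 1
            while i < len(keys) and keys[i] == (keys[i - 1][0] + 1, "del"):
                block.append(keys[i])
                i += 1
            groups.append(tuple(block))
        else:
            groups.append((keys[i],))
            i += 1
    return groups

print(group_keys([(0, "add"), (0, "del"), (2, "del"), (3, "del"), (5, "add")]))
# [((0, 'add'), (0, 'del')), ((2, 'del'), (3, 'del')), ((5, 'add'),)]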
|
coeditor.common/random_subset | Modified | temp-1 | 16aba227a72a38fa84d6def0ae82aeb8a152563e | Keep items order in random_subset. | <0>:<add> ids = _subset_ids(ids)
| # module: coeditor.common
def random_subset(all, n: int, rng: random.Random | int | None = None):
if rng is None:
rng = random.Random()
elif isinstance(rng, int):
rng = random.Random(rng)
+
+ def _subset_ids(ids: list[int]):
+ rng.shuffle(ids)
+ ids = ids[:n]
+ ids.sort()
+ return ids
+
if isinstance(all, Sequence):
ids = list(range(len(all)))
+ ids = _subset_ids(ids)
- rng.shuffle(ids)
xs = [all[i] for i in ids[:n]]
return xs
elif isinstance(all, Mapping):
keys = [k for k in all]
ids = list(range(len(keys)))
- rng.shuffle(ids)
<0> return {(k := keys[i]): all[k] for i in ids[:n]}
else:
raise ArgumentError(all, f"Unsupported arg type: {type(all)}")
| ===========unchanged ref 0===========
at: random
Random(x: Any=...)
at: random.Random
VERSION = 3 # used by getstate/setstate
_randbelow = _randbelow_with_getrandbits
shuffle(x: MutableSequence[Any], random: Optional[Callable[[], float]]=...) -> None
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
|
tests.test_edits/TestChangeIdentities.test_tk_encodings | Modified | temp-1 | bd89c861a6d0df7743a0e8b8351a2bc7cd9d44a4 | More edits tests. | <0>:<add> c_rec2 = tokens_to_change(inlined)
| # module: tests.test_edits
class TestChangeIdentities:
def test_tk_encodings(self):
<s> * 40)
- # print(show_change(c))
c_tokens = change_to_tokens(c)
+ print_sections(
+ ("c_tokens", decode_tokens(c_tokens)),
- print("c_tokens\n------\n", decode_tokens(c_tokens))
+ )
c_rec = tokens_to_change(c_tokens)
assert_change_eq(
c_rec, c, "change_to_tokens |> tokens_to_change = identity: " + name
)
in_seq, out_seq = change_to_input_output(c)
+ print_sections(
+ ("in_seq", decode_tokens(in_seq)),
- print("in_seq\n------\n", decode_tokens(in_seq))
+ ("out_seq", decode_tokens(out_seq)),
- print("out_seq\n------\n", decode_tokens(out_seq))
+ )
assert_tks_eq(
in_seq,
code_to_input(encode_lines_join(get_before(c))),
"change_to_input_output mathese code_to_input: " + name,
)
if len(splitlines(get_before(c))) < N_Extra_Ids:
inlined = inline_output_tokens(in_seq, out_seq)
- if inlined:
- assert inlined[-1] == Newline_id
assert_tks_eq(
+ inlined, change_to_tokens(c), "inline_output_tokens: " + name
- inlined[:-1], change_to_tokens(c), "inline_output_tokens: " + name
)
- c_rec2 = tokens_to_change(inlined[:-1])
<0> assert_change_eq(c_rec2, c, "tokens_to_change(inlined): " + name)
| ===========above chunk 0===========
# module: tests.test_edits
class TestChangeIdentities:
def test_tk_encodings(self):
# offset: -1
for name, c in self.cases.items():
+ print("=" * 40, name, "=" * 40)
- # print(show_change(c))
c_tokens = change_to_tokens(c)
</s>
===========unchanged ref 0===========
at: coeditor.common
print_sections(*, sep: str=SEP, file: TextIO=sys.stdout) -> None
print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None
assert_str_equal(actual: str, expect: str, name: str | None=None)
at: coeditor.encoding
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
line_diffs_to_original_delta(diffs: list[str]) -> tuple[str, StrDelta]
change_to_tokens(change: Change[str]) -> TokenSeq
tokens_to_change(tokens: TokenSeq) -> Modified[str]
change_to_input_output(change: Change[str]) -> tuple[TokenSeq, TokenSeq]
at: tests.test_edits
get_before(change: Change[str]) -> str
get_after(change: Change[str]) -> str
assert_change_eq(actual: Change[str], expected: Change[str], name: str)
===========unchanged ref 1===========
at: tests.test_edits.TestChangeIdentities
cases: dict[str, Change[str]] = {
"empty": Modified("", ""),
"generation": Modified("", "123"),
"add a new line": Modified("", "\n"),
"add a new line at end": Modified("a", "a\n"),
"added": Added("a\nb\nc\n"),
"deleted": Deleted("a\nb\nc\n"),
"no change": Modified(
dedent(
"""\
def f1():
x = 1
"""
),
dedent(
"""\
def f1():
x = 1
"""
),
),
"unchanged=True": Modified.from_unchanged(
dedent(
"""\
def f1():
x = 1
"""
),
),
# this test case cannot pass for some reason. Tokenizer bug?
# "leading_whitespace": Modified.from_unchanged(" ..."),
"replace last": Modified(
dedent(
"""\
def f1():
x = 1"""
),
dedent(
"""\
def f1():
x = 2
return x * 2"""
),
),
"no special tokens": Modified(
dedent(
"""\
def f1():
x = 1
y = 2
z = x + y
return z
def f2():
f1()"""
),
dedent(
"""\
# new comment
def f_new():
x = 1
if x > 0:
y = 2 * x
y *= 2
z = x + y
return z
def f2():
f1()
return f_new() + a
new_var = 0
"""
),
),
"with special tokens": Modified(
dedent(
"""\
def f1():
x = "<add>"
</s>
===========unchanged ref 2===========
at: tests.test_edits.TestChangeIdentities.test_str_encodings
line_diffs = change_to_line_diffs(c)
===========changed ref 0===========
# module: tests.test_edits
def assert_change_eq(actual: Change[str], expected: Change[str], name: str):
+ assert_str_equal(get_before(actual), get_before(expected))
- if get_before(actual) != get_before(expected):
- print_sections(
- ("Expected before", get_before(expected)),
- ("Reconstructed before", get_before(actual)),
- )
- raise ValueError(f"Failed for case: {name}")
+ assert_str_equal(get_after(actual), get_after(expected))
- if get_after(actual) != get_after(expected):
- print_sections(
- ("Expected after", get_after(expected)),
- ("Reconstructed after", get_after(actual)),
- )
- raise ValueError(f"Failed for case: {name}")
===========changed ref 1===========
# module: tests.test_edits
+ def test_splitlines():
+ for n in range(100):
+ rand_input = [random.choice(["a", "b", "c", "\n"]) for _ in range(n)]
+ input = fix_line_end("".join(rand_input))
+ lines = splitlines(input)
+
+ # basic identity
+ assert "".join(lines) == input
+ assert count_lines(input) == len(lines)
+
+ # encode and decode
+ enc = encode_lines_join(input)
+ assert decode_tokens(enc) == input
+
+ # split tokens
+ tk_lines = tk_splitlines(enc)
+ assert len(tk_lines) == len(lines)
+ assert_tks_eq(join_list(tk_lines), enc, "join_list(tk_lines)")
+
===========changed ref 2===========
# module: tests.test_edits
class TestChangeIdentities:
cases: dict[str, Change[str]] = {
"empty": Modified("", ""),
"generation": Modified("", "123"),
+ "add a new line": Modified("", "\n"),
+ "add a new line at end": Modified("a", "a\n"),
"added": Added("a\nb\nc\n"),
"deleted": Deleted("a\nb\nc\n"),
"no change": Modified(
dedent(
"""\
def f1():
x = 1
"""
),
dedent(
"""\
def f1():
x = 1
"""
),
),
"unchanged=True": Modified.from_unchanged(
dedent(
"""\
def f1():
x = 1
"""
),
),
# this test case cannot pass for some reason. Tokenizer bug?
# "leading_whitespace": Modified.from_unchanged(" ..."),
"replace last": Modified(
dedent(
"""\
def f1():
x = 1"""
),
dedent(
"""\
def f1():
x = 2
return x * 2"""
),
),
"no special tokens": Modified(
dedent(
"""\
def f1():
x = 1
y = 2
z = x + y
return z
def f2():
f1()"""
),
dedent(
"""\
# new comment
def f_new():
x = 1
if x > 0:
y = 2 * x
y *= 2
z = x + y
return z
def f2():
f1()
return f_new() + a
new_var = 0
"""
),
),
"with special tokens": Modified(
dedent(
"""\
def f1():
x =</s> |
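The round-trip property these tests assert, phrased over plain strings with difflib standing in for the token-level encode/decode pair: a diff applied back must reproduce both sides exactly.

import difflib

before, after = "a\nb\nc\n", "a\nB\nc\nd\n"
diff = list(difflib.ndiff(before.splitlines(True), after.splitlines(True)))
assert "".join(difflib.restore(diff, 1)) == before
assert "".join(difflib.restore(diff, 2)) == after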
tests.test_edits/TestChangeIdentities.test_get_new_target_lines | Modified | temp-1 | bd89c861a6d0df7743a0e8b8351a2bc7cd9d44a4 | More edits tests. | <0>:<add> n_origin_lines = len(tk_splitlines(original))
| # module: tests.test_edits
class TestChangeIdentities:
def test_get_new_target_lines(self):
for name, c in self.cases.items():
original, delta = TkDelta.from_change_tks(change_to_tokens(c))
- n_origin_lines = len(split_list(original, Newline_id))
<0> edit_lines = range(n_origin_lines + 1)
keys = tuple(delta.keys())
for _ in range(10):
n_keys = int(len(keys) * random.random())
sub_keys = random_subset(keys, n_keys)
sub_keys.sort()
delta1, delta2 = delta.decompose_for_change(sub_keys)
new_edit_lines = delta1.get_new_target_lines(edit_lines)
new_edit_set = set(new_edit_lines)
for l in delta2.changed_lines():
if l not in new_edit_set and l != n_origin_lines:
print_err(f"{edit_lines=}")
print_err("original", SEP)
print_err(add_line_numbers(decode_tokens(original), start=0))
print_err(SEP)
print_err(f"{delta=}")
print_err(f"{sub_keys=}")
print_err(f"{delta1=}")
print_err("step1", SEP)
step1 = delta1.apply_to_change(original)
print_err(add_line_numbers(decode_tokens(step1), start=0))
print_err(SEP)
print_err(f"{new_edit_lines=}")
print_err(f"{delta2=}")
raise AssertionError(f"{l=} not in {new_edit_lines=}")
| ===========unchanged ref 0===========
at: coeditor.common
SEP = "-" * 80
random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2]
random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1]
print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None
at: coeditor.encoding
tk_splitlines(tks: TokenSeq)
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
change_to_tokens(change: Change[str]) -> TokenSeq
at: coeditor.encoding.TkDelta
from_change_tks(change_tks: TokenSeq) -> tuple[TokenSeq, "TkDelta"]
at: random
random = _inst.random
===========unchanged ref 1===========
at: tests.test_edits.TestChangeIdentities
cases: dict[str, Change[str]] = {
"empty": Modified("", ""),
"generation": Modified("", "123"),
"add a new line": Modified("", "\n"),
"add a new line at end": Modified("a", "a\n"),
"added": Added("a\nb\nc\n"),
"deleted": Deleted("a\nb\nc\n"),
"no change": Modified(
dedent(
"""\
def f1():
x = 1
"""
),
dedent(
"""\
def f1():
x = 1
"""
),
),
"unchanged=True": Modified.from_unchanged(
dedent(
"""\
def f1():
x = 1
"""
),
),
# this test case cannot pass for some reason. Tokenizer bug?
# "leading_whitespace": Modified.from_unchanged(" ..."),
"replace last": Modified(
dedent(
"""\
def f1():
x = 1"""
),
dedent(
"""\
def f1():
x = 2
return x * 2"""
),
),
"no special tokens": Modified(
dedent(
"""\
def f1():
x = 1
y = 2
z = x + y
return z
def f2():
f1()"""
),
dedent(
"""\
# new comment
def f_new():
x = 1
if x > 0:
y = 2 * x
y *= 2
z = x + y
return z
def f2():
f1()
return f_new() + a
new_var = 0
"""
),
),
"with special tokens": Modified(
dedent(
"""\
def f1():
x = "<add>"
</s>
===========unchanged ref 2===========
at: tests.test_edits.TestChangeIdentities.test_delta_decomposition
original, delta = TkDelta.from_change_tks(change_to_tokens(c))
original, delta = TkDelta.from_change_tks(change_to_tokens(c))
expect = delta.apply_to_input(original)
sub_keys = random_subset(keys, n_keys)
delta1, delta2 = delta.decompose_for_input(sub_keys)
delta1, delta2 = delta.decompose_for_input(sub_keys)
step1 = delta1.apply_to_input(original)
step2 = delta2.apply_to_input(step1)
===========changed ref 0===========
# module: tests.test_edits
+ def test_splitlines():
+ for n in range(100):
+ rand_input = [random.choice(["a", "b", "c", "\n"]) for _ in range(n)]
+ input = fix_line_end("".join(rand_input))
+ lines = splitlines(input)
+
+ # basic identity
+ assert "".join(lines) == input
+ assert count_lines(input) == len(lines)
+
+ # encode and decode
+ enc = encode_lines_join(input)
+ assert decode_tokens(enc) == input
+
+ # split tokens
+ tk_lines = tk_splitlines(enc)
+ assert len(tk_lines) == len(lines)
+ assert_tks_eq(join_list(tk_lines), enc, "join_list(tk_lines)")
+
===========changed ref 1===========
# module: tests.test_edits
def assert_change_eq(actual: Change[str], expected: Change[str], name: str):
+ assert_str_equal(get_before(actual), get_before(expected))
- if get_before(actual) != get_before(expected):
- print_sections(
- ("Expected before", get_before(expected)),
- ("Reconstructed before", get_before(actual)),
- )
- raise ValueError(f"Failed for case: {name}")
+ assert_str_equal(get_after(actual), get_after(expected))
- if get_after(actual) != get_after(expected):
- print_sections(
- ("Expected after", get_after(expected)),
- ("Reconstructed after", get_after(actual)),
- )
- raise ValueError(f"Failed for case: {name}")
===========changed ref 2===========
# module: tests.test_edits
class TestChangeIdentities:
def test_delta_decomposition(self):
for name, c in self.cases.items():
original, delta = TkDelta.from_change_tks(change_to_tokens(c))
+ assert_tks_eq(original, encode_lines_join(get_before(c)), name)
expect = delta.apply_to_input(original)
+ assert_tks_eq(expect, encode_lines_join(get_after(c)), name)
keys = tuple(delta.keys())
for _ in range(50):
n_keys = int(len(keys) * random.random())
sub_keys = random_subset(keys, n_keys)
delta1, delta2 = delta.decompose_for_input(sub_keys)
step1 = delta1.apply_to_input(original)
step2 = delta2.apply_to_input(step1)
if step2 != expect:
+ print_sections(
+ ("change", decode_tokens(change_to_tokens(c))),
+ ("delta", str(delta)),
+ ("sub_keys", str(sub_keys)),
- print_err(f"{sub_keys=}")
- print_err("earlier", SEP)
- print_err(c.earlier)
- print_err("Original", SEP)
+ ("original", decode_tokens(original)),
- print_err(decode_tokens(original))
+ ("delta1", str(delta1)),
+ ("step1", decode_tokens(step1)),
+ ("delta2", str(delta2)),
+ ("step2", decode_tokens(step2)),
- print_err("Expect", SEP)
+ ("expect", decode_tokens(expect)),
- print_err(decode_tokens(expect))
- print_err("delta1", SEP)
- print_err(delta1)
- print_err("step1", SEP)
- print</s> |
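===========example sketch===========
A self-contained illustration added alongside this record (not part of the original commit): the invariant that `test_delta_decomposition` and `test_get_new_target_lines` exercise is that a delta can be split into two deltas which, applied in sequence, reproduce the full edit. The dict-based delta below is a toy stand-in for `TkDelta` (string lines instead of token lines), and the decomposition is worked out by hand rather than by `decompose_for_change`.
# Toy line-level delta: {line_index: [("add", text) | ("del",), ...]}.
def apply_to_input(lines, delta):
    out = []
    for i, line in enumerate(lines):
        deleted = False
        for act in delta.get(i, []):
            if act[0] == "add":
                out.append(act[1])
            elif act[0] == "del":
                deleted = True
        if not deleted:
            out.append(line)
    for act in delta.get(len(lines), []):  # insertions after the last line
        if act[0] == "add":
            out.append(act[1])
    return out

src = ["a", "b", "c"]
full = {0: [("add", "x")], 1: [("del",)], 3: [("add", "y")]}
step1 = {1: [("del",)]}                         # "a b c" -> "a c"
step2 = {0: [("add", "x")], 2: [("add", "y")]}  # line ids shifted by hand for step 2
assert apply_to_input(apply_to_input(src, step1), step2) == apply_to_input(src, full)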
coeditor.common/assert_str_equal | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> raise AssertionError(f"Strings didn't match: {name}")
| # module: coeditor.common
+ def assert_str_equal(actual: str, expect: str, name: str | None = None):
- def assert_str_equal(actual: str, expect: str):
+ actual = actual.rstrip()
+ expect = expect.rstrip()
if actual != expect:
print_err(f"{expect = }")
print_err(f"{actual = }")
print_err("String difference:")
diff = show_string_diff(expect, actual)
print_err(diff)
- raise AssertionError("Strings didn't match.")
<0>
| ===========unchanged ref 0===========
at: sys
stderr: TextIO
===========changed ref 0===========
# module: coeditor.common
+ def fix_newline(text: str):
+ if text.endswith("\n"):
+ return text
+ return text + "\n"
+ |
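===========example sketch===========
A condensed usage sketch added for illustration (not part of the original commit) of the two helpers shown in this record; the diff printing via `show_string_diff` is omitted here.
def fix_newline(text: str) -> str:
    # Mirrors the helper added in this commit: ensure a trailing newline.
    return text if text.endswith("\n") else text + "\n"

def assert_str_equal(actual: str, expect: str, name: str | None = None) -> None:
    # Trailing whitespace is ignored, matching the rstrip() calls added above.
    if actual.rstrip() != expect.rstrip():
        raise AssertionError(f"Strings didn't match: {name}")

assert fix_newline("a") == "a\n"
assert fix_newline("a\n") == "a\n"
assert_str_equal("x = 1 \n", "x = 1")  # passes: only trailing whitespace differs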
coeditor.encoding/TkDelta.apply_to_input | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> lines = tk_splitlines(input)
| # module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_input(self, input: TokenSeq):
- lines = split_list(input, Newline_id)
<0> new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
elif action[0] == Del_id:
deleted = True
if not deleted:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
return join_list(new_lines, Newline_id)
| ===========unchanged ref 0===========
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.encoding
Add_id = get_tk_id(Add)
Del_id = get_tk_id(Del)
tk_splitlines(tks: TokenSeq)
DeltaKey = NewType("DeltaKey", tuple[int, int])
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
at: typing.Mapping
get(key: _KT) -> Optional[_VT_co]
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
===========changed ref 0===========
# module: coeditor.encoding
+ def tk_splitlines(tks: TokenSeq):
+ return split_list(tks, Newline_id)
+
===========changed ref 1===========
# module: coeditor.common
+ def fix_newline(text: str):
+ if text.endswith("\n"):
+ return text
+ return text + "\n"
+
===========changed ref 2===========
# module: coeditor.common
+ def assert_str_equal(actual: str, expect: str, name: str | None = None):
- def assert_str_equal(actual: str, expect: str):
+ actual = actual.rstrip()
+ expect = expect.rstrip()
if actual != expect:
print_err(f"{expect = }")
print_err(f"{actual = }")
print_err("String difference:")
diff = show_string_diff(expect, actual)
print_err(diff)
+ raise AssertionError(f"Strings didn't match: {name}")
- raise AssertionError("Strings didn't match.")
|
coeditor.encoding/TkDelta.apply_to_change | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> lines = tk_splitlines(change)
| # module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_change(self, change: TokenSeq) -> TokenSeq:
- lines = split_list(change, Newline_id)
<0>
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
elif action[0] == Del_id:
deleted = True
if deleted:
new_lines.append([Del_id] + line)
else:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
return join_list(new_lines, Newline_id)
| ===========unchanged ref 0===========
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.encoding
Add_id = get_tk_id(Add)
Del_id = get_tk_id(Del)
tk_splitlines(tks: TokenSeq)
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
at: typing.Mapping
get(key: _KT) -> Optional[_VT_co]
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
===========changed ref 0===========
# module: coeditor.encoding
+ def tk_splitlines(tks: TokenSeq):
+ return split_list(tks, Newline_id)
+
===========changed ref 1===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_input(self, input: TokenSeq):
+ lines = tk_splitlines(input)
- lines = split_list(input, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
elif action[0] == Del_id:
deleted = True
if not deleted:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
return join_list(new_lines, Newline_id)
===========changed ref 2===========
# module: coeditor.common
+ def fix_newline(text: str):
+ if text.endswith("\n"):
+ return text
+ return text + "\n"
+
===========changed ref 3===========
# module: coeditor.common
+ def assert_str_equal(actual: str, expect: str, name: str | None = None):
- def assert_str_equal(actual: str, expect: str):
+ actual = actual.rstrip()
+ expect = expect.rstrip()
if actual != expect:
print_err(f"{expect = }")
print_err(f"{actual = }")
print_err("String difference:")
diff = show_string_diff(expect, actual)
print_err(diff)
+ raise AssertionError(f"Strings didn't match: {name}")
- raise AssertionError("Strings didn't match.")
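===========example sketch===========
A self-contained illustration added alongside this record (not part of the original commit), covering both `apply_to_input` (previous record) and `apply_to_change` (this one): the same delta either produces the edited code or a diff-token stream, depending on whether the `Add`/`Del` markers are kept. `ADD`, `DEL`, and the list-of-int "lines" are stand-ins for the real token ids.
ADD, DEL = -1, -2   # assumed stand-ins for Add_id / Del_id

def apply(lines, deltas, keep_markers):
    # keep_markers=False -> like apply_to_input (produce the edited code);
    # keep_markers=True  -> like apply_to_change (produce a diff-token stream).
    out = []
    for i, line in enumerate(lines):
        deleted = False
        for act in deltas.get(i, ()):
            if act[0] == ADD:
                out.append(act if keep_markers else act[1:])
            elif act[0] == DEL:
                deleted = True
        if deleted:
            if keep_markers:
                out.append([DEL] + line)
        else:
            out.append(line)
    for act in deltas.get(len(lines), ()):
        if act[0] == ADD:
            out.append(act if keep_markers else act[1:])
    return out

lines = [[5], [6]]
deltas = {0: ([ADD, 9],), 1: ([DEL],)}
assert apply(lines, deltas, keep_markers=False) == [[9], [5]]
assert apply(lines, deltas, keep_markers=True) == [[ADD, 9], [5], [DEL, 6]]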
|
coeditor.encoding/change_tks_to_original_delta | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> diffs = tk_splitlines(change)
| # module: coeditor.encoding
def change_tks_to_original_delta(change: TokenSeq) -> tuple[TokenSeq, TkDelta]:
- diffs = split_list(change, Newline_id)
<0> input_lines: list[TokenSeq] = []
line_delta: list[TokenSeq] = []
deltas = dict[int, tuple[TokenSeq, ...]]()
for diff_line in diffs:
if diff_line and diff_line[0] == Add_id:
line_delta.append(diff_line)
elif diff_line and diff_line[0] == Del_id:
line_delta.append([Del_id])
deltas[len(input_lines)] = tuple(line_delta)
del diff_line[:1]
input_lines.append(diff_line)
line_delta = []
else:
if line_delta:
deltas[len(input_lines)] = tuple(line_delta)
line_delta = []
input_lines.append(diff_line)
if line_delta:
deltas[len(input_lines)] = tuple(line_delta)
str_delta = TkDelta(deltas)
input = join_list(input_lines, Newline_id)
return input, str_delta
| ===========unchanged ref 0===========
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.encoding
Add_id = get_tk_id(Add)
Del_id = get_tk_id(Del)
tk_splitlines(tks: TokenSeq)
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
===========changed ref 0===========
# module: coeditor.encoding
+ def tk_splitlines(tks: TokenSeq):
+ return split_list(tks, Newline_id)
+
===========changed ref 1===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_change(self, change: TokenSeq) -> TokenSeq:
+ lines = tk_splitlines(change)
- lines = split_list(change, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
elif action[0] == Del_id:
deleted = True
if deleted:
new_lines.append([Del_id] + line)
else:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
return join_list(new_lines, Newline_id)
===========changed ref 2===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_input(self, input: TokenSeq):
+ lines = tk_splitlines(input)
- lines = split_list(input, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
elif action[0] == Del_id:
deleted = True
if not deleted:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
return join_list(new_lines, Newline_id)
===========changed ref 3===========
# module: coeditor.common
+ def fix_newline(text: str):
+ if text.endswith("\n"):
+ return text
+ return text + "\n"
+
===========changed ref 4===========
# module: coeditor.common
+ def assert_str_equal(actual: str, expect: str, name: str | None = None):
- def assert_str_equal(actual: str, expect: str):
+ actual = actual.rstrip()
+ expect = expect.rstrip()
if actual != expect:
print_err(f"{expect = }")
print_err(f"{actual = }")
print_err("String difference:")
diff = show_string_diff(expect, actual)
print_err(diff)
+ raise AssertionError(f"Strings didn't match: {name}")
- raise AssertionError("Strings didn't match.")
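===========example sketch===========
A self-contained illustration added alongside this record (not part of the original commit): the extraction that `change_tks_to_original_delta` performs, rebuilt on string lines with "+/-" prefixes instead of token ids. Deltas are keyed by the index of the original line they attach to, with pending additions grouped onto the next deletion or unchanged line, as in the real function.
def diff_to_original_and_delta(diff_lines):
    original, delta, pending = [], {}, []
    for line in diff_lines:
        if line.startswith("+"):
            pending.append(("add", line[1:]))
        elif line.startswith("-"):
            pending.append(("del",))
            delta[len(original)] = tuple(pending)
            original.append(line[1:])
            pending = []
        else:
            if pending:
                delta[len(original)] = tuple(pending)
                pending = []
            original.append(line)
    if pending:  # trailing additions attach past the last original line
        delta[len(original)] = tuple(pending)
    return original, delta

orig, d = diff_to_original_and_delta(["a", "+x", "-b", "c", "+y"])
assert orig == ["a", "b", "c"]
assert d == {1: (("add", "x"), ("del",)), 3: (("add", "y"),)}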
|
coeditor.encoding/tokens_to_change | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> tk_lines = tk_splitlines(tokens)
| # module: coeditor.encoding
def tokens_to_change(tokens: TokenSeq) -> Modified[str]:
"Decode a token sequence into a change."
- tk_lines = split_list(tokens, Newline_id)
<0>
before_lines = list[TokenSeq]()
after_lines = list[TokenSeq]()
for tk_line in tk_lines:
if tk_line and tk_line[0] == Add_id:
after_lines.append(tk_line[1:])
elif tk_line and tk_line[0] == Del_id:
before_lines.append(tk_line[1:])
else:
before_lines.append(tk_line)
after_lines.append(tk_line)
before_code = decode_tokens(join_list(before_lines, Newline_id))
after_code = decode_tokens(join_list(after_lines, Newline_id))
return Modified(before_code, after_code)
| ===========unchanged ref 0===========
at: coeditor.change
Modified(before: E1, after: E1, unchanged: bool=False)
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.encoding
Add_id = get_tk_id(Add)
Del_id = get_tk_id(Del)
tk_splitlines(tks: TokenSeq)
===========changed ref 0===========
# module: coeditor.encoding
+ def tk_splitlines(tks: TokenSeq):
+ return split_list(tks, Newline_id)
+
===========changed ref 1===========
# module: coeditor.encoding
def change_tks_to_original_delta(change: TokenSeq) -> tuple[TokenSeq, TkDelta]:
+ diffs = tk_splitlines(change)
- diffs = split_list(change, Newline_id)
input_lines: list[TokenSeq] = []
line_delta: list[TokenSeq] = []
deltas = dict[int, tuple[TokenSeq, ...]]()
for diff_line in diffs:
if diff_line and diff_line[0] == Add_id:
line_delta.append(diff_line)
elif diff_line and diff_line[0] == Del_id:
line_delta.append([Del_id])
deltas[len(input_lines)] = tuple(line_delta)
del diff_line[:1]
input_lines.append(diff_line)
line_delta = []
else:
if line_delta:
deltas[len(input_lines)] = tuple(line_delta)
line_delta = []
input_lines.append(diff_line)
if line_delta:
deltas[len(input_lines)] = tuple(line_delta)
str_delta = TkDelta(deltas)
input = join_list(input_lines, Newline_id)
return input, str_delta
===========changed ref 2===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_change(self, change: TokenSeq) -> TokenSeq:
+ lines = tk_splitlines(change)
- lines = split_list(change, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
elif action[0] == Del_id:
deleted = True
if deleted:
new_lines.append([Del_id] + line)
else:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
return join_list(new_lines, Newline_id)
===========changed ref 3===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_input(self, input: TokenSeq):
+ lines = tk_splitlines(input)
- lines = split_list(input, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
elif action[0] == Del_id:
deleted = True
if not deleted:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
return join_list(new_lines, Newline_id)
===========changed ref 4===========
# module: coeditor.common
+ def fix_newline(text: str):
+ if text.endswith("\n"):
+ return text
+ return text + "\n"
+
===========changed ref 5===========
# module: coeditor.common
+ def assert_str_equal(actual: str, expect: str, name: str | None = None):
- def assert_str_equal(actual: str, expect: str):
+ actual = actual.rstrip()
+ expect = expect.rstrip()
if actual != expect:
print_err(f"{expect = }")
print_err(f"{actual = }")
print_err("String difference:")
diff = show_string_diff(expect, actual)
print_err(diff)
+ raise AssertionError(f"Strings didn't match: {name}")
- raise AssertionError("Strings didn't match.")
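===========example sketch===========
A self-contained illustration added alongside this record (not part of the original commit): the before/after reconstruction that `tokens_to_change` performs, using string lines with "+/-" prefixes as stand-ins for tokenized diff lines.
def diff_to_before_after(diff_lines):
    # "+" lines belong only to the after version, "-" lines only to the before.
    before = [l[1:] if l.startswith("-") else l for l in diff_lines if not l.startswith("+")]
    after = [l[1:] if l.startswith("+") else l for l in diff_lines if not l.startswith("-")]
    return "\n".join(before), "\n".join(after)

assert diff_to_before_after(["a", "-b", "+c"]) == ("a\nb", "a\nc")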
|
coeditor.encoding/code_to_input | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> tk_lines = tk_splitlines(code_tks)
| # module: coeditor.encoding
def code_to_input(code_tks: TokenSeq) -> TokenSeq:
"""
Convert the original code into model input by inserting <extra_id> tokens.
In this format, there will be an <extra_id> token at the beginning of each line.
An additional <extra_id> will be added at the end to allow appending.
"""
- tk_lines = split_list(code_tks, Newline_id)
<0> tk_lines.append([])
input_seq = TokenSeq()
for i, line in enumerate(tk_lines):
if i < N_Extra_Ids:
input_seq.append(get_extra_id(i))
input_seq.extend(line)
if i < len(tk_lines) - 1:
input_seq.append(Newline_id)
return input_seq
| ===========unchanged ref 0===========
at: coeditor.change
Modified(before: E1, after: E1, unchanged: bool=False)
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.encoding
N_Extra_Ids = 100
get_extra_id(i: int) -> Token
tk_splitlines(tks: TokenSeq)
at: coeditor.encoding.tokens_to_change
before_code = decode_tokens(join_list(before_lines, Newline_id))
after_code = decode_tokens(join_list(after_lines, Newline_id))
===========changed ref 0===========
# module: coeditor.encoding
+ def tk_splitlines(tks: TokenSeq):
+ return split_list(tks, Newline_id)
+
===========changed ref 1===========
# module: coeditor.encoding
def tokens_to_change(tokens: TokenSeq) -> Modified[str]:
"Decode a token sequence into a change."
+ tk_lines = tk_splitlines(tokens)
- tk_lines = split_list(tokens, Newline_id)
before_lines = list[TokenSeq]()
after_lines = list[TokenSeq]()
for tk_line in tk_lines:
if tk_line and tk_line[0] == Add_id:
after_lines.append(tk_line[1:])
elif tk_line and tk_line[0] == Del_id:
before_lines.append(tk_line[1:])
else:
before_lines.append(tk_line)
after_lines.append(tk_line)
before_code = decode_tokens(join_list(before_lines, Newline_id))
after_code = decode_tokens(join_list(after_lines, Newline_id))
return Modified(before_code, after_code)
===========changed ref 2===========
# module: coeditor.encoding
def change_tks_to_original_delta(change: TokenSeq) -> tuple[TokenSeq, TkDelta]:
+ diffs = tk_splitlines(change)
- diffs = split_list(change, Newline_id)
input_lines: list[TokenSeq] = []
line_delta: list[TokenSeq] = []
deltas = dict[int, tuple[TokenSeq, ...]]()
for diff_line in diffs:
if diff_line and diff_line[0] == Add_id:
line_delta.append(diff_line)
elif diff_line and diff_line[0] == Del_id:
line_delta.append([Del_id])
deltas[len(input_lines)] = tuple(line_delta)
del diff_line[:1]
input_lines.append(diff_line)
line_delta = []
else:
if line_delta:
deltas[len(input_lines)] = tuple(line_delta)
line_delta = []
input_lines.append(diff_line)
if line_delta:
deltas[len(input_lines)] = tuple(line_delta)
str_delta = TkDelta(deltas)
input = join_list(input_lines, Newline_id)
return input, str_delta
===========changed ref 3===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_change(self, change: TokenSeq) -> TokenSeq:
+ lines = tk_splitlines(change)
- lines = split_list(change, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
elif action[0] == Del_id:
deleted = True
if deleted:
new_lines.append([Del_id] + line)
else:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
return join_list(new_lines, Newline_id)
===========changed ref 4===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_input(self, input: TokenSeq):
+ lines = tk_splitlines(input)
- lines = split_list(input, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
elif action[0] == Del_id:
deleted = True
if not deleted:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
return join_list(new_lines, Newline_id)
===========changed ref 5===========
# module: coeditor.common
+ def fix_newline(text: str):
+ if text.endswith("\n"):
+ return text
+ return text + "\n"
+
===========changed ref 6===========
# module: coeditor.common
+ def assert_str_equal(actual: str, expect: str, name: str | None = None):
- def assert_str_equal(actual: str, expect: str):
+ actual = actual.rstrip()
+ expect = expect.rstrip()
if actual != expect:
print_err(f"{expect = }")
print_err(f"{actual = }")
print_err("String difference:")
diff = show_string_diff(expect, actual)
print_err(diff)
+ raise AssertionError(f"Strings didn't match: {name}")
- raise AssertionError("Strings didn't match.")
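===========example sketch===========
A self-contained illustration added alongside this record (not part of the original commit): the marker-insertion scheme of `code_to_input`, shown with textual `<extra_id_i>` strings. In the repo these are token ids obtained via `get_extra_id`, so the literal strings here are an assumption for readability.
def code_to_input(lines, n_extra_ids=100):
    lines = lines + [""]  # one extra slot so new code can be appended at the end
    return "\n".join(
        (f"<extra_id_{i}>" if i < n_extra_ids else "") + line
        for i, line in enumerate(lines)
    )

assert code_to_input(["x = 1", "y = 2"]) == "<extra_id_0>x = 1\n<extra_id_1>y = 2\n<extra_id_2>"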
|
coeditor.encoding/change_tks_to_input_output | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> tk_lines = tk_splitlines(tks)
| # module: coeditor.encoding
def change_tks_to_input_output(tks: TokenSeq) -> tuple[TokenSeq, TokenSeq]:
"See `change_to_input_output`."
- tk_lines = split_list(tks, Newline_id)
<0>
input_lines: list[TokenSeq] = []
out_buff = TokenSeq()
output_segs: list[TokenSeq] = []
for i, tk_line in enumerate(tk_lines):
if tk_line and tk_line[0] == Add_id:
out_buff.extend(tk_line)
out_buff.append(Newline_id)
elif tk_line and tk_line[0] == Del_id:
input_lines.append(tk_line[1:])
out_buff.append(Del_id)
output_segs.append(out_buff)
out_buff = TokenSeq()
else:
input_lines.append(tk_line)
output_segs.append(out_buff)
out_buff = TokenSeq()
input_lines.append(TokenSeq())
output_segs.append(out_buff)
assert_eq(len(input_lines), len(output_segs))
output_segs = output_segs[:N_Extra_Ids]
for i in range(0, len(output_segs)):
input_lines[i] = [get_extra_id(i)] + input_lines[i]
output_segs[i] = [get_extra_id(i)] + output_segs[i]
input = join_list(input_lines, Newline_id)
output = join_list(output_segs, None)
if not check_output_tokens(output):
str_segs = [decode_tokens(tks) for tks in output_segs]
change = tokens_to_change(tks)
msg = f"Invalid output tokens.\n Output segs: {str_segs}\n Change: {show_change(</s> | ===========below chunk 0===========
# module: coeditor.encoding
def change_tks_to_input_output(tks: TokenSeq) -> tuple[TokenSeq, TokenSeq]:
# offset: 1
<s>ks)
msg = f"Invalid output tokens.\n Output segs: {str_segs}\n Change: {show_change(change)}"
raise ValueError(msg)
return input, output
===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor.common
TokenSeq = list[Token]
join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
at: coeditor.encoding
Add_id = get_tk_id(Add)
Del_id = get_tk_id(Del)
Newline_id = get_tk_id("\n")
N_Extra_Ids = 100
get_extra_id(i: int) -> Token
tk_splitlines(tks: TokenSeq)
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
change_to_tokens(change: Change[str]) -> TokenSeq
check_output_tokens(tks: TokenSeq) -> bool
===========changed ref 0===========
# module: coeditor.encoding
+ def tk_splitlines(tks: TokenSeq):
+ return split_list(tks, Newline_id)
+
===========changed ref 1===========
# module: coeditor.encoding
def code_to_input(code_tks: TokenSeq) -> TokenSeq:
"""
Convert the original code into model input by inserting <extra_id> tokens.
In this format, there will be an <extra_id> token at the beginning of each line.
An additional <extra_id> will be added at the end to allow appending.
"""
+ tk_lines = tk_splitlines(code_tks)
- tk_lines = split_list(code_tks, Newline_id)
tk_lines.append([])
input_seq = TokenSeq()
for i, line in enumerate(tk_lines):
if i < N_Extra_Ids:
input_seq.append(get_extra_id(i))
input_seq.extend(line)
if i < len(tk_lines) - 1:
input_seq.append(Newline_id)
return input_seq
===========changed ref 2===========
# module: coeditor.encoding
def tokens_to_change(tokens: TokenSeq) -> Modified[str]:
"Decode a token sequence into a change."
+ tk_lines = tk_splitlines(tokens)
- tk_lines = split_list(tokens, Newline_id)
before_lines = list[TokenSeq]()
after_lines = list[TokenSeq]()
for tk_line in tk_lines:
if tk_line and tk_line[0] == Add_id:
after_lines.append(tk_line[1:])
elif tk_line and tk_line[0] == Del_id:
before_lines.append(tk_line[1:])
else:
before_lines.append(tk_line)
after_lines.append(tk_line)
before_code = decode_tokens(join_list(before_lines, Newline_id))
after_code = decode_tokens(join_list(after_lines, Newline_id))
return Modified(before_code, after_code)
===========changed ref 3===========
# module: coeditor.encoding
def change_tks_to_original_delta(change: TokenSeq) -> tuple[TokenSeq, TkDelta]:
+ diffs = tk_splitlines(change)
- diffs = split_list(change, Newline_id)
input_lines: list[TokenSeq] = []
line_delta: list[TokenSeq] = []
deltas = dict[int, tuple[TokenSeq, ...]]()
for diff_line in diffs:
if diff_line and diff_line[0] == Add_id:
line_delta.append(diff_line)
elif diff_line and diff_line[0] == Del_id:
line_delta.append([Del_id])
deltas[len(input_lines)] = tuple(line_delta)
del diff_line[:1]
input_lines.append(diff_line)
line_delta = []
else:
if line_delta:
deltas[len(input_lines)] = tuple(line_delta)
line_delta = []
input_lines.append(diff_line)
if line_delta:
deltas[len(input_lines)] = tuple(line_delta)
str_delta = TkDelta(deltas)
input = join_list(input_lines, Newline_id)
return input, str_delta
===========changed ref 4===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_change(self, change: TokenSeq) -> TokenSeq:
+ lines = tk_splitlines(change)
- lines = split_list(change, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
elif action[0] == Del_id:
deleted = True
if deleted:
new_lines.append([Del_id] + line)
else:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
return join_list(new_lines, Newline_id)
===========changed ref 5===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_input(self, input: TokenSeq):
+ lines = tk_splitlines(input)
- lines = split_list(input, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
elif action[0] == Del_id:
deleted = True
if not deleted:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
return join_list(new_lines, Newline_id)
===========changed ref 6===========
# module: coeditor.common
+ def fix_newline(text: str):
+ if text.endswith("\n"):
+ return text
+ return text + "\n"
+
===========changed ref 7===========
# module: coeditor.common
+ def assert_str_equal(actual: str, expect: str, name: str | None = None):
- def assert_str_equal(actual: str, expect: str):
+ actual = actual.rstrip()
+ expect = expect.rstrip()
if actual != expect:
print_err(f"{expect = }")
print_err(f"{actual = }")
print_err("String difference:")
diff = show_string_diff(expect, actual)
print_err(diff)
+ raise AssertionError(f"Strings didn't match: {name}")
- raise AssertionError("Strings didn't match.")
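===========example sketch===========
A self-contained illustration added alongside this record (not part of the original commit): how `change_tks_to_input_output` pairs each input line marker with its output segment. String lines and `<e{i}>` markers are stand-ins for token ids; in the real version added lines keep their trailing newline and `Del_id` is a token, not the "-" string used here.
def to_input_output(diff_lines):
    input_lines, out_segs, buff = [], [], []
    for line in diff_lines:
        if line.startswith("+"):
            buff.append(line)              # additions accumulate into the next segment
        elif line.startswith("-"):
            input_lines.append(line[1:])   # deleted line still appears in the input
            out_segs.append(buff + ["-"])  # its segment records the deletion
            buff = []
        else:
            input_lines.append(line)
            out_segs.append(buff)
            buff = []
    input_lines.append("")                 # trailing slot, as in code_to_input
    out_segs.append(buff)
    inp = "\n".join(f"<e{i}>{l}" for i, l in enumerate(input_lines))
    out = "".join(f"<e{i}>" + "".join(seg) for i, seg in enumerate(out_segs))
    return inp, out

inp, out = to_input_output(["a", "+x", "-b"])
assert inp == "<e0>a\n<e1>b\n<e2>"
assert out == "<e0><e1>+x-<e2>"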
|
coeditor.encoding/TokenizedEdit.show | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> for line_tks in tk_splitlines(self.main_tks):
| # module: coeditor.encoding
class TokenizedEdit(ABC):
def show(self, pred_tks: TokenSeq | None = None) -> str:
<s>.append(Newline_id)
seg = seg + origin_line
label = show_label(id_map.get(k, -1))
lines.append(f"{label}:{indent(decode_tokens(seg), ' ' * 4).lstrip()}")
return "".join(lines)
def show_ctx(ctx_tks: TokenSeq):
+ lines = tk_splitlines(ctx_tks)
- lines = split_list(ctx_tks, Newline_id)
return "\n".join(" " + show_content(l) for l in lines)
main_segs = output_ids_as_seqs(self.main_tks)
id_map = {k: i for i, k in enumerate(main_segs)}
main_lines = list[str]()
- for line_tks in split_list(self.main_tks, Newline_id):
<0> if line_tks and is_extra_id(line_tks[0]):
prefix = show_label(id_map.get(line_tks[0], -1))
line = prefix + show_content(line_tks[1:])
else:
line = " " + show_content(line_tks)
main_lines.append(line)
pred_lines = (
["========Prediction========", f"{show_extra_tokens(pred_tks, main_segs)}"]
if pred_tks
else []
)
outputs = [
"-" * 80,
*self.meta_data_lines(),
"========Ground Truth========",
show_extra_tokens(self.output_tks, main_segs),
*pred_lines,
"========Main Code========",
"\n".join(main_lines),
] + [
f"==========={name}===========\n" + show_ctx(tks</s> | ===========above chunk 0===========
# module: coeditor.encoding
class TokenizedEdit(ABC):
def show(self, pred_tks: TokenSeq | None = None) -> str:
# offset: -1
def show_label(i: int):
return f" <{i}>" if i <= 9 else f"<{i}>"
def show_content(tks: TokenSeq):
if tks and tks[0] == Add_id:
return "+ " + decode_tokens(tks[1:])
elif tks and tks[0] == Del_id:
return "- " + decode_tokens(tks[1:])
else:
return " " + decode_tokens(tks)
def show_extra_tokens(tks: TokenSeq, main_tk_lines: dict[Token, TokenSeq]):
segs = output_ids_as_seqs(tks)
lines = []
for k, seg in segs.items():
if not seg:
continue # skip empty lines
if seg[-1] == Del_id:
# show the deleted line
+ origin_line = tk_splitlines(main_tk_lines.get(k, []))[0]
- origin_line = split_list(main_tk_lines.get(k, []), Newline_id)[0]
origin_line.append(Newline_id)
seg = seg + origin_line
label = show_label(id_map.get(</s>
===========below chunk 0===========
# module: coeditor.encoding
class TokenizedEdit(ABC):
def show(self, pred_tks: TokenSeq | None = None) -> str:
# offset: 1
<s>main_lines),
] + [
f"==========={name}===========\n" + show_ctx(tks)
for name, tks in self.all_ctxs().items()
]
return "\n".join(outputs)
===========unchanged ref 0===========
at: coeditor.common
Token = int
TokenSeq = list[Token]
at: coeditor.encoding
Add_id = get_tk_id(Add)
Del_id = get_tk_id(Del)
Newline_id = get_tk_id("\n")
is_extra_id(tk: int) -> bool
tk_splitlines(tks: TokenSeq)
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
output_ids_as_seqs(output_ids: Iterable[Token]) -> dict[Token, TokenSeq]
at: coeditor.encoding.TokenizedEdit
input_tks: TokenSeq
output_tks: TokenSeq
main_tks: TokenSeq
path: ProjectPath
change_type: Change[None]
meta_data_lines() -> list[str]
at: textwrap
indent(text: str, prefix: str, predicate: Optional[Callable[[str], bool]]=...) -> str
at: typing.Mapping
get(key: _KT) -> Optional[_VT_co]
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
===========changed ref 0===========
# module: coeditor.encoding
+ def tk_splitlines(tks: TokenSeq):
+ return split_list(tks, Newline_id)
+
===========changed ref 1===========
# module: coeditor.encoding
def code_to_input(code_tks: TokenSeq) -> TokenSeq:
"""
Convert the original code into model input by inserting <extra_id> tokens.
In this format, there will be an <extra_id> token at the beginning of each line.
An additional <extra_id> will be added at the end to allow appending.
"""
+ tk_lines = tk_splitlines(code_tks)
- tk_lines = split_list(code_tks, Newline_id)
tk_lines.append([])
input_seq = TokenSeq()
for i, line in enumerate(tk_lines):
if i < N_Extra_Ids:
input_seq.append(get_extra_id(i))
input_seq.extend(line)
if i < len(tk_lines) - 1:
input_seq.append(Newline_id)
return input_seq
===========changed ref 2===========
# module: coeditor.encoding
def tokens_to_change(tokens: TokenSeq) -> Modified[str]:
"Decode a token sequence into a change."
+ tk_lines = tk_splitlines(tokens)
- tk_lines = split_list(tokens, Newline_id)
before_lines = list[TokenSeq]()
after_lines = list[TokenSeq]()
for tk_line in tk_lines:
if tk_line and tk_line[0] == Add_id:
after_lines.append(tk_line[1:])
elif tk_line and tk_line[0] == Del_id:
before_lines.append(tk_line[1:])
else:
before_lines.append(tk_line)
after_lines.append(tk_line)
before_code = decode_tokens(join_list(before_lines, Newline_id))
after_code = decode_tokens(join_list(after_lines, Newline_id))
return Modified(before_code, after_code)
===========changed ref 3===========
# module: coeditor.encoding
def change_tks_to_original_delta(change: TokenSeq) -> tuple[TokenSeq, TkDelta]:
+ diffs = tk_splitlines(change)
- diffs = split_list(change, Newline_id)
input_lines: list[TokenSeq] = []
line_delta: list[TokenSeq] = []
deltas = dict[int, tuple[TokenSeq, ...]]()
for diff_line in diffs:
if diff_line and diff_line[0] == Add_id:
line_delta.append(diff_line)
elif diff_line and diff_line[0] == Del_id:
line_delta.append([Del_id])
deltas[len(input_lines)] = tuple(line_delta)
del diff_line[:1]
input_lines.append(diff_line)
line_delta = []
else:
if line_delta:
deltas[len(input_lines)] = tuple(line_delta)
line_delta = []
input_lines.append(diff_line)
if line_delta:
deltas[len(input_lines)] = tuple(line_delta)
str_delta = TkDelta(deltas)
input = join_list(input_lines, Newline_id)
return input, str_delta
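===========example sketch===========
A self-contained illustration added alongside this record (not part of the original commit): `TokenizedEdit.show` is built on `output_ids_as_seqs`, which groups output tokens under the most recent `<extra_id>` key. The toy below mirrors that grouping on strings; the `<e…>` markers and the `is_key` predicate are assumptions standing in for real extra-id token ids.
def output_ids_as_seqs(output, is_key):
    segs, cur = {}, None
    for tk in output:
        if is_key(tk):
            segs[tk] = []
            cur = tk
        elif cur is not None:
            segs[cur].append(tk)
    return segs

out = ["<e0>", "+x", "<e1>", "+y", "+z"]
assert output_ids_as_seqs(out, lambda t: t.startswith("<e")) == {
    "<e0>": ["+x"],
    "<e1>": ["+y", "+z"],
}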
|
coeditor.encoding/TokenizedEdit.is_repetitive_edit | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> for line in tk_splitlines(seg):
| # module: coeditor.encoding
class TokenizedEdit(ABC):
def is_repetitive_edit(self, blue_threshold=0.8) -> bool:
<s>strip()
return encode_single_line(s)
else:
return []
+ ctx_lines = tk_splitlines(self.input_tks)
- ctx_lines = split_list(self.input_tks, Newline_id)
main_lines = output_ids_as_seqs(self.input_tks)
ctx_addtions = [tks for l in ctx_lines if (tks := get_changes(l, Add_id))]
ctx_deletions = [tks for l in ctx_lines if (tks := get_changes(l, Del_id))]
def has_match(line, line_key: Token):
if line:
if line[0] == Add_id:
added = line[1:]
return any(
as_any(sentence_bleu([ref], added)) > blue_threshold
for ref in ctx_addtions
)
elif line == [Del_id]:
if line_key not in main_lines:
print(f"Key {decode_tokens([line_key])} not found.")
print("Main tokens:")
print(decode_tokens(self.main_tks))
deleted = main_lines[line_key]
return any(
as_any(sentence_bleu([ref], deleted)) > blue_threshold
for ref in ctx_deletions
)
else:
raise ValueError(f"Unexpected line: {decode_tokens(line)}")
else:
return True
out_segs = output_ids_as_seqs(self.output_tks)
if all(not s for s in out_segs.values()):
return False
for k, seg in out_segs.items():
- for line in split_list(seg, Newline_id):
<0> if not has_match(line, k):
return False
return True
| ===========above chunk 0===========
# module: coeditor.encoding
class TokenizedEdit(ABC):
def is_repetitive_edit(self, blue_threshold=0.8) -> bool:
# offset: -1
"""Check if all additions in the output_tokens can be matched to
an addition in the input_tokens with a BLEU score above the threshold."""
def get_changes(tks, key_tk: Token):
if tks and tks[0] == key_tk:
s = decode_tokens(tks[1:])
s = s.strip()
return encode_single_line(s)
else:
return []
+ ctx_lines = tk</s>
===========unchanged ref 0===========
at: coeditor._utils
as_any(x) -> Any
at: coeditor.common
Token = int
at: coeditor.encoding
Add_id = get_tk_id(Add)
Del_id = get_tk_id(Del)
tk_splitlines(tks: TokenSeq)
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
encode_single_line(text: str, add_special_tokens=False) -> TokenSeq
output_ids_as_seqs(output_ids: Iterable[Token]) -> dict[Token, TokenSeq]
at: coeditor.encoding.TokenizedEdit
input_tks: TokenSeq
output_tks: TokenSeq
main_tks: TokenSeq
at: nltk.translate.bleu_score
sentence_bleu(references, hypothesis, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False)
===========changed ref 0===========
# module: coeditor.encoding
+ def tk_splitlines(tks: TokenSeq):
+ return split_list(tks, Newline_id)
+
===========changed ref 1===========
# module: coeditor.encoding
def code_to_input(code_tks: TokenSeq) -> TokenSeq:
"""
Convert the original code into model input by inserting <extra_id> tokens.
In this format, there will be an <extra_id> token at the beginning of each line.
An additional <extra_id> will be added at the end to allow appending.
"""
+ tk_lines = tk_splitlines(code_tks)
- tk_lines = split_list(code_tks, Newline_id)
tk_lines.append([])
input_seq = TokenSeq()
for i, line in enumerate(tk_lines):
if i < N_Extra_Ids:
input_seq.append(get_extra_id(i))
input_seq.extend(line)
if i < len(tk_lines) - 1:
input_seq.append(Newline_id)
return input_seq
===========changed ref 2===========
# module: coeditor.encoding
def tokens_to_change(tokens: TokenSeq) -> Modified[str]:
"Decode a token sequence into a change."
+ tk_lines = tk_splitlines(tokens)
- tk_lines = split_list(tokens, Newline_id)
before_lines = list[TokenSeq]()
after_lines = list[TokenSeq]()
for tk_line in tk_lines:
if tk_line and tk_line[0] == Add_id:
after_lines.append(tk_line[1:])
elif tk_line and tk_line[0] == Del_id:
before_lines.append(tk_line[1:])
else:
before_lines.append(tk_line)
after_lines.append(tk_line)
before_code = decode_tokens(join_list(before_lines, Newline_id))
after_code = decode_tokens(join_list(after_lines, Newline_id))
return Modified(before_code, after_code)
===========changed ref 3===========
# module: coeditor.encoding
def change_tks_to_original_delta(change: TokenSeq) -> tuple[TokenSeq, TkDelta]:
+ diffs = tk_splitlines(change)
- diffs = split_list(change, Newline_id)
input_lines: list[TokenSeq] = []
line_delta: list[TokenSeq] = []
deltas = dict[int, tuple[TokenSeq, ...]]()
for diff_line in diffs:
if diff_line and diff_line[0] == Add_id:
line_delta.append(diff_line)
elif diff_line and diff_line[0] == Del_id:
line_delta.append([Del_id])
deltas[len(input_lines)] = tuple(line_delta)
del diff_line[:1]
input_lines.append(diff_line)
line_delta = []
else:
if line_delta:
deltas[len(input_lines)] = tuple(line_delta)
line_delta = []
input_lines.append(diff_line)
if line_delta:
deltas[len(input_lines)] = tuple(line_delta)
str_delta = TkDelta(deltas)
input = join_list(input_lines, Newline_id)
return input, str_delta
===========changed ref 4===========
# module: coeditor.encoding
class TokenizedEdit(ABC):
def show(self, pred_tks: TokenSeq | None = None) -> str:
def show_label(i: int):
return f" <{i}>" if i <= 9 else f"<{i}>"
def show_content(tks: TokenSeq):
if tks and tks[0] == Add_id:
return "+ " + decode_tokens(tks[1:])
elif tks and tks[0] == Del_id:
return "- " + decode_tokens(tks[1:])
else:
return " " + decode_tokens(tks)
def show_extra_tokens(tks: TokenSeq, main_tk_lines: dict[Token, TokenSeq]):
segs = output_ids_as_seqs(tks)
lines = []
for k, seg in segs.items():
if not seg:
continue # skip empty lines
if seg[-1] == Del_id:
# show the deleted line
+ origin_line = tk_splitlines(main_tk_lines.get(k, []))[0]
- origin_line = split_list(main_tk_lines.get(k, []), Newline_id)[0]
origin_line.append(Newline_id)
seg = seg + origin_line
label = show_label(id_map.get(k, -1))
lines.append(f"{label}:{indent(decode_tokens(seg), ' ' * 4).lstrip()}")
return "".join(lines)
def show_ctx(ctx_tks: TokenSeq):
+ lines = tk_splitlines(ctx_tks)
- lines = split_list(ctx_tks, Newline_id)
return "\n".join(" " + show_content(l) for l in lines)
main_segs = output_ids_as_seqs(self.main_tks)
id_map = {k: i for i, k in enumerate(main_segs)}
</s> |
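===========example sketch===========
A minimal illustration added alongside this record (not part of the original commit): `is_repetitive_edit` matches lines with `nltk`'s `sentence_bleu`, the same call shown in the unchanged refs above. The example line is made up; `0.8` is the `blue_threshold` default from the signature.
from nltk.translate.bleu_score import sentence_bleu

ref = "y = 2 * x".split()   # a line added somewhere in the context diff
hyp = "y = 2 * x".split()   # a line the model proposes to add
# Identical token sequences score 1.0, so this edit would count as repetitive.
assert sentence_bleu([ref], hyp) > 0.8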
coeditor.encoding/compress_change_tks | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> lines = tk_splitlines(tks)
| # module: coeditor.encoding
def compress_change_tks(tks: TokenSeq, max_ctx: int):
- lines = split_list(tks, sep=Newline_id)
<0> to_keep = [False for _ in lines]
# mark which lines to keep
for i, line in enumerate(lines):
if line and (line[0] == Add_id or line[0] == Del_id):
for j in range(max(0, i - max_ctx), min(len(lines), i + max_ctx + 1)):
to_keep[j] = True
new_lines = list[TokenSeq]()
i = 0
while i < len(lines):
if to_keep[i]:
new_lines.append(lines[i])
i += 1
else:
j = i + 1
while j < len(lines) and not to_keep[j]:
j += 1
new_lines.append(_OMIT)
i = j
return join_list(new_lines, sep=Newline_id)
| ===========unchanged ref 0===========
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.encoding
Add_id = get_tk_id(Add)
Del_id = get_tk_id(Del)
tk_splitlines(tks: TokenSeq)
encode_single_line(text: str, add_special_tokens=False) -> TokenSeq
TokenizedEdit()
at: typing
TypeVar(name: str, *constraints: Type[Any], bound: Union[None, Type[Any], str]=..., covariant: bool=..., contravariant: bool=...)
===========changed ref 0===========
# module: coeditor.encoding
+ def tk_splitlines(tks: TokenSeq):
+ return split_list(tks, Newline_id)
+
===========changed ref 1===========
# module: coeditor.encoding
class TokenizedEdit(ABC):
def is_repetitive_edit(self, blue_threshold=0.8) -> bool:
"""Check if all additions in the output_tokens can be matched to
an addition in the input_tokens with a BLEU score above the threshold."""
def get_changes(tks, key_tk: Token):
if tks and tks[0] == key_tk:
s = decode_tokens(tks[1:])
s = s.strip()
return encode_single_line(s)
else:
return []
+ ctx_lines = tk_splitlines(self.input_tks)
- ctx_lines = split_list(self.input_tks, Newline_id)
main_lines = output_ids_as_seqs(self.input_tks)
ctx_addtions = [tks for l in ctx_lines if (tks := get_changes(l, Add_id))]
ctx_deletions = [tks for l in ctx_lines if (tks := get_changes(l, Del_id))]
def has_match(line, line_key: Token):
if line:
if line[0] == Add_id:
added = line[1:]
return any(
as_any(sentence_bleu([ref], added)) > blue_threshold
for ref in ctx_addtions
)
elif line == [Del_id]:
if line_key not in main_lines:
print(f"Key {decode_tokens([line_key])} not found.")
print("Main tokens:")
print(decode_tokens(self.main_tks))
deleted = main_lines[line_key]
return any(
as_any(sentence_bleu([ref], deleted)) > blue_threshold
for ref in ctx_deletions
)
else:
raise ValueError(f"Unexpected line: {decode_tokens(line)}")
else:
return True
out_segs = output_ids_as_</s>
===========changed ref 2===========
# module: coeditor.encoding
class TokenizedEdit(ABC):
def is_repetitive_edit(self, blue_threshold=0.8) -> bool:
# offset: 1
<s> {decode_tokens(line)}")
else:
return True
out_segs = output_ids_as_seqs(self.output_tks)
if all(not s for s in out_segs.values()):
return False
for k, seg in out_segs.items():
+ for line in tk_splitlines(seg):
- for line in split_list(seg, Newline_id):
if not has_match(line, k):
return False
return True
===========changed ref 3===========
# module: coeditor.encoding
def code_to_input(code_tks: TokenSeq) -> TokenSeq:
"""
Convert the original code into model input by inserting <extra_id> tokens.
In this format, there will be an <extra_id> token at the beginning of each line.
An additional <extra_id> will be added at the end to allow appending.
"""
+ tk_lines = tk_splitlines(code_tks)
- tk_lines = split_list(code_tks, Newline_id)
tk_lines.append([])
input_seq = TokenSeq()
for i, line in enumerate(tk_lines):
if i < N_Extra_Ids:
input_seq.append(get_extra_id(i))
input_seq.extend(line)
if i < len(tk_lines) - 1:
input_seq.append(Newline_id)
return input_seq
===========changed ref 4===========
# module: coeditor.encoding
def tokens_to_change(tokens: TokenSeq) -> Modified[str]:
"Decode a token sequence into a change."
+ tk_lines = tk_splitlines(tokens)
- tk_lines = split_list(tokens, Newline_id)
before_lines = list[TokenSeq]()
after_lines = list[TokenSeq]()
for tk_line in tk_lines:
if tk_line and tk_line[0] == Add_id:
after_lines.append(tk_line[1:])
elif tk_line and tk_line[0] == Del_id:
before_lines.append(tk_line[1:])
else:
before_lines.append(tk_line)
after_lines.append(tk_line)
before_code = decode_tokens(join_list(before_lines, Newline_id))
after_code = decode_tokens(join_list(after_lines, Newline_id))
return Modified(before_code, after_code)
===========changed ref 5===========
# module: coeditor.encoding
def change_tks_to_original_delta(change: TokenSeq) -> tuple[TokenSeq, TkDelta]:
+ diffs = tk_splitlines(change)
- diffs = split_list(change, Newline_id)
input_lines: list[TokenSeq] = []
line_delta: list[TokenSeq] = []
deltas = dict[int, tuple[TokenSeq, ...]]()
for diff_line in diffs:
if diff_line and diff_line[0] == Add_id:
line_delta.append(diff_line)
elif diff_line and diff_line[0] == Del_id:
line_delta.append([Del_id])
deltas[len(input_lines)] = tuple(line_delta)
del diff_line[:1]
input_lines.append(diff_line)
line_delta = []
else:
if line_delta:
deltas[len(input_lines)] = tuple(line_delta)
line_delta = []
input_lines.append(diff_line)
if line_delta:
deltas[len(input_lines)] = tuple(line_delta)
str_delta = TkDelta(deltas)
input = join_list(input_lines, Newline_id)
return input, str_delta
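===========example sketch===========
A self-contained illustration added alongside this record (not part of the original commit): the keep/omit windowing that `compress_change_tks` performs, rebuilt on string lines. The `"..."` placeholder stands in for the repo's `_OMIT` token sequence.
def compress(lines, is_changed, max_ctx):
    # Keep every changed line plus max_ctx lines around it; elide the rest.
    keep = [False] * len(lines)
    for i, l in enumerate(lines):
        if is_changed(l):
            for j in range(max(0, i - max_ctx), min(len(lines), i + max_ctx + 1)):
                keep[j] = True
    out, i = [], 0
    while i < len(lines):
        if keep[i]:
            out.append(lines[i])
            i += 1
        else:
            out.append("...")
            while i < len(lines) and not keep[i]:
                i += 1
    return out

src = ["a", "b", "+c", "d", "e", "f"]
assert compress(src, lambda l: l.startswith("+"), 1) == ["...", "b", "+c", "d", "..."]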
|
coeditor.c3problem/C3Problem.line_ids_to_input_lines | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> for i, tks in enumerate(tk_splitlines(change_tks)):
| # module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
def line_ids_to_input_lines(self, line_ids: Sequence[int]) -> Sequence[int]:
"""Convert the edit lines (which are line ids including deleted lines) into
normal line numbers that do not include deleted lines."""
change_tks = self.span.delta.apply_to_change(self.span.original.tolist())
input_l = self.span.line_range[0]
input_lines = list[int]()
- for i, tks in enumerate(split_list(change_tks, Newline_id)):
<0> if tks and tks[0] == Del_id:
continue
if i in line_ids:
input_lines.append(input_l)
input_l += 1
return input_lines
| ===========unchanged ref 0===========
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.encoding
Del_id = get_tk_id(Del)
tk_splitlines(tks: TokenSeq)
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
apply_to_change(change: TokenSeq) -> TokenSeq
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor.encoding
+ def tk_splitlines(tks: TokenSeq):
+ return split_list(tks, Newline_id)
+
===========changed ref 1===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_change(self, change: TokenSeq) -> TokenSeq:
+ lines = tk_splitlines(change)
- lines = split_list(change, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
elif action[0] == Del_id:
deleted = True
if deleted:
new_lines.append([Del_id] + line)
else:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
return join_list(new_lines, Newline_id)
===========changed ref 2===========
# module: coeditor.common
+ def fix_newline(text: str):
+ if text.endswith("\n"):
+ return text
+ return text + "\n"
+
===========changed ref 3===========
# module: coeditor.common
+ def assert_str_equal(actual: str, expect: str, name: str | None = None):
- def assert_str_equal(actual: str, expect: str):
+ actual = actual.rstrip()
+ expect = expect.rstrip()
if actual != expect:
print_err(f"{expect = }")
print_err(f"{actual = }")
print_err("String difference:")
diff = show_string_diff(expect, actual)
print_err(diff)
+ raise AssertionError(f"Strings didn't match: {name}")
- raise AssertionError("Strings didn't match.")
===========changed ref 4===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_input(self, input: TokenSeq):
+ lines = tk_splitlines(input)
- lines = split_list(input, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
elif action[0] == Del_id:
deleted = True
if not deleted:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
return join_list(new_lines, Newline_id)
===========changed ref 5===========
# module: coeditor.encoding
def code_to_input(code_tks: TokenSeq) -> TokenSeq:
"""
Convert the original code into model input by inserting <extra_id> tokens.
In this format, there will be an <extra_id> token at the beginning of each line.
An additional <extra_id> will be added at the end to allow appending.
"""
+ tk_lines = tk_splitlines(code_tks)
- tk_lines = split_list(code_tks, Newline_id)
tk_lines.append([])
input_seq = TokenSeq()
for i, line in enumerate(tk_lines):
if i < N_Extra_Ids:
input_seq.append(get_extra_id(i))
input_seq.extend(line)
if i < len(tk_lines) - 1:
input_seq.append(Newline_id)
return input_seq
===========changed ref 6===========
# module: coeditor.encoding
def tokens_to_change(tokens: TokenSeq) -> Modified[str]:
"Decode a token sequence into a change."
+ tk_lines = tk_splitlines(tokens)
- tk_lines = split_list(tokens, Newline_id)
before_lines = list[TokenSeq]()
after_lines = list[TokenSeq]()
for tk_line in tk_lines:
if tk_line and tk_line[0] == Add_id:
after_lines.append(tk_line[1:])
elif tk_line and tk_line[0] == Del_id:
before_lines.append(tk_line[1:])
else:
before_lines.append(tk_line)
after_lines.append(tk_line)
before_code = decode_tokens(join_list(before_lines, Newline_id))
after_code = decode_tokens(join_list(after_lines, Newline_id))
return Modified(before_code, after_code)
===========changed ref 7===========
# module: coeditor.encoding
def compress_change_tks(tks: TokenSeq, max_ctx: int):
+ lines = tk_splitlines(tks)
- lines = split_list(tks, sep=Newline_id)
to_keep = [False for _ in lines]
# mark which lines to keep
for i, line in enumerate(lines):
if line and (line[0] == Add_id or line[0] == Del_id):
for j in range(max(0, i - max_ctx), min(len(lines), i + max_ctx + 1)):
to_keep[j] = True
new_lines = list[TokenSeq]()
i = 0
while i < len(lines):
if to_keep[i]:
new_lines.append(lines[i])
i += 1
else:
j = i + 1
while j < len(lines) and not to_keep[j]:
j += 1
new_lines.append(_OMIT)
i = j
return join_list(new_lines, sep=Newline_id)
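===========example sketch===========
A self-contained illustration added alongside this record (not part of the original commit): the id-to-line mapping of `line_ids_to_input_lines`, rebuilt on string diff lines. Line ids index all diff lines (including deleted ones), while input line numbers count only lines that exist in the changed code, starting from the span's first line.
def line_ids_to_input_lines(diff_lines, first_input_line, wanted_ids):
    out, input_l = [], first_input_line
    for i, line in enumerate(diff_lines):
        if line.startswith("-"):   # deleted: consumes an id but no input line
            continue
        if i in wanted_ids:
            out.append(input_l)
        input_l += 1
    return out

diff = [" a", "- b", " c", "+ d"]
# ids 0..3 name diff lines; non-deleted ones map to input lines 10, 11, 12
assert line_ids_to_input_lines(diff, 10, {0, 2, 3}) == [10, 11, 12]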
|
coeditor.c3problem/C3GeneratorCache.create_problem | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> for i, tks in enumerate(tk_splitlines(changed_code)):
| # module: coeditor.c3problem
class C3GeneratorCache:
def create_problem(
self,
target: ChangedSpan,
target_lines: Sequence[int],
changed: Mapping[ModuleName, JModuleChange],
target_usages: LineUsageAnalysis,
src_info: SrcInfo,
) -> C3Problem:
<s>()
changed = dict(changed)
module = target.module
target_mc = changed.pop(module)
all_mc = [target_mc] + list(changed.values())
for mc in all_mc:
is_target_mc = mc.module_change.earlier.mname == module
for cspan in mc.changed:
if not is_target_mc or cspan.line_range != target.line_range:
relevant_changes.append(self.to_code_span(cspan))
code_span = self.to_code_span(target)
changed_code = code_span.delta.apply_to_change(code_span.original.tolist())
target_set = set(target_lines)
line_ids = list[int]()
input_l = target.line_range[0]
- for i, tks in enumerate(split_list(changed_code, Newline_id)):
<0> if tks and tks[0] == Del_id:
continue
if input_l in target_set:
line_ids.append(i)
input_l += 1
code_span = dataclasses.replace(
code_span, original=TkArray.new(changed_code), delta=TkDelta.empty()
)
relevant_unchanged = self.get_relevant_unchanged(code_span, target_usages)
prob = C3Problem(
code_span,
line_ids,
relevant_changes=relevant_changes,
relevant_unchanged=relevant_unchanged,
change_type=target.change.map(lambda _: None),
src_info=src_info,
)
return prob
| ===========above chunk 0===========
# module: coeditor.c3problem
class C3GeneratorCache:
def create_problem(
self,
target: ChangedSpan,
target_lines: Sequence[int],
changed: Mapping[ModuleName, JModuleChange],
target_usages: LineUsageAnalysis,
src_info: SrcInfo,
) -> C3Problem:
# offset: -1
relevant_changes = list[ChangedCodeSpan]()
changed = dict(changed)
module = target.module
target_mc = changed.pop(module)
</s>
===========unchanged ref 0===========
at: coeditor.c3problem
ChangedCodeSpan(headers: Sequence[ChangedHeader], original: TkArray, delta: TkDelta, line_range: LineRange, module: ModuleName)
SrcInfo(map: Mapping[_KT, _VT], **kwargs: _VT)
SrcInfo(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
SrcInfo(**kwargs: _VT)
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
LineUsageAnalysis(line2usages: Mapping[int, set[PyDefinition]])
at: coeditor.c3problem.C3GeneratorCache
get_relevant_unchanged(this_change: ChangedCodeSpan, line_usages: LineUsageAnalysis)
to_code_span(span: ChangedSpan)
at: coeditor.c3problem.ChangedCodeSpan
original: TkArray
delta: TkDelta
at: coeditor.change.Added
after: E1
map(f: Callable[[E1], T2]) -> "Added[T2]"
at: coeditor.change.Deleted
before: E1
map(f: Callable[[E1], T2]) -> "Deleted[T2]"
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
map(f: Callable[[E1], T2]) -> "Modified[T2]"
at: coeditor.common
ModuleName = str
at: coeditor.encoding
Del_id = get_tk_id(Del)
tk_splitlines(tks: TokenSeq)
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
===========unchanged ref 1===========
at: coeditor.encoding.TkDelta
apply_to_change(change: TokenSeq) -> TokenSeq
empty() -> "TkDelta"
at: coeditor.scoped_changes
ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange)
JModuleChange(module_change: Change[JModule], changed: Sequence[ChangedSpan])
at: coeditor.scoped_changes.ChangedSpan
change: Change[str]
parent_scopes: Sequence[Change[ChangeScope]]
line_range: LineRange
at: coeditor.scoped_changes.JModule
mname: ModuleName
tree: ptree.Module
at: coeditor.scoped_changes.JModuleChange
module_change: Change[JModule]
changed: Sequence[ChangedSpan]
at: coeditor.tk_array
TkArray()
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
new(tks: Sequence[int]) -> "TkArray"
at: dataclasses
replace(obj: _T, **changes: Any) -> _T
at: typing
Mapping = _alias(collections.abc.Mapping, 2)
Sequence = _alias(collections.abc.Sequence, 1)
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
===========changed ref 0===========
# module: coeditor.encoding
+ def tk_splitlines(tks: TokenSeq):
+ return split_list(tks, Newline_id)
+
===========changed ref 1===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_change(self, change: TokenSeq) -> TokenSeq:
+ lines = tk_splitlines(change)
- lines = split_list(change, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
elif action[0] == Del_id:
deleted = True
if deleted:
new_lines.append([Del_id] + line)
else:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
return join_list(new_lines, Newline_id)
===========changed ref 2===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class PyDefinition:
@staticmethod
def from_name(name: classes.BaseName) -> Iterable["PyDefinition"]:
if (
not name.in_builtin_module()
and (full_name := name.full_name)
and (signatures := name._get_docstring_signature() or name.get_line_code())
):
full_name = PyFullName(full_name)
start_pos = name.get_definition_start_position()
end_pos = name.get_definition_end_position()
signatures = name._get_docstring_signature()
if name.type == "module":
return
if signatures:
+ signatures = signatures
- signatures = "sig: " + signatures
else:
signatures = name.get_line_code()
yield PyDefinition(full_name, start_pos, end_pos, signatures)
===========changed ref 3===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
def line_ids_to_input_lines(self, line_ids: Sequence[int]) -> Sequence[int]:
"""Convert the edit lines (which are line ids including deleted lines) into
normal line numbers that do not include deleted lines."""
change_tks = self.span.delta.apply_to_change(self.span.original.tolist())
input_l = self.span.line_range[0]
input_lines = list[int]()
+ for i, tks in enumerate(tk_splitlines(change_tks)):
- for i, tks in enumerate(split_list(change_tks, Newline_id)):
if tks and tks[0] == Del_id:
continue
if i in line_ids:
input_lines.append(input_l)
input_l += 1
return input_lines
===========changed ref 4===========
# module: coeditor.common
+ def fix_newline(text: str):
+ if text.endswith("\n"):
+ return text
+ return text + "\n"
+
===========changed ref 5===========
# module: coeditor.common
+ def assert_str_equal(actual: str, expect: str, name: str | None = None):
- def assert_str_equal(actual: str, expect: str):
+ actual = actual.rstrip()
+ expect = expect.rstrip()
if actual != expect:
print_err(f"{expect = }")
print_err(f"{actual = }")
print_err("String difference:")
diff = show_string_diff(expect, actual)
print_err(diff)
+ raise AssertionError(f"Strings didn't match: {name}")
- raise AssertionError("Strings didn't match.")
|
coeditor.c3problem/C3ProblemTokenizer.tokenize_problem | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> origin_lines = tk_splitlines(original)
| # module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
span = problem.span
original: TokenSeq = span.original.tolist()
tk_delta: TkDelta = span.delta
- origin_lines = split_list(original, Newline_id)
<0> edit_start = problem.edit_line_ids[0]
scope_tks = self._encode_headers(span.headers, 0)
input_limit = self.max_query_tks - len(scope_tks)
chunk_input = TokenSeq()
chunk_output = TokenSeq()
last_line = edit_start
for i, l in enumerate(problem.edit_line_ids):
for line in origin_lines[last_line + 1 : l]:
chunk_input.extend(line)
chunk_input.append(Newline_id)
chunk_input.append(get_extra_id(i))
if l < len(origin_lines):
chunk_input.extend(origin_lines[l])
chunk_input.append(Newline_id)
last_line = l
line_change = join_list(tk_delta.get_line_change(l), Newline_id)
chunk_output.append(get_extra_id(i))
chunk_output.extend(line_change)
if line_change and line_change[-1] != Del_id:
chunk_output.append(Newline_id)
if len(chunk_input) > input_limit:
break
edit_stop = last_line + 1
# limit the input size if it's too long
chunk_input = truncate_section(
chunk_input, TruncateAt.Right, input_limit, inplace=True
)
chunk_output = truncate_output_tks(chunk_input, chunk_output)
# try to move some prev_change_tks into the input
above_tks = join_list(</s> | ===========below chunk 0===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: 1
<s>_output)
# try to move some prev_change_tks into the input
above_tks = join_list(origin_lines[:edit_start] + [TokenSeq()], Newline_id)
above_tks = tk_delta.for_input_range((0, edit_start)).apply_to_change(above_tks)
below_tks = join_list(origin_lines[edit_stop:] + [TokenSeq()], Newline_id)
chunk_input, above_tks, below_tks = self._inline_some_context(
chunk_input, above_tks, below_tks, input_limit
)
chunk_output = truncate_section(
chunk_output,
TruncateAt.Right,
self.max_output_tks,
add_bos=False,
inplace=True,
)
above_chunks = break_into_chunks(
above_tks,
lambda i: self._encode_headers(span.headers, -1 - i),
chunk_size=self.max_ref_tks,
overlap=self.ref_chunk_overlap,
right_to_left=True,
)
if not below_tks:
below_chunks = []
else:
below_chunks = break_into_chunks(
below_tks,
lambda i: self._encode_headers(span.headers, i + 1),
chunk_size=self.max_ref_tks,
overlap=self.ref_chunk_overlap,
)
above_chunks = [
(f"above chunk {i}", TkArray.new(chunk))
for i, chunk in enumerate(above_chunks)
]
below_chunks = [
(f"below chunk {i}", TkArray.new(chunk))
for i, chunk</s>
===========below chunk 1===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: 2
<s> below_chunks = [
(f"below chunk {i}", TkArray.new(chunk))
for i, chunk in enumerate(below_chunks)
]
all_refs = above_chunks + below_chunks
ref_size_sum = sum(len(ref) for _, ref in all_refs)
if ref_size_sum < self.max_ref_tks_sum:
unchanged = self._group_encode_unchanged_refs(problem.relevant_unchanged)
for i, chunk in enumerate(unchanged):
all_refs.append((f"unchanged ref {i}", chunk))
if ref_size_sum < self.max_ref_tks_sum:
changed = self._group_encode_changed_refs(problem.relevant_changes)
for i, chunk in enumerate(changed):
all_refs.append((f"changed ref {i}", chunk))
ref_size_sum += sum(len(x) for x in changed)
# take until we hit the limit
ref_size_sum = 0
kept_refs = list[tuple[str, TkArray]]()
for (name, ref) in all_refs:
if ref_size_sum + len(ref) > self.max_ref_tks_sum:
continue
ref_size_sum += len(ref)
kept_refs.append((name, ref))
return TkC3Problem(
TkArray.new(chunk_input),
TkArray.new(scope_tks),
TkArray.new(chunk_output),
path=span.headers[-1].path,
change_type=problem.change_type,
named_references=kept_refs,
===========unchanged ref 0===========
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
TkC3Problem(main_input: TkArray, header: TkArray, output: TkArray, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TkArray]], project: str, commit: CommitInfo | None)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.4"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 12
ref_chunk_overlap: int = 32
_encode_headers(scope_changes: Sequence[ChangedHeader], offset: int) -> TokenSeq
_inline_some_context(input: TokenSeq, above_ctx: TokenSeq, below_ctx: TokenSeq, size_limit: int) -> tuple[TokenSeq, TokenSeq, TokenSeq]
_group_encode_unchanged_refs(elems: Mapping[PyFullName, PyDefinition]) -> Sequence[TkArray]
_group_encode_changed_refs(changes: Sequence[ChangedCodeSpan]) -> Sequence[TkArray]
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
|
coeditor.service/get_tk_lines | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> lines = tk_splitlines(tks)
| # module: coeditor.service
def get_tk_lines(tks: TokenSeq, line_ids: Sequence[int]) -> TokenSeq:
- lines = split_list(tks, Newline_id)
<0> return join_list((lines[i] for i in line_ids), Newline_id)
| ===========unchanged ref 0===========
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.encoding
tk_splitlines(tks: TokenSeq)
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor.encoding
+ def tk_splitlines(tks: TokenSeq):
+ return split_list(tks, Newline_id)
+
===========changed ref 1===========
# module: coeditor.common
+ def fix_newline(text: str):
+ if text.endswith("\n"):
+ return text
+ return text + "\n"
+
===========changed ref 2===========
# module: coeditor.common
+ def assert_str_equal(actual: str, expect: str, name: str | None = None):
- def assert_str_equal(actual: str, expect: str):
+ actual = actual.rstrip()
+ expect = expect.rstrip()
if actual != expect:
print_err(f"{expect = }")
print_err(f"{actual = }")
print_err("String difference:")
diff = show_string_diff(expect, actual)
print_err(diff)
+ raise AssertionError(f"Strings didn't match: {name}")
- raise AssertionError("Strings didn't match.")
===========changed ref 3===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class PyDefinition:
@staticmethod
def from_name(name: classes.BaseName) -> Iterable["PyDefinition"]:
if (
not name.in_builtin_module()
and (full_name := name.full_name)
and (signatures := name._get_docstring_signature() or name.get_line_code())
):
full_name = PyFullName(full_name)
start_pos = name.get_definition_start_position()
end_pos = name.get_definition_end_position()
signatures = name._get_docstring_signature()
if name.type == "module":
return
if signatures:
+ signatures = signatures
- signatures = "sig: " + signatures
else:
signatures = name.get_line_code()
yield PyDefinition(full_name, start_pos, end_pos, signatures)
===========changed ref 4===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
def line_ids_to_input_lines(self, line_ids: Sequence[int]) -> Sequence[int]:
"""Convert the edit lines (which are line ids including deleted lines) into
normal line numbers that do not include deleted lines."""
change_tks = self.span.delta.apply_to_change(self.span.original.tolist())
input_l = self.span.line_range[0]
input_lines = list[int]()
+ for i, tks in enumerate(tk_splitlines(change_tks)):
- for i, tks in enumerate(split_list(change_tks, Newline_id)):
if tks and tks[0] == Del_id:
continue
if i in line_ids:
input_lines.append(input_l)
input_l += 1
return input_lines
===========changed ref 5===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_input(self, input: TokenSeq):
+ lines = tk_splitlines(input)
- lines = split_list(input, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
elif action[0] == Del_id:
deleted = True
if not deleted:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
return join_list(new_lines, Newline_id)
===========changed ref 6===========
# module: coeditor.encoding
def code_to_input(code_tks: TokenSeq) -> TokenSeq:
"""
Convert the original code into model input by inserting <extra_id> tokens.
In this format, there will be an <extra_id> token at the beginning of each line.
An additional <extra_id> will be added at the end to allow appending.
"""
+ tk_lines = tk_splitlines(code_tks)
- tk_lines = split_list(code_tks, Newline_id)
tk_lines.append([])
input_seq = TokenSeq()
for i, line in enumerate(tk_lines):
if i < N_Extra_Ids:
input_seq.append(get_extra_id(i))
input_seq.extend(line)
if i < len(tk_lines) - 1:
input_seq.append(Newline_id)
return input_seq
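===========sketch: code_to_input format===========
A plain-text rendering of the format described in the docstring above. The helper below is hypothetical and ignores the `N_Extra_Ids` cap that the real implementation enforces.

def code_to_input_str(code: str) -> str:
    lines = code.split("\n") + [""]  # extra sentinel line to allow appending
    return "\n".join(f"<extra_id_{i}>{line}" for i, line in enumerate(lines))

print(code_to_input_str("def f():\n    return 1"))
# <extra_id_0>def f():
# <extra_id_1>    return 1
# <extra_id_2>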
===========changed ref 7===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_change(self, change: TokenSeq) -> TokenSeq:
+ lines = tk_splitlines(change)
- lines = split_list(change, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
elif action[0] == Del_id:
deleted = True
if deleted:
new_lines.append([Del_id] + line)
else:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
return join_list(new_lines, Newline_id)
===========changed ref 8===========
# module: coeditor.encoding
def tokens_to_change(tokens: TokenSeq) -> Modified[str]:
"Decode a token sequence into a change."
+ tk_lines = tk_splitlines(tokens)
- tk_lines = split_list(tokens, Newline_id)
before_lines = list[TokenSeq]()
after_lines = list[TokenSeq]()
for tk_line in tk_lines:
if tk_line and tk_line[0] == Add_id:
after_lines.append(tk_line[1:])
elif tk_line and tk_line[0] == Del_id:
before_lines.append(tk_line[1:])
else:
before_lines.append(tk_line)
after_lines.append(tk_line)
before_code = decode_tokens(join_list(before_lines, Newline_id))
after_code = decode_tokens(join_list(after_lines, Newline_id))
return Modified(before_code, after_code)
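===========sketch: decoding marked lines===========
A string-level analogy of the decoding in `tokens_to_change`, with "+"/"-" again standing in for `Add_id`/`Del_id`: added lines appear only in the after version, deleted lines only in the before version, and unmarked lines in both.

def lines_to_change(lines: list[str]) -> tuple[str, str]:
    before = [l[1:] if l.startswith("-") else l for l in lines if not l.startswith("+")]
    after = [l[1:] if l.startswith("+") else l for l in lines if not l.startswith("-")]
    return "\n".join(before), "\n".join(after)

assert lines_to_change(["+x = 2", "-x = 1", "y = 0"]) == ("x = 1\ny = 0", "x = 2\ny = 0")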
|
tests.test_edits/assert_change_eq | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> assert_str_equal(get_after(actual), get_after(expected), name)
| # module: tests.test_edits
def assert_change_eq(actual: Change[str], expected: Change[str], name: str):
+ assert_str_equal(get_before(actual), get_before(expected), name)
- assert_str_equal(get_before(actual), get_before(expected))
- assert_str_equal(get_after(actual), get_after(expected))
<0>
| ===========unchanged ref 0===========
at: coeditor.change
Change = Added[E1] | Deleted[E1] | Modified[E1]
at: coeditor.common
assert_str_equal(actual: str, expect: str, name: str | None=None)
at: tests.test_edits
get_before(change: Change[str]) -> str
get_after(change: Change[str]) -> str
===========changed ref 0===========
# module: coeditor.common
+ def assert_str_equal(actual: str, expect: str, name: str | None = None):
- def assert_str_equal(actual: str, expect: str):
+ actual = actual.rstrip()
+ expect = expect.rstrip()
if actual != expect:
print_err(f"{expect = }")
print_err(f"{actual = }")
print_err("String difference:")
diff = show_string_diff(expect, actual)
print_err(diff)
+ raise AssertionError(f"Strings didn't match: {name}")
- raise AssertionError("Strings didn't match.")
===========changed ref 1===========
# module: coeditor.encoding
+ def tk_splitlines(tks: TokenSeq):
+ return split_list(tks, Newline_id)
+
===========changed ref 2===========
# module: coeditor.common
+ def fix_newline(text: str):
+ if text.endswith("\n"):
+ return text
+ return text + "\n"
+
===========changed ref 3===========
# module: coeditor.service
def get_tk_lines(tks: TokenSeq, line_ids: Sequence[int]) -> TokenSeq:
+ lines = tk_splitlines(tks)
- lines = split_list(tks, Newline_id)
return join_list((lines[i] for i in line_ids), Newline_id)
===========changed ref 4===========
# module: coeditor.service
- def replace_lines(text: str, span: CodeRange, replacement: str):
- start_ln, end_ln = span[0][0] - 1, span[1][0]
- replacemnet = textwrap.indent(textwrap.dedent(replacement), " " * span[0][1])
- old_lines = text.split("\n")
- new_lines = old_lines[:start_ln] + [replacemnet] + old_lines[end_ln + 1 :]
- return "\n".join(new_lines)
-
===========changed ref 5===========
# module: coeditor.service
- def get_span(text: str, span: CodeRange):
- start_ln, end_ln = span[0][0] - 1, span[1][0]
- old_lines = text.split("\n")
- new_lines = old_lines[start_ln : end_ln + 1]
- new_lines[0] = new_lines[0][span[0][1] :]
- new_lines[-1] = new_lines[-1][: span[1][1]]
- return "\n".join(new_lines)
-
===========changed ref 6===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class PyDefinition:
@staticmethod
def from_name(name: classes.BaseName) -> Iterable["PyDefinition"]:
if (
not name.in_builtin_module()
and (full_name := name.full_name)
and (signatures := name._get_docstring_signature() or name.get_line_code())
):
full_name = PyFullName(full_name)
start_pos = name.get_definition_start_position()
end_pos = name.get_definition_end_position()
signatures = name._get_docstring_signature()
if name.type == "module":
return
if signatures:
+ signatures = signatures
- signatures = "sig: " + signatures
else:
signatures = name.get_line_code()
yield PyDefinition(full_name, start_pos, end_pos, signatures)
===========changed ref 7===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
def line_ids_to_input_lines(self, line_ids: Sequence[int]) -> Sequence[int]:
"""Convert the edit lines (which are line ids including deleted lines) into
normal line numbers that do not include deleted lines."""
change_tks = self.span.delta.apply_to_change(self.span.original.tolist())
input_l = self.span.line_range[0]
input_lines = list[int]()
+ for i, tks in enumerate(tk_splitlines(change_tks)):
- for i, tks in enumerate(split_list(change_tks, Newline_id)):
if tks and tks[0] == Del_id:
continue
if i in line_ids:
input_lines.append(input_l)
input_l += 1
return input_lines
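===========sketch: line id mapping===========
A string-level analogy of `line_ids_to_input_lines` ("-"-prefixed lines stand in for `Del_id` lines): a deleted line consumes a line id but never yields an input line number, which is why ids after a deletion map to earlier input lines.

def ids_to_input_lines(change_lines: list[str], line_ids: set[int], start: int) -> list[int]:
    out, input_l = list[int](), start
    for i, line in enumerate(change_lines):
        if line.startswith("-"):
            continue  # deleted lines do not advance the input line counter
        if i in line_ids:
            out.append(input_l)
        input_l += 1
    return out

# id 1 is a deleted line, so id 2 maps to the second surviving input line
assert ids_to_input_lines(["a", "-b", "c"], {0, 2}, start=10) == [10, 11]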
===========changed ref 8===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_input(self, input: TokenSeq):
+ lines = tk_splitlines(input)
- lines = split_list(input, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
elif action[0] == Del_id:
deleted = True
if not deleted:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
return join_list(new_lines, Newline_id)
===========changed ref 9===========
# module: coeditor.encoding
def code_to_input(code_tks: TokenSeq) -> TokenSeq:
"""
Convert the original code into model input by inserting <extra_id> tokens.
In this format, there will be an <extra_id> token at the beginning of each line.
An additional <extra_id> will be added at the end to allow appending.
"""
+ tk_lines = tk_splitlines(code_tks)
- tk_lines = split_list(code_tks, Newline_id)
tk_lines.append([])
input_seq = TokenSeq()
for i, line in enumerate(tk_lines):
if i < N_Extra_Ids:
input_seq.append(get_extra_id(i))
input_seq.extend(line)
if i < len(tk_lines) - 1:
input_seq.append(Newline_id)
return input_seq
===========changed ref 10===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_change(self, change: TokenSeq) -> TokenSeq:
+ lines = tk_splitlines(change)
- lines = split_list(change, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
elif action[0] == Del_id:
deleted = True
if deleted:
new_lines.append([Del_id] + line)
else:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action)
return join_list(new_lines, Newline_id)
|
tests.test_edits/assert_tks_eq | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> assert_str_equal(actual_str, expected_str, name)
| # module: tests.test_edits
def assert_tks_eq(actual: TokenSeq, expected: TokenSeq, name: str):
- if actual != expected:
- print_sections(
+ actual_str = decode_tokens(actual)
+ expected_str = decode_tokens(expected)
- ("Expected", decode_tokens(expected)),
- ("Reconstructed", decode_tokens(actual)),
- )
- raise ValueError(f"Failed for case: {name}")
<0>
| ===========unchanged ref 0===========
at: coeditor.common
TokenSeq = list[Token]
assert_str_equal(actual: str, expect: str, name: str | None=None)
at: coeditor.encoding
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
===========changed ref 0===========
# module: coeditor.common
+ def assert_str_equal(actual: str, expect: str, name: str | None = None):
- def assert_str_equal(actual: str, expect: str):
+ actual = actual.rstrip()
+ expect = expect.rstrip()
if actual != expect:
print_err(f"{expect = }")
print_err(f"{actual = }")
print_err("String difference:")
diff = show_string_diff(expect, actual)
print_err(diff)
+ raise AssertionError(f"Strings didn't match: {name}")
- raise AssertionError("Strings didn't match.")
===========changed ref 1===========
# module: tests.test_edits
def assert_change_eq(actual: Change[str], expected: Change[str], name: str):
+ assert_str_equal(get_before(actual), get_before(expected), name)
- assert_str_equal(get_before(actual), get_before(expected))
+ assert_str_equal(get_after(actual), get_after(expected), name)
- assert_str_equal(get_after(actual), get_after(expected))
===========changed ref 2===========
# module: coeditor.encoding
+ def tk_splitlines(tks: TokenSeq):
+ return split_list(tks, Newline_id)
+
===========changed ref 3===========
# module: coeditor.common
+ def fix_newline(text: str):
+ if text.endswith("\n"):
+ return text
+ return text + "\n"
+
===========changed ref 4===========
# module: coeditor.service
def get_tk_lines(tks: TokenSeq, line_ids: Sequence[int]) -> TokenSeq:
+ lines = tk_splitlines(tks)
- lines = split_list(tks, Newline_id)
return join_list((lines[i] for i in line_ids), Newline_id)
===========changed ref 5===========
# module: coeditor.service
- def replace_lines(text: str, span: CodeRange, replacement: str):
- start_ln, end_ln = span[0][0] - 1, span[1][0]
- replacemnet = textwrap.indent(textwrap.dedent(replacement), " " * span[0][1])
- old_lines = text.split("\n")
- new_lines = old_lines[:start_ln] + [replacemnet] + old_lines[end_ln + 1 :]
- return "\n".join(new_lines)
-
===========changed ref 6===========
# module: coeditor.service
- def get_span(text: str, span: CodeRange):
- start_ln, end_ln = span[0][0] - 1, span[1][0]
- old_lines = text.split("\n")
- new_lines = old_lines[start_ln : end_ln + 1]
- new_lines[0] = new_lines[0][span[0][1] :]
- new_lines[-1] = new_lines[-1][: span[1][1]]
- return "\n".join(new_lines)
-
===========changed ref 7===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class PyDefinition:
@staticmethod
def from_name(name: classes.BaseName) -> Iterable["PyDefinition"]:
if (
not name.in_builtin_module()
and (full_name := name.full_name)
and (signatures := name._get_docstring_signature() or name.get_line_code())
):
full_name = PyFullName(full_name)
start_pos = name.get_definition_start_position()
end_pos = name.get_definition_end_position()
signatures = name._get_docstring_signature()
if name.type == "module":
return
if signatures:
+ signatures = signatures
- signatures = "sig: " + signatures
else:
signatures = name.get_line_code()
yield PyDefinition(full_name, start_pos, end_pos, signatures)
===========changed ref 8===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
def line_ids_to_input_lines(self, line_ids: Sequence[int]) -> Sequence[int]:
"""Convert the edit lines (which are line ids including deleted lines) into
normal line numbers that do not include deleted lines."""
change_tks = self.span.delta.apply_to_change(self.span.original.tolist())
input_l = self.span.line_range[0]
input_lines = list[int]()
+ for i, tks in enumerate(tk_splitlines(change_tks)):
- for i, tks in enumerate(split_list(change_tks, Newline_id)):
if tks and tks[0] == Del_id:
continue
if i in line_ids:
input_lines.append(input_l)
input_l += 1
return input_lines
===========changed ref 9===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_input(self, input: TokenSeq):
+ lines = tk_splitlines(input)
- lines = split_list(input, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
elif action[0] == Del_id:
deleted = True
if not deleted:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
return join_list(new_lines, Newline_id)
===========changed ref 10===========
# module: coeditor.encoding
def code_to_input(code_tks: TokenSeq) -> TokenSeq:
"""
Convert the original code into model input by inserting <extra_id> tokens.
In this format, there will be an <extra_id> token at the beginning of each line.
An additional <extra_id> will be added at the end to allow appending.
"""
+ tk_lines = tk_splitlines(code_tks)
- tk_lines = split_list(code_tks, Newline_id)
tk_lines.append([])
input_seq = TokenSeq()
for i, line in enumerate(tk_lines):
if i < N_Extra_Ids:
input_seq.append(get_extra_id(i))
input_seq.extend(line)
if i < len(tk_lines) - 1:
input_seq.append(Newline_id)
return input_seq
|
tests.test_edits/test_splitlines | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> assert_tks_eq(join_list(tk_lines, Newline_id), enc, "join_list(tk_lines)")
| # module: tests.test_edits
def test_splitlines():
for n in range(100):
rand_input = [random.choice(["a", "b", "c", "\n"]) for _ in range(n)]
+ input = "".join(rand_input).rstrip("\n")
- input = fix_line_end("".join(rand_input))
lines = splitlines(input)
# basic identity
+ assert "\n".join(lines) == input
- assert "".join(lines) == input
assert count_lines(input) == len(lines)
# encode and decode
enc = encode_lines_join(input)
assert decode_tokens(enc) == input
# split tokens
tk_lines = tk_splitlines(enc)
assert len(tk_lines) == len(lines)
- assert_tks_eq(join_list(tk_lines), enc, "join_list(tk_lines)")
<0>
| ===========unchanged ref 0===========
at: coeditor.common
splitlines(text: str) -> list[str]
count_lines(text: str) -> int
join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
at: coeditor.encoding
Newline_id = get_tk_id("\n")
tk_splitlines(tks: TokenSeq)
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
encode_lines_join(text: str) -> TokenSeq
at: tests.test_edits
assert_tks_eq(actual: TokenSeq, expected: TokenSeq, name: str)
at: tests.test_edits.test_splitlines
rand_input = [random.choice(["a", "b", "c", "\n"]) for _ in range(n)]
===========changed ref 0===========
# module: tests.test_edits
def assert_tks_eq(actual: TokenSeq, expected: TokenSeq, name: str):
- if actual != expected:
- print_sections(
+ actual_str = decode_tokens(actual)
+ expected_str = decode_tokens(expected)
- ("Expected", decode_tokens(expected)),
- ("Reconstructed", decode_tokens(actual)),
- )
- raise ValueError(f"Failed for case: {name}")
+ assert_str_equal(actual_str, expected_str, name)
===========changed ref 1===========
# module: coeditor.encoding
+ def tk_splitlines(tks: TokenSeq):
+ return split_list(tks, Newline_id)
+
===========changed ref 2===========
# module: tests.test_edits
def assert_change_eq(actual: Change[str], expected: Change[str], name: str):
+ assert_str_equal(get_before(actual), get_before(expected), name)
- assert_str_equal(get_before(actual), get_before(expected))
+ assert_str_equal(get_after(actual), get_after(expected), name)
- assert_str_equal(get_after(actual), get_after(expected))
===========changed ref 3===========
# module: coeditor.common
+ def fix_newline(text: str):
+ if text.endswith("\n"):
+ return text
+ return text + "\n"
+
===========changed ref 4===========
# module: coeditor.service
def get_tk_lines(tks: TokenSeq, line_ids: Sequence[int]) -> TokenSeq:
+ lines = tk_splitlines(tks)
- lines = split_list(tks, Newline_id)
return join_list((lines[i] for i in line_ids), Newline_id)
===========changed ref 5===========
# module: coeditor.common
+ def assert_str_equal(actual: str, expect: str, name: str | None = None):
- def assert_str_equal(actual: str, expect: str):
+ actual = actual.rstrip()
+ expect = expect.rstrip()
if actual != expect:
print_err(f"{expect = }")
print_err(f"{actual = }")
print_err("String difference:")
diff = show_string_diff(expect, actual)
print_err(diff)
+ raise AssertionError(f"Strings didn't match: {name}")
- raise AssertionError("Strings didn't match.")
===========changed ref 6===========
# module: coeditor.service
- def replace_lines(text: str, span: CodeRange, replacement: str):
- start_ln, end_ln = span[0][0] - 1, span[1][0]
- replacemnet = textwrap.indent(textwrap.dedent(replacement), " " * span[0][1])
- old_lines = text.split("\n")
- new_lines = old_lines[:start_ln] + [replacemnet] + old_lines[end_ln + 1 :]
- return "\n".join(new_lines)
-
===========changed ref 7===========
# module: coeditor.service
- def get_span(text: str, span: CodeRange):
- start_ln, end_ln = span[0][0] - 1, span[1][0]
- old_lines = text.split("\n")
- new_lines = old_lines[start_ln : end_ln + 1]
- new_lines[0] = new_lines[0][span[0][1] :]
- new_lines[-1] = new_lines[-1][: span[1][1]]
- return "\n".join(new_lines)
-
===========changed ref 8===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class PyDefinition:
@staticmethod
def from_name(name: classes.BaseName) -> Iterable["PyDefinition"]:
if (
not name.in_builtin_module()
and (full_name := name.full_name)
and (signatures := name._get_docstring_signature() or name.get_line_code())
):
full_name = PyFullName(full_name)
start_pos = name.get_definition_start_position()
end_pos = name.get_definition_end_position()
signatures = name._get_docstring_signature()
if name.type == "module":
return
if signatures:
+ signatures = signatures
- signatures = "sig: " + signatures
else:
signatures = name.get_line_code()
yield PyDefinition(full_name, start_pos, end_pos, signatures)
===========changed ref 9===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
def line_ids_to_input_lines(self, line_ids: Sequence[int]) -> Sequence[int]:
"""Convert the edit lines (which are line ids including deleted lines) into
normal line numbers that do not include deleted lines."""
change_tks = self.span.delta.apply_to_change(self.span.original.tolist())
input_l = self.span.line_range[0]
input_lines = list[int]()
+ for i, tks in enumerate(tk_splitlines(change_tks)):
- for i, tks in enumerate(split_list(change_tks, Newline_id)):
if tks and tks[0] == Del_id:
continue
if i in line_ids:
input_lines.append(input_l)
input_l += 1
return input_lines
===========changed ref 10===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def apply_to_input(self, input: TokenSeq):
+ lines = tk_splitlines(input)
- lines = split_list(input, Newline_id)
new_lines = list[TokenSeq]()
for i, line in enumerate(lines):
deleted = False
if delta := self._deltas.get(i):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
elif action[0] == Del_id:
deleted = True
if not deleted:
new_lines.append(line)
if delta := self._deltas.get(len(lines)):
for action in delta:
if action[0] == Add_id:
new_lines.append(action[1:])
return join_list(new_lines, Newline_id)
|
tests.test_edits/TestChangeIdentities.test_str_encodings | Modified | temp-1 | 8b7c5296eb4ca6b0b898bd216b0369226d60f406 | handle trailing spaces in test. | <0>:<add> assert_str_equal(after, get_after(c), name)
| # module: tests.test_edits
class TestChangeIdentities:
def test_str_encodings(self):
for name, c in self.cases.items():
try:
line_diffs = change_to_line_diffs(c)
print("line_diffs\n------\n" + "\n".join(line_diffs))
before, delta = line_diffs_to_original_delta(line_diffs)
print("before:")
print(before)
print("delta:", delta)
+ assert_str_equal(before, get_before(c), name)
- assert_str_equal(before, get_before(c))
after = delta.apply_to_input(before)
- assert_str_equal(after, get_after(c))
<0> except Exception:
print_err(f"Failed for case: {name}")
raise
| ===========unchanged ref 0===========
at: coeditor.common
print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None
assert_str_equal(actual: str, expect: str, name: str | None=None)
at: coeditor.encoding
change_to_line_diffs(change: Change[str]) -> list[str]
line_diffs_to_original_delta(diffs: list[str]) -> tuple[str, StrDelta]
at: tests.test_edits
get_before(change: Change[str]) -> str
get_after(change: Change[str]) -> str
===========unchanged ref 1===========
at: tests.test_edits.TestChangeIdentities
cases: dict[str, Change[str]] = {
"empty": Modified("", ""),
"generation": Modified("", "123"),
"add a new line": Modified("", "\n"),
"add a new line at end": Modified("a", "a\n"),
"added": Added("a\nb\nc\n"),
"deleted": Deleted("a\nb\nc\n"),
"no change": Modified(
dedent(
"""\
def f1():
x = 1
"""
),
dedent(
"""\
def f1():
x = 1
"""
),
),
"unchanged=True": Modified.from_unchanged(
dedent(
"""\
def f1():
x = 1
"""
),
),
# this test case cannot pass for some reason. Tokenizer bug?
# "leading_whitespace": Modified.from_unchanged(" ..."),
"replace last": Modified(
dedent(
"""\
def f1():
x = 1"""
),
dedent(
"""\
def f1():
x = 2
return x * 2"""
),
),
"no special tokens": Modified(
dedent(
"""\
def f1():
x = 1
y = 2
z = x + y
return z
def f2():
f1()"""
),
dedent(
"""\
# new comment
def f_new():
x = 1
if x > 0:
y = 2 * x
y *= 2
z = x + y
return z
def f2():
f1()
return f_new() + a
new_var = 0
"""
),
),
"with special tokens": Modified(
dedent(
"""\
def f1():
x = "<add>"
</s>
===========changed ref 0===========
# module: coeditor.common
+ def assert_str_equal(actual: str, expect: str, name: str | None = None):
- def assert_str_equal(actual: str, expect: str):
+ actual = actual.rstrip()
+ expect = expect.rstrip()
if actual != expect:
print_err(f"{expect = }")
print_err(f"{actual = }")
print_err("String difference:")
diff = show_string_diff(expect, actual)
print_err(diff)
+ raise AssertionError(f"Strings didn't match: {name}")
- raise AssertionError("Strings didn't match.")
===========changed ref 1===========
# module: tests.test_edits
def assert_change_eq(actual: Change[str], expected: Change[str], name: str):
+ assert_str_equal(get_before(actual), get_before(expected), name)
- assert_str_equal(get_before(actual), get_before(expected))
+ assert_str_equal(get_after(actual), get_after(expected), name)
- assert_str_equal(get_after(actual), get_after(expected))
===========changed ref 2===========
# module: tests.test_edits
def assert_tks_eq(actual: TokenSeq, expected: TokenSeq, name: str):
- if actual != expected:
- print_sections(
+ actual_str = decode_tokens(actual)
+ expected_str = decode_tokens(expected)
- ("Expected", decode_tokens(expected)),
- ("Reconstructed", decode_tokens(actual)),
- )
- raise ValueError(f"Failed for case: {name}")
+ assert_str_equal(actual_str, expected_str, name)
===========changed ref 3===========
# module: tests.test_edits
def test_splitlines():
for n in range(100):
rand_input = [random.choice(["a", "b", "c", "\n"]) for _ in range(n)]
+ input = "".join(rand_input).rstrip("\n")
- input = fix_line_end("".join(rand_input))
lines = splitlines(input)
# basic identity
+ assert "\n".join(lines) == input
- assert "".join(lines) == input
assert count_lines(input) == len(lines)
# encode and decode
enc = encode_lines_join(input)
assert decode_tokens(enc) == input
# split tokens
tk_lines = tk_splitlines(enc)
assert len(tk_lines) == len(lines)
+ assert_tks_eq(join_list(tk_lines, Newline_id), enc, "join_list(tk_lines)")
- assert_tks_eq(join_list(tk_lines), enc, "join_list(tk_lines)")
===========changed ref 4===========
# module: coeditor.encoding
+ def tk_splitlines(tks: TokenSeq):
+ return split_list(tks, Newline_id)
+
===========changed ref 5===========
# module: coeditor.common
+ def fix_newline(text: str):
+ if text.endswith("\n"):
+ return text
+ return text + "\n"
+
===========changed ref 6===========
# module: coeditor.service
def get_tk_lines(tks: TokenSeq, line_ids: Sequence[int]) -> TokenSeq:
+ lines = tk_splitlines(tks)
- lines = split_list(tks, Newline_id)
return join_list((lines[i] for i in line_ids), Newline_id)
===========changed ref 7===========
# module: coeditor.service
- def replace_lines(text: str, span: CodeRange, replacement: str):
- start_ln, end_ln = span[0][0] - 1, span[1][0]
- replacemnet = textwrap.indent(textwrap.dedent(replacement), " " * span[0][1])
- old_lines = text.split("\n")
- new_lines = old_lines[:start_ln] + [replacemnet] + old_lines[end_ln + 1 :]
- return "\n".join(new_lines)
-
===========changed ref 8===========
# module: coeditor.service
- def get_span(text: str, span: CodeRange):
- start_ln, end_ln = span[0][0] - 1, span[1][0]
- old_lines = text.split("\n")
- new_lines = old_lines[start_ln : end_ln + 1]
- new_lines[0] = new_lines[0][span[0][1] :]
- new_lines[-1] = new_lines[-1][: span[1][1]]
- return "\n".join(new_lines)
- |
coeditor.c3problem/C3ProblemSimpleSplit.transform | Modified | temp-1 | 93d3a66a05da77b6c712854834bafe308e32b75b | - Fix dataclasses.replace. - Fix module usages in `pre_edit_analysis` - Sort changes using heuristic. | <0>:<add> prob, edit_line_ids=range(i, j), transformations=new_trans
| # module: coeditor.c3problem
@dataclass
class C3ProblemSimpleSplit(C3ProblemTransform):
def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
delta = prob.span.delta
l_range = prob.edit_line_ids
assert isinstance(l_range, range)
start, stop = l_range.start, l_range.stop
problems = list[C3Problem]()
new_trans = prob.transformations + ("split",)
for i in range(start, stop, self.max_lines_to_edit):
j = min(i + self.max_lines_to_edit, stop)
sub_delta = delta.for_input_range((i, j))
if sub_delta.num_changes() > 0:
sub_prob = dataclasses.replace(
- prob, edit_lines=range(i, j), transformations=new_trans
<0> )
problems.append(sub_prob)
if len(problems) >= self.max_split_factor:
break
return problems
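===========sketch: window splitting===========
The windowing arithmetic used in `transform` above, in isolation. The real code additionally drops windows whose sub-delta contains no changes before counting toward `max_split_factor`.

def split_windows(start: int, stop: int, window: int, factor: int) -> list[range]:
    out = list[range]()
    for i in range(start, stop, window):
        out.append(range(i, min(i + window, stop)))
        if len(out) >= factor:
            break
    return out

assert split_windows(0, 60, 25, 4) == [range(0, 25), range(25, 50), range(50, 60)]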
| ===========unchanged ref 0===========
at: coeditor.c3problem
ChangedHeader(change_tks: TkArray, type: str, line_range: LineRange, path: ProjectPath)
at: coeditor.c3problem.C3GeneratorCache.__init__
self._header_cache = dict[ProjectPath, ChangedHeader]()
self._cspan_cache = dict[tuple[ModuleName, LineRange], ChangedCodeSpan]()
at: coeditor.change.Added
map(f: Callable[[E1], T2]) -> "Added[T2]"
at: coeditor.change.Deleted
map(f: Callable[[E1], T2]) -> "Deleted[T2]"
at: coeditor.change.Modified
map(f: Callable[[E1], T2]) -> "Modified[T2]"
at: coeditor.encoding
line_diffs_to_original_delta(diffs: list[str]) -> tuple[str, StrDelta]
change_to_tokens(change: Change[str]) -> TokenSeq
at: coeditor.scoped_changes
ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange)
at: coeditor.scoped_changes.ChangeScope
path: ProjectPath
tree: ScopeTree
spans: Sequence["StatementSpan"]
subscopes: Mapping[str, Self]
parent_scope: "ChangeScope | None"
at: coeditor.scoped_changes.ChangeScope.__post_init__
self.header_line_range: LineRange = header_line_range
at: coeditor.scoped_changes.ChangedSpan
parent_scopes: Sequence[Change[ChangeScope]]
line_range: LineRange
at: coeditor.tk_array
TkArray()
at: coeditor.tk_array.TkArray
new(tks: Sequence[int]) -> "TkArray"
at: parso.python.tree.Class
type = 'classdef'
__slots__ = ()
===========unchanged ref 1===========
at: parso.python.tree.Function
type = 'funcdef'
at: parso.python.tree.Module
__slots__ = ('_used_names',)
type = 'file_input'
at: typing.Mapping
get(key: _KT) -> Optional[_VT_co]
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
===========changed ref 0===========
# module: coeditor.c3problem
class C3GeneratorCache:
+ def sort_changes(
+ self,
+ target: ChangedCodeSpan,
+ used_defs: Mapping[PyFullName, PyDefinition],
+ changed: Sequence[ChangedCodeSpan],
+ ) -> Sequence[ChangedCodeSpan]:
+ def distance_penalty(cspan: ChangedCodeSpan) -> int:
+ if cspan.module != target.module:
+ return self.max_distance_penalty
+ dis_above = abs(target.line_range[0] - cspan.line_range[1])
+ dis_below = abs(cspan.line_range[0] - target.line_range[1])
+ return min(self.max_distance_penalty, dis_above, dis_below)
+
+ def usage_penalty(cspan: ChangedCodeSpan) -> int:
+ path = cspan.headers[-1].path
+ fullname = path.module + "." + path.path
+ if fullname in used_defs:
+ return -self.usage_bonus
+ return 0
+
+ def length_penalty(cspan: ChangedCodeSpan) -> int:
+ return len(cspan.original) + cspan.delta.change_size()
+
+ result = list(changed)
+ result.sort(
+ key=lambda x: distance_penalty(x) + usage_penalty(x) + length_penalty(x)
+ )
+ return result
+
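===========sketch: penalty-based ranking===========
The ranking idea in `sort_changes`, reduced to plain data. The field names and constants below are made up for illustration: a smaller combined penalty sorts first, and a usage match acts as a negative penalty (a bonus).

candidates = [
    {"name": "far_big", "distance": 80, "used": False, "size": 300},
    {"name": "near",    "distance": 5,  "used": False, "size": 120},
    {"name": "used",    "distance": 80, "used": True,  "size": 200},
]
USAGE_BONUS = 100  # made-up counterpart of self.usage_bonus
ranked = sorted(
    candidates,
    key=lambda c: c["distance"] + (-USAGE_BONUS if c["used"] else 0) + c["size"],
)
assert [c["name"] for c in ranked] == ["near", "used", "far_big"]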
===========changed ref 1===========
# module: coeditor.c3problem
class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
"""
### Change log
+ - v2.8: Fix module usages in `pre_edit_analysis`. Sort changes using heuristic.
- v2.7: Use new PyDefinition that includes signatures.
- v2.6: fix missing changes in `JModuleChanges`. Rename to edit_line_ids.
- v2.5: fix newline encoding bug.
- v2.4: fix buggy encoding of `Added` and `Deleted` changes.
- v2.3: always generate problems with full editing range and move the problem
splitting logic elsewhere. Also changed the data format of `ChangedCodeSpan`.
"""
+ VERSION = "2.8"
- VERSION = "2.7"
# change spans with more than this many lines will be ignored
max_span_lines: int = 500
===========changed ref 2===========
# module: coeditor.c3problem
class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
def pre_edit_analysis(
self,
pstate: ProjectState,
modules: Mapping[RelPath, JModule],
changes: Mapping[ModuleName, JModuleChange],
) -> Mapping[ModuleName, LineUsageAnalysis]:
"Return the definition usages of each line."
result = dict[ModuleName, LineUsageAnalysis]()
src_map = {m.mname: f for f, m in modules.items()}
for mname, mchange in changes.items():
+ result[mname] = LineUsageAnalysis({})
if not isinstance(mchange.module_change, Modified):
continue
lines_to_analyze = set[int]()
for span in mchange.changed:
+ if not isinstance(span.change, Modified):
- if span.change is Added:
continue
lines_to_analyze.update(span.line_range.to_range())
lines_to_analyze.update(span.header_line_range.to_range())
+ if not lines_to_analyze:
+ continue
mod_path = src_map[mname]
script = pstate.scripts[mod_path]
line_usages = self.analyzer.get_line_usages(
script, lines_to_analyze, silent=True
)
result[mname] = line_usages
return result
|
coeditor.c3problem/C3ProblemChangeDropout.transform | Modified | temp-1 | 93d3a66a05da77b6c712854834bafe308e32b75b | - Fix dataclasses.replace. - Fix module usages in `pre_edit_analysis` - Sort changes using heuristic. | <0>:<add> edit_line_ids=edit_line_ids,
| # module: coeditor.c3problem
@dataclass
class C3ProblemChangeDropout(C3ProblemTransform):
def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
<s> new_original = TkArray.new(delta1.apply_to_change(original.tolist()))
new_trans = prob.transformations + ("split", "dropout")
new_span = dataclasses.replace(
prob.span, original=new_original, delta=delta2
)
else:
new_trans = prob.transformations + ("split",)
new_span = prob.span
delta1 = None
delta2_groups = delta.change_groups()
prob_and_n = list[tuple[C3Problem, int]]()
for i in range(start, stop, self.max_lines_to_edit):
j = min(i + self.max_lines_to_edit, stop)
+ edit_line_ids = range(i, j)
- edit_lines = range(i, j)
if delta1 is not None:
+ edit_line_ids = delta1.get_new_target_lines(edit_line_ids)
- edit_lines = delta1.get_new_target_lines(edit_lines)
+ line_set = set(edit_line_ids)
- line_set = set(edit_lines)
n_groups = sum(any(key[0] in line_set for key in g) for g in delta2_groups)
if n_groups > 0:
sub_prob = dataclasses.replace(
prob,
span=new_span,
- edit_lines=edit_lines,
<0> transformations=new_trans,
)
prob_and_n.append((sub_prob, n_groups))
# return the problems with the most changes
prob_and_n.sort(key=lambda p: p[1], reverse=True)
probs = [p[0] for p in prob_and_n]
return probs[: self.max_split_factor]
| ===========above chunk 0===========
# module: coeditor.c3problem
@dataclass
class C3ProblemChangeDropout(C3ProblemTransform):
def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
# offset: -1
<s>.change_groups()
should_dropout = len(grouped_keys) >= 2
if should_dropout:
n_to_drop = int(
len(grouped_keys) * random.random() * self.max_dropout_ratio
)
assert n_to_drop < len(grouped_keys)
keys_to_drop = join_list(
random_subset(grouped_keys, n_to_drop, rng=self._rng)
)
else:
keys_to_drop = []
if keys_to_drop:
delta1, delta2 = delta.decompose_for_change(keys_to_drop)
if random.random() < self._test_prob:
result1 = delta2.apply_to_change(
delta1.apply_to_change(original.tolist())
)
result2 = delta.apply_to_change(original.tolist())
code1 = tokens_to_change(result1).after
code2 = tokens_to_change(result2).after
if code1 != code2:
print_sections(
("result1", decode_tokens(result1)),
("result2", decode_tokens(result2)),
("delta", str(delta)),
("keys_to_drop", str(keys_to_drop)),
("delta1", str(delta1)),
("delta2", str(delta2)),
)
raise AssertionError("decompose_for_change failed.")
delta2_groups = delta2.change_groups()
if not delta2_groups:
print_err(f"{delta=}, {keys_to_drop=}, {delta1=}")
raise AssertionError("Empty delta2_groups")
new_original = TkArray.new(delta1.apply_to_change(original.tolist()))
new_trans</s>
===========above chunk 1===========
# module: coeditor.c3problem
@dataclass
class C3ProblemChangeDropout(C3ProblemTransform):
def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
# offset: -2
original = prob.span.original
delta = prob.span.delta
l_range = prob.edit_line_ids
assert isinstance(l_range, range)
start, stop = l_range.start, l_range.stop
grouped_keys = delta.change_groups()
should_dropout = len(grouped_keys) >= 2
if should_dropout:
n_</s>
===========unchanged ref 0===========
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
C3ProblemTransform()
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.C3ProblemSimpleSplit
max_lines_to_edit: int = 25
max_split_factor: int = 4
at: coeditor.c3problem.C3ProblemTransform
transform(self, prob: C3Problem) -> Sequence[C3Problem]
at: coeditor.c3problem.ChangedCodeSpan
original: TkArray
delta: TkDelta
at: coeditor.common
join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2]
random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1]
at: coeditor.encoding
tokens_to_change(tokens: TokenSeq) -> Modified[str]
at: coeditor.encoding.TkDelta
apply_to_change(change: TokenSeq) -> TokenSeq
for_input_range(line_range: tuple[int, int]) -> Self
decompose_for_change(first_keys: Collection[DeltaKey]) -> tuple[Self, Self]
===========unchanged ref 1===========
change_groups() -> Sequence[tuple[DeltaKey, ...]]
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
at: dataclasses
dataclass(_cls: Type[_T]) -> Type[_T]
dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]]
dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]
replace(obj: _T, **changes: Any) -> _T
at: random
Random(x: Any=...)
random = _inst.random
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor.c3problem
@dataclass
class C3ProblemSimpleSplit(C3ProblemTransform):
def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
delta = prob.span.delta
l_range = prob.edit_line_ids
assert isinstance(l_range, range)
start, stop = l_range.start, l_range.stop
problems = list[C3Problem]()
new_trans = prob.transformations + ("split",)
for i in range(start, stop, self.max_lines_to_edit):
j = min(i + self.max_lines_to_edit, stop)
sub_delta = delta.for_input_range((i, j))
if sub_delta.num_changes() > 0:
sub_prob = dataclasses.replace(
+ prob, edit_line_ids=range(i, j), transformations=new_trans
- prob, edit_lines=range(i, j), transformations=new_trans
)
problems.append(sub_prob)
if len(problems) >= self.max_split_factor:
break
return problems
|
coeditor.model/RetrievalEditorModel.train_on_data | Modified | temp-1 | dc0c6ac79c708b98a9b584d564abb3ab2a411ddf | - Fix dataloader _post_process. - Save model more frequently during training. | <0>:<add> # callbacks=[EarlyStoppingCallback(early_stopping_patience=1)],
| # module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
def train_on_data(
self,
training_name: str,
train_loader: "C3DataLoader",
eval_loader: "C3DataLoader",
train_args: "TrainingArgs",
) -> None:
<s>train_loader)
cprint("blue", "Number of training batches (estimate):", epoch_steps)
trainer_args = Seq2SeqTrainingArguments(
output_dir=str(train_dir),
overwrite_output_dir=True,
evaluation_strategy="epoch",
+ save_strategy="steps",
- save_strategy="epoch",
+ save_steps=max(1, min(5000, epoch_steps // 5)),
logging_steps=max(1, min(1000, epoch_steps // 10)),
num_train_epochs=train_args.max_train_epochs,
+ save_total_limit=3,
- save_total_limit=2,
lr_scheduler_type=train_args.lr_scheduler_type,
learning_rate=train_args.learning_rate,
weight_decay=train_args.weight_decay,
metric_for_best_model="loss_per_tk",
greater_is_better=False,
fp16=True,
+ # load_best_model_at_end=True,
- load_best_model_at_end=True,
push_to_hub=False,
report_to=["wandb"],
disable_tqdm=True,
# torchdynamo="inductor", # use compiled model
)
trainer = DynamicTrainer(
self,
trainer_args,
- callbacks=[EarlyStoppingCallback(early_stopping_patience=1)],
<0> )
trainer.train()
save_dir = get_model_dir(trained=True) / training_name
self.save(save_dir)
print("Model saved to:", save_dir)
| ===========above chunk 0===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
def train_on_data(
self,
training_name: str,
train_loader: "C3DataLoader",
eval_loader: "C3DataLoader",
train_args: "TrainingArgs",
) -> None:
# offset: -1
train_dir = get_model_dir(trained=False) / training_name
eval_loader.tqdm_args = {"disable": True}
model = self
# model = torch.compile(self.to("cuda")) # pytorch doesn't support python 3.11 yet.
class DynamicTrainer(Seq2SeqTrainer):
def get_train_dataloader(self):
return train_loader
def get_eval_dataloader(self, eval_dataset):
return eval_loader
def evaluation_loop(
self,
dataloader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
metrics = model.eval_loss_on_loader(as_any(dataloader))
n_samples = metrics["loss_per_ex"].weight
metrics = {
f"{metric_key_prefix}_{k}": v.mean() for k, v in metrics.items()
}
return EvalLoopOutput(
predictions=tuple(),
label_ids=tuple(),
metrics=metrics,
num_samples=n_samples,
)
epoch_steps = len(train_loader)
cprint("blue", "Number of training batches (estimate):", epoch_steps)
trainer_args</s>
===========unchanged ref 0===========
at: coeditor._utils
as_any(x) -> Any
cprint(color: str, *elems, sep: Optional[str]=..., end: Optional[str]=..., file: Optional[SupportsWrite[str]]=..., flush: bool=...)
at: coeditor.common
get_model_dir(trained=True) -> Path
at: coeditor.common.WeightedSum
sum: V
weight: W
mean() -> float
at: coeditor.model.C3DataLoader
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
tqdm_args: dict | None = None
chunk_size: int = 1000
workers: int = DefaultWorkers
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
eval_loss_on_loader(dataloader: "C3DataLoader")
decorate_autocast(dataloader: "C3DataLoader")
save(save_dir: Path, is_main_process: bool=True, state_dict: Optional[dict]=None, save_function: Callable=torch.save, push_to_hub: bool=False, max_shard_size: Union[int, str]="10GB", safe_serialization: bool=False, variant: Optional[str]=None, /, *, is_main_process: bool=True, state_dict: Optional[dict]=None, save_function: Callable=torch.save, push_to_hub: bool=False, max_shard_size: Union[int, str]="10GB", safe_serialization: bool=False, variant: Optional[str]=None, private: Optional[bool]=None, use_auth_token: Optional[Union[bool, str]]=None, repo_url: Optional[str]=None, organization: Optional[str]=None)
===========unchanged ref 1===========
at: coeditor.model.TrainingArgs
learning_rate: float = 2e-5
weight_decay: float = 0.01
max_train_epochs: int = 3
reinit_weights: bool = False
quicktest: bool = False
lr_scheduler_type: SchedulerType = SchedulerType.LINEAR
at: transformers.trainer.Trainer
get_train_dataloader(self) -> DataLoader
get_eval_dataloader(self, eval_dataset: Optional[Dataset]=None) -> DataLoader
train(resume_from_checkpoint: Optional[Union[str, bool]]=None, trial: Union["optuna.Trial", Dict[str, Any]]=None, ignore_keys_for_eval: Optional[List[str]]=None, **kwargs)
evaluation_loop(self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str="eval") -> EvalLoopOutput
at: transformers.trainer_seq2seq
Seq2SeqTrainer(model: Union["PreTrainedModel", nn.Module]=None, args: "TrainingArguments"=None, data_collator: Optional["DataCollator"]=None, train_dataset: Optional[Dataset]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, tokenizer: Optional["PreTrainedTokenizerBase"]=None, model_init: Optional[Callable[[], "PreTrainedModel"]]=None, compute_metrics: Optional[Callable[["EvalPrediction"], Dict]]=None, callbacks: Optional[List["TrainerCallback"]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None)
at: transformers.trainer_utils
EvalLoopOutput(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
===========unchanged ref 2===========
at: transformers.training_args.TrainingArguments
framework = "pt"
output_dir: str = field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."})
do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."})
evaluation_strategy: Union[IntervalStrategy, str] = field(
default="no",
metadata={"help": "The evaluation strategy to use."},
)
prediction_loss_only: bool = field(
default=False,
metadata={"help": "When performing evaluation and predictions, only returns the loss."},
)
per_device_train_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
)
per_device_eval_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
)
per_gpu_train_batch_size: Optional[int] = field(
default=None,
metadata={
"help": (
"Deprecated, the use of `--per_device_train_batch_size` is preferred. "
"Batch size per GPU/TPU core/CPU for training."
)
},
)
|
coeditor.c3problem/C3ProblemTokenizer._group_encode_unchanged_refs | Modified | temp-1 | 7f98c4b39feed113e3d26b70307448229081e7c1 | Add class sibling usages. Improve used ref ordering. | <0>:<add> last_parent = parent
| # module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def _group_encode_unchanged_refs(
self, elems: Mapping[PyFullName, PyDefinition]
) -> Sequence[TkArray]:
+ def sort_key(e: PyDefinition):
+ return (e.parent, min(e.start_locs, default=(0, 0)))
+
results = list[TkArray]()
this_chunk = TokenSeq()
+ sorted_elems = [e for e in elems.values() if e.signatures and e.parent]
+ sorted_elems.sort(key=sort_key)
+ last_parent = None
+ for defn in sorted_elems:
+ parent = defn.parent
+ header = f"at: {parent}\n" if parent != last_parent else ""
+ text = header + indent("\n".join(s for s in defn.signatures), TAB) + "\n\n"
- for name, defn in elems.items():
- parent = ".".join(split_dots(name)[:-1])
- if not parent:
- continue
- text = f"at: {parent}\n{defn.signatures}\n\n"
tks = encode_lines_join(text)
tks = truncate_section(
tks, TruncateAt.Right, self.max_ref_tks, inplace=True
)
if len(this_chunk) + len(tks) > self.max_ref_tks:
results.append(TkArray.new(this_chunk))
this_chunk = tks
else:
this_chunk.extend(tks)
<0> if this_chunk:
results.append(TkArray.new(this_chunk))
return results
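===========sketch: greedy chunk packing===========
The greedy packing pattern used above, as a standalone sketch. Plain ints stand in for token ids; note that the real code first truncates each item to `max_ref_tks`, so no single item exceeds the budget.

def pack_chunks(items: list[list[int]], budget: int) -> list[list[int]]:
    chunks, cur = list[list[int]](), list[int]()
    for tks in items:
        if cur and len(cur) + len(tks) > budget:
            chunks.append(cur)
            cur = list(tks)
        else:
            cur = cur + tks
    if cur:
        chunks.append(cur)
    return chunks

assert pack_chunks([[1] * 3, [2] * 3, [3] * 2], budget=5) == [[1, 1, 1], [2, 2, 2, 3, 3]]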
| ===========unchanged ref 0===========
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.5"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 12
ref_chunk_overlap: int = 32
at: coeditor.c3problem.C3ProblemTokenizer._group_encode_unchanged_refs
sort_key(e: PyDefinition)
at: coeditor.c3problem.PyDefinition
start_locs: set[tuple[int, int]]
signatures: set[str]
at: coeditor.c3problem.PyDefinition.__post_init__
self.parent = ".".join(split_dots(self.full_name)[:-1])
at: coeditor.common
TokenSeq = list[Token]
TAB = " " * 4
at: coeditor.encoding
encode_lines_join(text: str) -> TokenSeq
TruncateAt()
truncate_section(sec: TokenSeq, direction: TruncateAt.Value, limit: int, add_bos: bool=True, inplace: bool=False) -> TokenSeq
at: coeditor.encoding.TruncateAt
Value = int
Left = 0
Right = 1
at: coeditor.tk_array
TkArray()
at: coeditor.tk_array.TkArray
new(tks: Sequence[int]) -> "TkArray"
at: textwrap
indent(text: str, prefix: str, predicate: Optional[Callable[[str], bool]]=...) -> str
at: typing.Mapping
values() -> ValuesView[_VT_co]
===========changed ref 0===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
"""
## Change log
+ - 2.5: Sort used references by path.
- 2.4: Encode each changed reference individually. Encode signatures for unchanged.
"""
+ VERSION = "2.5"
- VERSION = "2.4"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 12
ref_chunk_overlap: int = 32
===========changed ref 1===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class LineUsageAnalysis:
+ line2usages: Mapping[int, Sequence[PyDefinition]]
- line2usages: Mapping[int, set[PyDefinition]]
===========changed ref 2===========
# module: coeditor.c3problem
+ @dataclass
- @dataclass(frozen=True)
class PyDefinition:
+ def __post_init__(self):
+ self.parent = ".".join(split_dots(self.full_name)[:-1])
+
===========changed ref 3===========
# module: coeditor.c3problem
class C3GeneratorCache:
def get_relevant_unchanged(
self,
this_change: ChangedCodeSpan,
line_usages: LineUsageAnalysis,
):
module = this_change.module
# parent defs are also considered as used
+ name2def = dict[PyFullName, PyDefinition]()
- sorted_defs = dict[PyFullName, PyDefinition]()
all_lines = set(this_change.line_range.to_range())
all_lines.update(this_change.headers[-1].line_range.to_range())
for l in all_lines:
+ for pydef in line_usages.line2usages.get(l, []):
- for pydef in line_usages.line2usages.get(l, set()):
- if (
+ if pydef.full_name.startswith(module) and any(
- pydef.full_name.startswith(module)
- and pydef.start_pos
- and pydef.start_pos[0] in all_lines
+ l in all_lines for l in pydef.start_locs
):
# skip self references
continue
+ name2def.setdefault(pydef.full_name, pydef)
- sorted_defs.setdefault(pydef.full_name, pydef)
+ return {k: name2def[k] for k in sorted(name2def.keys())}
- return sorted_defs
-
===========changed ref 4===========
# module: coeditor.c3problem
+ @dataclass
- @dataclass(frozen=True)
class PyDefinition:
+ full_name: PyFullName
+ start_locs: set[tuple[int, int]]
+ signatures: set[str]
- """Note that the module and positions can be referring to either the import
- statement or the actual definition."""
- full_name: PyFullName
- start_pos: tuple[int, int] | None
- end_pos: tuple[int, int] | None
- signatures: str
-
===========changed ref 5===========
# module: coeditor.c3problem
+ @dataclass
- @dataclass(frozen=True)
class PyDefinition:
+ def update(self, name: classes.BaseName):
+ if name.type not in ("function", "statement", "class"):
+ return
+ assert_eq(name.full_name, self.full_name)
+ if loc := name.get_definition_start_position():
+ self.start_locs.add(loc)
+
+ if name.type == "statement":
+ stmt = name._name.tree_name.search_ancestor("simple_stmt")
+ if stmt:
+ assert isinstance(stmt, ptree.PythonNode)
+ self.signatures.add(stmt.get_code(include_prefix=False).strip())
+ return
+
+ for sig in name._get_signatures(for_docstring=True):
+ self.signatures.add(sig.to_string().strip())
+
===========changed ref 6===========
# module: coeditor.c3problem
+ @dataclass
- @dataclass(frozen=True)
class PyDefinition:
- @staticmethod
- def from_name(name: classes.BaseName) -> Iterable["PyDefinition"]:
- if (
- not name.in_builtin_module()
- and (full_name := name.full_name)
- and (signatures := name._get_docstring_signature() or name.get_line_code())
- ):
- full_name = PyFullName(full_name)
- start_pos = name.get_definition_start_position()
- end_pos = name.get_definition_end_position()
- signatures = name._get_docstring_signature()
- if name.type == "module":
- return
- if signatures:
- signatures = signatures
- else:
- signatures = name.get_line_code()
-
- yield PyDefinition(full_name, start_pos, end_pos, signatures)
-
===========changed ref 7===========
# module: coeditor.c3problem
class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
"""
### Change log
+ - v2.9: Add sibling usages for class members. Improve statement signatures.
- v2.8: Fix module usages in `pre_edit_analysis`. Sort changes using heuristic.
- v2.7: Use new PyDefiniton that includes signatures.
- v2.6: fix missing changes in `JModuleChanges`. Rename to edit_line_ids.
- v2.5: fix newline encoding bug.
- v2.4: fix buggy encoding of `Added` and `Deleted` changes.
- v2.3: always generate problems with full editing range and move the problem
splitting logic elsewhere. Also changed the data format of `ChangedCodeSpan`.
"""
+ VERSION = "2.9"
- VERSION = "2.8"
# change spans with more than this many lines will be ignored
max_span_lines: int = 500
===========changed ref 8===========
# module: coeditor.common
+ TAB = " " * 4
SEP = "-" * 80
HtmlCode = str
|
coeditor.encoding/TokenizedEdit.show | Modified | temp-1 | 31c7e81854ab4b6527ea4770eb5cba7041b6f0a8 | Fix exact match accuracy. | <0>:<add> origin_line = self.BAD_DELETE
| # module: coeditor.encoding
class TokenizedEdit(ABC):
def show(self, pred_tks: TokenSeq | None = None) -> str:
<s>tks[1:])
elif tks and tks[0] == Del_id:
return "- " + decode_tokens(tks[1:])
else:
return " " + decode_tokens(tks)
def show_extra_tokens(tks: TokenSeq, main_tk_lines: dict[Token, TokenSeq]):
segs = output_ids_as_seqs(tks)
lines = []
for k, seg in segs.items():
if not seg:
continue # skip empty lines
if seg[-1] == Del_id:
# show the deleted line
+ section_lines = tk_splitlines(main_tk_lines.get(k, TokenSeq()))
- origin_line = tk_splitlines(main_tk_lines.get(k, []))[0]
+ if section_lines:
+ origin_line = section_lines[0]
+ else:
<0> origin_line.append(Newline_id)
seg = seg + origin_line
label = show_label(id_map.get(k, -1))
lines.append(f"{label}:{indent(decode_tokens(seg), ' ' * 4).lstrip()}")
return "".join(lines)
def show_ctx(ctx_tks: TokenSeq):
lines = tk_splitlines(ctx_tks)
return "\n".join(" " + show_content(l) for l in lines)
main_segs = output_ids_as_seqs(self.main_tks)
id_map = {k: i for i, k in enumerate(main_segs)}
main_lines = list[str]()
for line_tks in tk_splitlines(self.main_tks):
if line_tks and is_extra_id(line_tks[0]):
prefix = show_label(id_map.get(line_</s> | ===========above chunk 0===========
# module: coeditor.encoding
class TokenizedEdit(ABC):
def show(self, pred_tks: TokenSeq | None = None) -> str:
# offset: -1
def show_label(i: int):
return f" <{i}>" if i <= 9 else f"<{i}>"
def show_content(tks: TokenSeq):
if tks and tks[0] == Add_id:
return "+ " + decode_tokens(tks[1:])
elif tks and tks[0] == Del_id:
return "- " + decode_</s>
===========below chunk 0===========
# module: coeditor.encoding
class TokenizedEdit(ABC):
def show(self, pred_tks: TokenSeq | None = None) -> str:
# offset: 1
<s> is_extra_id(line_tks[0]):
prefix = show_label(id_map.get(line_tks[0], -1))
line = prefix + show_content(line_tks[1:])
else:
line = " " + show_content(line_tks)
main_lines.append(line)
pred_lines = (
["========Prediction========", f"{show_extra_tokens(pred_tks, main_segs)}"]
if pred_tks
else []
)
outputs = [
"-" * 80,
*self.meta_data_lines(),
"========Ground Truth========",
show_extra_tokens(self.output_tks, main_segs),
*pred_lines,
"========Main Code========",
"\n".join(main_lines),
] + [
f"==========={name}===========\n" + show_ctx(tks)
for name, tks in self.all_ctxs().items()
]
return "\n".join(outputs)
===========unchanged ref 0===========
at: coeditor.common
Token = int
TokenSeq = list[Token]
at: coeditor.encoding
Add_id = get_tk_id(Add)
Del_id = get_tk_id(Del)
Newline_id = get_tk_id("\n")
is_extra_id(tk: int) -> bool
tk_splitlines(tks: TokenSeq)
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
output_ids_as_seqs(output_ids: Iterable[Token]) -> dict[Token, TokenSeq]
at: coeditor.encoding.TokenizedEdit
input_tks: TokenSeq
output_tks: TokenSeq
main_tks: TokenSeq
path: ProjectPath
change_type: Change[None]
BAD_DELETE = encode_single_line("((bad delete))")
meta_data_lines() -> list[str]
at: textwrap
indent(text: str, prefix: str, predicate: Optional[Callable[[str], bool]]=...) -> str
at: typing.Mapping
get(key: _KT) -> Optional[_VT_co]
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
===========changed ref 0===========
# module: coeditor.encoding
class TokenizedEdit(ABC):
input_tks: TokenSeq
output_tks: TokenSeq
main_tks: TokenSeq
path: ProjectPath
change_type: Change[None]
+ BAD_DELETE = encode_single_line("((bad delete))")
+ |
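The `show` method above renders predictions by grouping output tokens under the `<extra_id_k>` sentinel that precedes each edited line (what `output_ids_as_seqs` does). A minimal sketch of that grouping, with hypothetical integer ids standing in for sentinel tokens:

def group_by_sentinel(tokens: list[int], is_sentinel) -> dict[int, list[int]]:
    groups: dict[int, list[int]] = {}
    key = None
    for tk in tokens:
        if is_sentinel(tk):
            key = tk            # start a new segment at each sentinel
            groups[key] = []
        elif key is not None:
            groups[key].append(tk)
    return groups

assert group_by_sentinel([100, 7, 8, 101, 9], lambda t: t >= 100) == {100: [7, 8], 101: [9]}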
coeditor.model/RetrievalDecodingResult.exact_match_accuracy | Modified | temp-1 | 31c7e81854ab4b6527ea4770eb5cba7041b6f0a8 | Fix exact match accuracy. | <0>:<add> ex2correct[i] = is_correct
| # module: coeditor.model
@dataclass
class RetrievalDecodingResult:
def exact_match_accuracy(self) -> tuple[CountedSum, dict[int, bool]]:
ex2correct = dict[int, bool]()
bad_probs = list[C3Problem]()
for i, mp in enumerate(self.predictions):
prob = self.problems[i]
original = prob.span.original.tolist()
pred_delta = TkDelta.from_output_tks(prob.edit_line_ids, mp["output_ids"])
label_delta = TkDelta.from_output_tks(prob.edit_line_ids, mp["labels"])
if not prob.edit_line_ids:
bad_probs.append(prob)
continue
- line_shift = prob.edit_line_ids[0]
+ pred_change = pred_delta.apply_to_change(original)
- pred_change = pred_delta.shifted(line_shift).apply_to_change(original)
+ label_change = label_delta.apply_to_change(original)
- label_change = label_delta.shifted(line_shift).apply_to_change(original)
pred_code = tokens_to_change(pred_change).after
label_code = tokens_to_change(label_change).after
+ is_correct = code_equal(pred_code, label_code)
- ex2correct[i] = code_equal(pred_code, label_code)
<0> correct_count = CountedSum(sum(ex2correct.values()), len(ex2correct))
if bad_probs:
cprint("yellow", "Number of problems with no edits:", len(bad_probs))
for prob in bad_probs[:5]:
print(prob.summary())
return correct_count, ex2correct
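The accuracy above is just a weighted count over per-example booleans (`CountedSum(sum(...), len(...))`). A stripped-down sketch of the same bookkeeping, with plain string equality standing in for `code_equal`:

def exact_match(preds: dict[int, str], labels: dict[int, str]) -> float:
    correct = {i: preds[i] == labels[i] for i in preds}
    return sum(correct.values()) / max(len(correct), 1)

assert exact_match({0: "x = 1", 1: "y = 2"}, {0: "x = 1", 1: "y = 3"}) == 0.5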
| ===========unchanged ref 0===========
at: coeditor._utils
cprint(color: str, *elems, sep: Optional[str]=..., end: Optional[str]=..., file: Optional[SupportsWrite[str]]=..., flush: bool=...)
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
summary() -> str
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
at: coeditor.common
CountedSum = WeightedSum[int, int]
code_equal(code1: str, code2: str) -> bool
at: coeditor.encoding
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
tokens_to_change(tokens: TokenSeq) -> Modified[str]
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
apply_to_change(change: TokenSeq) -> TokenSeq
from_output_tks(edit_line_ids: Sequence[int], tks: TokenSeq, allow_truncated_tks: bool=True) -> "TkDelta"
===========unchanged ref 1===========
at: coeditor.model.RetrievalDecodingResult
eval_args: dict
problems: Sequence[C3Problem]
predictions: Sequence[RetrievalModelPrediction]
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
===========changed ref 0===========
# module: coeditor.encoding
class TokenizedEdit(ABC):
input_tks: TokenSeq
output_tks: TokenSeq
main_tks: TokenSeq
path: ProjectPath
change_type: Change[None]
+ BAD_DELETE = encode_single_line("((bad delete))")
+
===========changed ref 1===========
# module: coeditor.encoding
class TokenizedEdit(ABC):
def show(self, pred_tks: TokenSeq | None = None) -> str:
def show_label(i: int):
return f" <{i}>" if i <= 9 else f"<{i}>"
def show_content(tks: TokenSeq):
if tks and tks[0] == Add_id:
return "+ " + decode_tokens(tks[1:])
elif tks and tks[0] == Del_id:
return "- " + decode_tokens(tks[1:])
else:
return " " + decode_tokens(tks)
def show_extra_tokens(tks: TokenSeq, main_tk_lines: dict[Token, TokenSeq]):
segs = output_ids_as_seqs(tks)
lines = []
for k, seg in segs.items():
if not seg:
continue # skip empty lines
if seg[-1] == Del_id:
# show the deleted line
+ section_lines = tk_splitlines(main_tk_lines.get(k, TokenSeq()))
- origin_line = tk_splitlines(main_tk_lines.get(k, []))[0]
+ if section_lines:
+ origin_line = section_lines[0]
+ else:
+ origin_line = self.BAD_DELETE
origin_line.append(Newline_id)
seg = seg + origin_line
label = show_label(id_map.get(k, -1))
lines.append(f"{label}:{indent(decode_tokens(seg), ' ' * 4).lstrip()}")
return "".join(lines)
def show_ctx(ctx_tks: TokenSeq):
lines = tk_splitlines(ctx_tks)
return "\n".join(" " + show_content(l) for l in lines)
main_segs = output_ids_as_seqs(self.main_tks)
id_map = {k:</s>
===========changed ref 2===========
# module: coeditor.encoding
class TokenizedEdit(ABC):
def show(self, pred_tks: TokenSeq | None = None) -> str:
# offset: 1
<s> main_segs = output_ids_as_seqs(self.main_tks)
id_map = {k: i for i, k in enumerate(main_segs)}
main_lines = list[str]()
for line_tks in tk_splitlines(self.main_tks):
if line_tks and is_extra_id(line_tks[0]):
prefix = show_label(id_map.get(line_tks[0], -1))
line = prefix + show_content(line_tks[1:])
else:
line = " " + show_content(line_tks)
main_lines.append(line)
pred_lines = (
["========Prediction========", f"{show_extra_tokens(pred_tks, main_segs)}"]
if pred_tks
else []
)
outputs = [
"-" * 80,
*self.meta_data_lines(),
"========Ground Truth========",
show_extra_tokens(self.output_tks, main_segs),
*pred_lines,
"========Main Code========",
"\n".join(main_lines),
] + [
f"==========={name}===========\n" + show_ctx(tks)
for name, tks in self.all_ctxs().items()
]
return "\n".join(outputs)
|
coeditor.model/RetrievalEditorModel.train_on_data | Modified | temp-1 | 43582c1474adb11a85ed0bf327e072ff7419fba4 | Update training scripts. | <0>:<add> save_steps=max(500, min(5000, epoch_steps // 5)),
| # module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
def train_on_data(
self,
training_name: str,
train_loader: "C3DataLoader",
eval_loader: "C3DataLoader",
train_args: "TrainingArgs",
) -> None:
<s>dataloader))
n_samples = metrics["loss_per_ex"].weight
metrics = {
f"{metric_key_prefix}_{k}": v.mean() for k, v in metrics.items()
}
return EvalLoopOutput(
predictions=tuple(),
label_ids=tuple(),
metrics=metrics,
num_samples=n_samples,
)
epoch_steps = len(train_loader)
cprint("blue", "Number of training batches (estimate):", epoch_steps)
trainer_args = Seq2SeqTrainingArguments(
output_dir=str(train_dir),
overwrite_output_dir=True,
evaluation_strategy="epoch",
save_strategy="steps",
- save_steps=max(1, min(5000, epoch_steps // 5)),
<0> logging_steps=max(1, min(1000, epoch_steps // 10)),
num_train_epochs=train_args.max_train_epochs,
save_total_limit=3,
lr_scheduler_type=train_args.lr_scheduler_type,
learning_rate=train_args.learning_rate,
weight_decay=train_args.weight_decay,
metric_for_best_model="loss_per_tk",
greater_is_better=False,
fp16=True,
# load_best_model_at_end=True,
push_to_hub=False,
report_to=["wandb"],
disable_tqdm=True,
# torchdynamo="inductor", # use compiled model
)
trainer = DynamicTrainer(
self,
trainer_args,
# callbacks=[EarlyStoppingCallback(</s> | ===========above chunk 0===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
def train_on_data(
self,
training_name: str,
train_loader: "C3DataLoader",
eval_loader: "C3DataLoader",
train_args: "TrainingArgs",
) -> None:
# offset: -1
train_dir = get_model_dir(trained=False) / training_name
eval_loader.tqdm_args = {"disable": True}
model = self
# model = torch.compile(self.to("cuda")) # pytorch doesn't support python 3.11 yet.
class DynamicTrainer(Seq2SeqTrainer):
def get_train_dataloader(self):
return train_loader
def get_eval_dataloader(self, eval_dataset):
return eval_loader
def evaluation_loop(
self,
dataloader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
metrics = model.eval_loss_on_loader(as_any(dataloader))
n_samples = metrics["loss_per_ex"].weight
metrics = {
f"{</s>
===========below chunk 0===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
def train_on_data(
self,
training_name: str,
train_loader: "C3DataLoader",
eval_loader: "C3DataLoader",
train_args: "TrainingArgs",
) -> None:
# offset: 1
<s> )
trainer = DynamicTrainer(
self,
trainer_args,
# callbacks=[EarlyStoppingCallback(early_stopping_patience=1)],
)
trainer.train()
save_dir = get_model_dir(trained=True) / training_name
self.save(save_dir)
print("Model saved to:", save_dir)
===========unchanged ref 0===========
at: coeditor._utils
as_any(x) -> Any
cprint(color: str, *elems, sep: Optional[str]=..., end: Optional[str]=..., file: Optional[SupportsWrite[str]]=..., flush: bool=...)
at: coeditor.common
get_model_dir(trained=True) -> Path
at: coeditor.common.WeightedSum
sum: V
weight: W
mean() -> float
at: coeditor.model.C3DataLoader
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
tqdm_args: dict | None = None
chunk_size: int = 1000
workers: int = DefaultWorkers
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
eval_loss_on_loader(dataloader: "C3DataLoader")
decorate_autocast(dataloader: "C3DataLoader")
save(save_dir: Path, is_main_process: bool=True, state_dict: Optional[dict]=None, save_function: Callable=torch.save, push_to_hub: bool=False, max_shard_size: Union[int, str]="10GB", safe_serialization: bool=False, variant: Optional[str]=None, /, *, is_main_process: bool=True, state_dict: Optional[dict]=None, save_function: Callable=torch.save, push_to_hub: bool=False, max_shard_size: Union[int, str]="10GB", safe_serialization: bool=False, variant: Optional[str]=None, private: Optional[bool]=None, use_auth_token: Optional[Union[bool, str]]=None, repo_url: Optional[str]=None, organization: Optional[str]=None)
===========unchanged ref 1===========
at: coeditor.model.TrainingArgs
learning_rate: float = 2e-5
weight_decay: float = 0.01
max_train_epochs: int = 3
reinit_weights: bool = False
quicktest: bool = False
lr_scheduler_type: SchedulerType = SchedulerType.LINEAR
at: transformers.trainer.Trainer
get_train_dataloader(self) -> DataLoader
get_eval_dataloader(self, eval_dataset: Optional[Dataset]=None) -> DataLoader
train(resume_from_checkpoint: Optional[Union[str, bool]]=None, trial: Union["optuna.Trial", Dict[str, Any]]=None, ignore_keys_for_eval: Optional[List[str]]=None, **kwargs)
evaluation_loop(self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str="eval") -> EvalLoopOutput
at: transformers.trainer_seq2seq
Seq2SeqTrainer(model: Union["PreTrainedModel", nn.Module]=None, args: "TrainingArguments"=None, data_collator: Optional["DataCollator"]=None, train_dataset: Optional[Dataset]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, tokenizer: Optional["PreTrainedTokenizerBase"]=None, model_init: Optional[Callable[[], "PreTrainedModel"]]=None, compute_metrics: Optional[Callable[["EvalPrediction"], Dict]]=None, callbacks: Optional[List["TrainerCallback"]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None)
at: transformers.trainer_utils
EvalLoopOutput(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
|
scripts.train_model/train_model | Modified | temp-1 | 43582c1474adb11a85ed0bf327e072ff7419fba4 | Update training scripts. | <0>:<add> pprint(train_loader.get_batch_stats())
| # module: scripts.train_model
def train_model(
dataset_name: str,
model_variant: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
eval_only: bool = False,
):
<s>_loader.get_batch_stats())
warmup_targs = copy.deepcopy(train_args)
warmup_targs.learning_rate *= 4
warmup_targs.max_train_epochs = 1
model.train_on_data(model_name, warmup_loader, eval_loader, warmup_targs)
with timed_action("Fine-tune Training"):
# we attach the problem transform to the dataloader to generate data on-the-fly
train_loader = C3DataLoader(
datasets["train"],
encoder.problem_tranform,
train_tkn,
batch_args,
shuffle=True,
desc="training",
)
+ print("Fine-tune batch stats:")
<0> model.train_on_data(model_name, train_loader, eval_loader, train_args)
model.to("cuda")
with timed_action("Loss Evaluation"):
eval_result = model.eval_loss_on_loader(eval_loader)
eval_dict = {f"test/{k}": v.average() for k, v in eval_result.items()}
wandb.log(eval_dict)
max_saved_samples = 300
with timed_action("Accuracy Evaluation"):
dec_result = model.predict_on_data(
datasets["test"], eval_tkn, eval_batch_args, dec_args
)
pickle_dump(get_model_dir() / model_name / "dec_result.pkl", dec_result)
exact_acc, exact_correct_map = dec_result.exact_</s> | ===========above chunk 0===========
# module: scripts.train_model
def train_model(
dataset_name: str,
model_variant: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
eval_only: bool = False,
):
# offset: -1
<s> use all visible GPUs for training."
)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
train_tkn = encoder.edit_tokenizer
eval_tkn = copy.deepcopy(train_tkn)
eval_tkn.max_query_tks *= 2
eval_tkn.max_output_tks *= 2
eval_tkn.max_ref_tks_sum *= 2
eval_loader = C3DataLoader(
datasets["valid"], None, eval_tkn, eval_batch_args, shuffle=False, desc="eval"
)
if not eval_only:
with timed_action("Warm-up Training"):
warmup_bargs = copy.deepcopy(batch_args)
warmup_bargs.min_queries *= 4
warmup_bargs.max_queries *= 2
warm_up_data = random_subset(
datasets["train"], len(datasets["train"]) // 4, rng=42
)
warmup_tkn = copy.copy(train_tkn)
warmup_tkn.max_ref_tks_sum //= 3
warmup_loader = C3DataLoader(
warm_up_data,
encoder.problem_tranform,
warmup_tkn,
warmup_bargs,
shuffle=True,
desc="warm-up training",
)
+ print("Warmup batch stats:")
+ pprint(warmup_loader.get_batch_stats())
warmup_targs = copy.deepcopy(train_args)
warm</s>
===========above chunk 1===========
# module: scripts.train_model
def train_model(
dataset_name: str,
model_variant: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
eval_only: bool = False,
):
# offset: -2
<s>batch_args": batch_args,
"train_args": train_args,
"dec_args": dec_args,
}.items()
}
project = "Coeditor" if not train_args.quicktest else "Coeditor-quicktest"
if eval_only:
project = "eval-" + project
wandb.init(dir="..", project=project, name=model_name, config=config_dict)
if train_args.quicktest:
print("Using fewer data for quick test.")
n_quick_exs = 20
datasets = C3ProblemDataset(
train=datasets["train"][:n_quick_exs],
valid=datasets["valid"][:n_quick_exs],
test=datasets["test"][:n_quick_exs],
)
if not eval_only:
model = RetrievalEditorModel.from_code_t5(
"base", reuse_embed=True, reinit_weights=train_args.reinit_weights
)
else:
model = RetrievalEditorModel.load(get_model_dir() / model_name)
if os.getenv("CUDA_VISIBLE_DEVICES") is None:
warnings.warn(
"CUDA_VISIBLE_DEVICES not set, using 0. Note that "
"the Huggingface Trainer will use all visible GPUs for training."
)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
</s>
===========above chunk 2===========
# module: scripts.train_model
def train_model(
dataset_name: str,
model_variant: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
eval_only: bool = False,
):
# offset: -3
# model_variant = "-file"
model_name = f"coeditor-{dataset_name}"
model_name += model_variant
dec_args = DecodingArgs()
if train_args.quicktest:
model_name = "quicktest-" + model_name
if not eval_only:
check_save_dir(model_name)
# problems will be transformed and saved for valid and test but not train.
datasets = make_or_load_dataset(
dataset_name,
encoder.change_processor,
encoder.problem_tranform,
remake_problems=recreate_data,
)
config_dict = {
k: get_modified_args(v)
for k, v in {
"edit_tokenizer": encoder.edit_tokenizer.get_args(),
</s>
===========below chunk 0===========
# module: scripts.train_model
def train_model(
dataset_name: str,
model_variant: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
eval_only: bool = False,
):
# offset: 1
<s> "dec_result.pkl", dec_result)
exact_acc, exact_correct_map = dec_result.exact_match_accuracy()
wandb.log({"test/exact-acc": exact_acc.average()})
out_dir = get_model_dir() / model_name / "exact_match_samples"
dec_result.save_examples_to_dir(
out_dir, random_subset(exact_correct_map, max_saved_samples, rng=42)
)
cprint("blue", "Exact-match samples saved to:", out_dir)
return model
|
coeditor.service/EditPredictionService._suggest_edit_two_steps | Modified | temp-1 | 5b896681fc006d5248b4a146b1f31189dbf31e7a | Service upgrade to support extension v0.3.5. - Add new server method `initialize` and `get_result`. - Support output line status. | <0>:<add> output_status=output_status,
| # module: coeditor.service
@dataclass
class EditPredictionService:
def _suggest_edit_two_steps(
self,
file: RelPath,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> tuple[_EditRegion, Callable[[], ServiceResponse]]:
<s>.after),
)
)
- diff_ops = get_diff_ops(
- splitlines(pred_change.before), splitlines(pred_change.after)
- )
- line_status = dict[int, StatusTag]()
- for tag, (i1, i2), _ in diff_ops:
- if tag == "A":
- line_status[i1] = "A"
- continue
- for i in range(i1, i2):
- if i not in line_status:
- line_status[i] = tag
+ input_status, change_status = compute_line_status(pred_change)
+ input_status = [
- line_status = [
+ (i + target_lines[0], tag) for i, tag in input_status.items()
- (i + target_lines[0], tag) for i, tag in line_status.items()
]
+ output_status = list(change_status.items())
suggestion = EditSuggestion(
score=pred.score,
change_preview=preview,
new_code=pred_change.after,
+ input_status=input_status,
- line_status=line_status[: len(target_lines)],
<0> )
suggestions.append(suggestion)
return ServiceResponse(
target_file=str(self.project / file),
edit_start=(target_lines[0], 0),
edit_end=(target_lines[-1] + 1, 0),
target_lines=target.target_lines,
input_code=target.current_code,
suggestions=suggestions,
)
return target, next_step
| ===========above chunk 0===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def _suggest_edit_two_steps(
self,
file: RelPath,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> tuple[_EditRegion, Callable[[], ServiceResponse]]:
# offset: -1
<s>_ids"][0]
references = batch["references"]
output_truth = batch["labels"][0]
print(f"Writing logs to: {log_dir}")
for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt").open("w") as f:
pred_tks = pred.out_tks
score = pred.score
print(f"{problem.edit_line_ids=}", file=f)
print(f"{len(input_tks)=}", file=f)
print(f"{len(references)=}", file=f)
print(f"Solution score: {score:.3g}", file=f)
print(f"Marginalized samples:", pred.n_samples, file=f)
pred = RetrievalModelPrediction(
input_ids=input_tks,
output_ids=pred_tks,
labels=output_truth,
references=references,
)
pred_str = RetrievalDecodingResult.show_prediction(
problem, pred
)
print(pred_str, file=f)
target_lines = target.target_lines
suggestions = list[EditSuggestion]()
for pred in predictions:
pred_change = self.apply_edit_to_elem(
target,
problem,
pred.out_tks,
)
preview = "\n".join(
compute_line_diffs_fast(
splitlines(pred_change.before),
splitlines(pred_change.after),
)
)
- diff_ops = get_diff_ops(
- splitlines(pred_change</s>
===========above chunk 1===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def _suggest_edit_two_steps(
self,
file: RelPath,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> tuple[_EditRegion, Callable[[], ServiceResponse]]:
# offset: -2
timed = self.tlogger.timed
with timed("get c3 problem"):
problem, span = self.detector.get_problem(file, edit_lines)
with timed("tokenize c3 problem"):
tk_prob = self.c3_tkn.tokenize_problem(problem)
target = self.get_target_code(span.code, problem, tk_prob)
def next_step():
batch = C3DataLoader.pack_batch([tk_prob])
original = problem.span.original.tolist()
with timed("run model"), torch.autocast("cuda"):
predictions = self.model.predict_on_batch(
batch, [original], self.dec_args, self.show_max_solutions
)
assert_eq(len(predictions), 1)
predictions = predictions[0]
assert predictions
if log_dir is not None:
log_dir.mkdir(exist_ok=True)
input_tks = batch["input_ids"][0]
references = batch["references"]
output_truth = batch["labels"][0]
</s>
===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
compute_line_diffs_fast(before: Sequence[str], after: Sequence[str])
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
timed(self, name: str)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.7"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 16
ref_chunk_overlap: int = 32
disable_builtin_defs: bool = True
disable_unchanged_refs: bool = False
current_code_only: bool = False
tokenize_problem(problem: C3Problem) -> TkC3Problem
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
at: coeditor.common
RelPath = NewType("RelPath", Path)
splitlines(text: str) -> list[str]
===========unchanged ref 1===========
at: coeditor.model
RetrievalModelPrediction(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
RetrievalModelPrediction(**kwargs: _VT)
RetrievalModelPrediction(map: Mapping[_KT, _VT], **kwargs: _VT)
C3DataLoader(all_probs: Sequence[C3Problem], transform: C3ProblemTransform | None, tokenizer: C3ProblemTokenizer, batch_args: BatchArgs, shuffle: bool, desc: str, tqdm_args: dict | None=None, chunk_size: int=1000, workers: int=DefaultWorkers)
at: coeditor.model.C3DataLoader
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
tqdm_args: dict | None = None
chunk_size: int = 1000
workers: int = DefaultWorkers
pack_batch(probs: Sequence[TkC3Problem])
at: coeditor.model.PredictedChange
change: Modified[str]
out_tks: TokenSeq
score: float
n_samples: int
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
decorate_autocast(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1)
predict_on_batch(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1) -> list[list[PredictedChange]]
at: coeditor.service
EditSuggestion(map: Mapping[_KT, _VT], **kwargs: _VT)
EditSuggestion(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
EditSuggestion(**kwargs: _VT)
|
scripts.start_server/start_server | Modified | temp-1 | 5b896681fc006d5248b4a146b1f31189dbf31e7a | Service upgrade to support extension v0.3.5. - Add new server method `initialize` and `get_result`. - Support output line status. | <0>:<add> response = cont.get()
| # module: scripts.start_server
def start_server(device, port: int, print_stats: bool = True):
<s> <add> @method
+ @handle_error
+ def submit_problem(
+ id: int, project: str, file: str, lines: Sequence[int] | int, writeLogs: bool
+ ):
+ target_dir = Path(project).resolve()
+ service = services[target_dir]
- services[target_dir] = service
print(f"Suggesting edit for lines {lines} in {file}")
path = Path(file)
if Path.is_absolute(path):
path = path.relative_to(target_dir)
path = to_rel_path(path)
service.tlogger.clear()
log_dir = service.project / ".coeditor_logs" if writeLogs else None
+ region, f = service._suggest_edit_two_steps(path, lines, log_dir)
- region, cont = service._suggest_edit_two_steps(path, lines, log_dir)
+ if target_dir in tasks and tasks[target_dir].id > id:
+ return Success("Skipped")
+ tasks[target_dir] = LazyVal(f, id)
- continuations[target_dir] = cont
return Success(region.target_lines)
@method
@handle_error
+ def get_result(id: int, project: str):
- def get_result(project: str):
target_dir = Path(project).resolve()
+ cont = tasks[target_dir]
+ if cont.id > id:
+ return Success("Skipped")
- f = continuations.pop(target_dir)
- response = f()
<0> service = services[target_dir]
if print_stats:
print("Runtime stats:")
display(service.tlogger.as_dataframe())
return Success(response.to_json())
print(f"Starting suggestion server at localhost:{port}")
serve("localhost", port)
| ===========above chunk 0===========
# module: scripts.start_server
def start_server(device, port: int, print_stats: bool = True):
# offset: -1
<s> (
+ get_model_dir(trained=False)
+ / "coeditor-xl-c3-dropout-v1.5"
+ / "checkpoint-230000"
+ )
model = RetrievalEditorModel.load(model_path)
model.to(device)
print(f"Model '{model_path}' loaded on device:", device)
dec_args = DecodingArgs(do_sample=False, num_beams=4)
services = dict[Path, EditPredictionService]()
+ tasks = dict[Path, LazyVal[ServiceResponse]]()
- continuations = dict[Path, Callable[[], ServiceResponse]]()
def handle_error(f, *args, **kwargs):
@wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
traceback.print_exception(e)
return Error(code=1, message=repr(e))
return wrapper
@method
@handle_error
- def submit_problem(
- project: str, file: str, lines: Sequence[int] | int, writeLogs: bool
- ):
+ def initialize(project: str):
target_dir = Path(project).resolve()
+ tasks.pop(target_dir, None)
+
+ if target_dir not in services:
- if (service := services.get(target_dir)) is None:
with timed_action(f"Create service for project: {target_dir}"):
detector = ChangeDetector(target_dir)
+ services[target_dir] = EditPredictionService(
- service = EditPredictionService(
detector,
model,
dec_args=dec_args,
)
+
+ return Success("OK")
+
+ @method
+ @handle_error
+ def submit_problem(
+ id: int, project: str,</s>
===========above chunk 1===========
# module: scripts.start_server
def start_server(device, port: int, print_stats: bool = True):
# offset: -2
# this newer model is trained with comments
+ # model_path = "MrVPlusOne/coeditor-xl-c3-dropout-v1.5"
- model_path = "MrVPlusOne/coeditor-xl-c3-dropout-v1.4"
+ model_path = (
+ get_model_dir(trained=False)
+ / "coeditor-xl-c3-dropout-v</s>
===========unchanged ref 0===========
at: coeditor._utils
timed_action(name: str, silent: bool=False)
at: coeditor.common
T1 = TypeVar("T1")
get_model_dir(trained=True) -> Path
at: coeditor.model
DecodingArgs(max_output_tks: int=512, do_sample: bool=False, top_p: float=0.9, num_beams: Optional[int]=1, length_penalty: float=0.0, marginalize_samples: int=1)
RetrievalEditorModel(config: T5Config)
at: coeditor.model.DecodingArgs
max_output_tks: int = 512
do_sample: bool = False
top_p: float = 0.9
num_beams: Optional[int] = 1
length_penalty: float = 0.0
marginalize_samples: int = 1
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
load(save_dir: Path | str) -> "RetrievalEditorModel"
at: coeditor.service
ChangeDetector(project: Path, untracked_as_additions: bool=True, ignore_dirs: Collection[str]=field(default_factory=lambda: DefaultIgnoreDirs), max_lines_to_edit: int=30)
ServiceResponse(target_file: str, target_project: str, edit_start: tuple[int, int], edit_end: tuple[int, int], target_lines: Sequence[int], input_code: str, suggestions: list[EditSuggestion])
EditPredictionService()
at: functools
wraps(wrapped: _AnyCallable, assigned: Sequence[str]=..., updated: Sequence[str]=...) -> Callable[[_T], _T]
at: jsonrpcserver.methods
method(f: Optional[Method]=None, name: Optional[str]=None) -> Callable[..., Any]
===========unchanged ref 1===========
at: jsonrpcserver.result
Success(fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any) -> Either[ErrorResult, SuccessResult]
Error(fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any) -> Either[ErrorResult, SuccessResult]
at: pathlib
Path()
at: pathlib.Path
__slots__ = ()
resolve(strict: bool=...) -> _P
at: traceback
print_exception(etype: Optional[Type[BaseException]], value: Optional[BaseException], tb: Optional[TracebackType], limit: Optional[int]=..., file: Optional[IO[str]]=..., chain: bool=...) -> None
at: transformers.modeling_utils.PreTrainedModel
config_class = None
base_model_prefix = ""
main_input_name = "input_ids"
_auto_class = None
_no_split_modules = None
_skip_keys_device_placement = None
_keep_in_fp32_modules = None
_keys_to_ignore_on_load_missing = None
_keys_to_ignore_on_load_unexpected = None
_keys_to_ignore_on_save = None
_tied_weights_keys = None
is_parallelizable = False
supports_gradient_checkpointing = False
to(device: Optional[Union[int, device]]=..., dtype: Optional[Union[dtype, str]]=..., non_blocking: bool=..., /, *, device: Optional[Union[int, device]]=..., dtype: Optional[Union[dtype, str]]=..., non_blocking: bool=..., tensor: Tensor)
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
Sequence = _alias(collections.abc.Sequence, 1)
===========unchanged ref 2===========
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
===========changed ref 0===========
# module: coeditor.service
# add, delete, replace, equal
+ StatusTag = Literal["A", "D", "R", " ", "RA", "RD"]
- StatusTag = Literal["A", "D", "R", " "]
===========changed ref 1===========
# module: coeditor.service
class EditSuggestion(TypedDict):
score: float
change_preview: str
new_code: str
+ input_status: list[tuple[int, StatusTag]]
- line_status: list[tuple[int, StatusTag]]
+ output_status: list[tuple[int, StatusTag]]
|
scripts.start_server/start_server | Modified | temp-1 | dff3d37085f62984add2598f6e095b7ab3ed51d1 | v0.3.5 quick fix. - Use request time instead of counter. | <0>:<add> if cont.id > time:
| # module: scripts.start_server
def start_server(device, port: int, print_stats: bool = True):
<s> @handle_error
def submit_problem(
+ time: int, project: str, file: str, lines: Sequence[int] | int, writeLogs: bool
- id: int, project: str, file: str, lines: Sequence[int] | int, writeLogs: bool
):
+ initialize(project)
target_dir = Path(project).resolve()
service = services[target_dir]
print(f"Suggesting edit for lines {lines} in {file}")
path = Path(file)
if Path.is_absolute(path):
path = path.relative_to(target_dir)
path = to_rel_path(path)
service.tlogger.clear()
log_dir = service.project / ".coeditor_logs" if writeLogs else None
region, f = service._suggest_edit_two_steps(path, lines, log_dir)
+ if target_dir in tasks and tasks[target_dir].id > time:
- if target_dir in tasks and tasks[target_dir].id > id:
return Success("Skipped")
+ tasks[target_dir] = LazyVal(f, time)
- tasks[target_dir] = LazyVal(f, id)
return Success(region.target_lines)
@method
@handle_error
+ def get_result(time: int, project: str):
- def get_result(id: int, project: str):
target_dir = Path(project).resolve()
cont = tasks[target_dir]
- if cont.id > id:
<0> return Success("Skipped")
response = cont.get()
service = services[target_dir]
if print_stats:
print("Runtime stats:")
display(service.tlogger.as_dataframe())
return Success(response.to_json())
print(f"Starting suggestion server at localhost:{port}")
serve("localhost", port)
| ===========above chunk 0===========
# module: scripts.start_server
def start_server(device, port: int, print_stats: bool = True):
# offset: -1
- # this newer model is trained with comments
# model_path = "MrVPlusOne/coeditor-xl-c3-dropout-v1.5"
- model_path = (
- get_model_dir(trained=False)
- / "coeditor-xl-c3-dropout-v1.5"
- / "checkpoint-230000"
- )
+ model_path = get_model_dir() / "coeditor-xl-c3-dropout-v1.5"
model = RetrievalEditorModel.load(model_path)
model.to(device)
print(f"Model '{model_path}' loaded on device:", device)
dec_args = DecodingArgs(do_sample=False, num_beams=4)
services = dict[Path, EditPredictionService]()
tasks = dict[Path, LazyVal[ServiceResponse]]()
def handle_error(f, *args, **kwargs):
@wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
traceback.print_exception(e)
return Error(code=1, message=repr(e))
return wrapper
@method
@handle_error
def initialize(project: str):
target_dir = Path(project).resolve()
- tasks.pop(target_dir, None)
if target_dir not in services:
with timed_action(f"Create service for project: {target_dir}"):
detector = ChangeDetector(target_dir)
services[target_dir] = EditPredictionService(
detector,
model,
dec_args=dec_args,
)
return Success("OK")
@method
@handle_error
def submit_problem(
+ time: int, project: str, file: str, lines: Sequence</s>
===========unchanged ref 0===========
at: IPython.core.display_functions
display(*, include=None, exclude=None, metadata=None, transient=None, display_id=None, raw=False, clear=False, source=_sentinel, **kwargs)
at: coeditor._utils
timed_action(name: str, silent: bool=False)
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
as_dataframe()
clear()
at: coeditor.common
to_rel_path(path: os.PathLike | str) -> RelPath
get_model_dir(trained=True) -> Path
at: coeditor.model
DecodingArgs(max_output_tks: int=512, do_sample: bool=False, top_p: float=0.9, num_beams: Optional[int]=1, length_penalty: float=0.0, marginalize_samples: int=1)
RetrievalEditorModel(config: T5Config)
at: coeditor.model.DecodingArgs
max_output_tks: int = 512
do_sample: bool = False
top_p: float = 0.9
num_beams: Optional[int] = 1
length_penalty: float = 0.0
marginalize_samples: int = 1
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
load(save_dir: Path | str) -> "RetrievalEditorModel"
at: coeditor.service
ChangeDetector(project: Path, untracked_as_additions: bool=True, ignore_dirs: Collection[str]=field(default_factory=lambda: DefaultIgnoreDirs), max_lines_to_edit: int=30)
ServiceResponse(target_file: str, target_project: str, edit_start: tuple[int, int], edit_end: tuple[int, int], target_lines: Sequence[int], input_code: str, suggestions: list[EditSuggestion])
===========unchanged ref 1===========
EditPredictionService()
at: coeditor.service.EditPredictionService
_suggest_edit_two_steps(file: RelPath, edit_lines: Sequence[int] | int, log_dir: Path | None=Path(".coeditor_logs"), n_suggestions: int=1) -> tuple[_EditRegion, Callable[[], ServiceResponse]]
at: coeditor.service.EditPredictionService.__init__
self.project = detector.project
self.tlogger = _tlogger
at: coeditor.service.ServiceResponse
target_file: str
target_project: str
edit_start: tuple[int, int]
edit_end: tuple[int, int]
target_lines: Sequence[int]
input_code: str
suggestions: list[EditSuggestion]
to_json()
at: coeditor.service._EditRegion
current_code: str
target_lines: Sequence[int]
target_line_ids: Sequence[int]
at: functools
wraps(wrapped: _AnyCallable, assigned: Sequence[str]=..., updated: Sequence[str]=...) -> Callable[[_T], _T]
at: jsonrpcserver.methods
method(f: Optional[Method]=None, name: Optional[str]=None) -> Callable[..., Any]
at: jsonrpcserver.result
Success(fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any) -> Either[ErrorResult, SuccessResult]
Error(fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any) -> Either[ErrorResult, SuccessResult]
at: jsonrpcserver.server
serve(name: str="", port: int=5000) -> None
at: pathlib
Path()
at: pathlib.Path
__slots__ = ()
resolve(strict: bool=...) -> _P
at: pathlib.PurePath
__slots__ = (
'_drv', '_root', '_parts',
'_str', '_hash', '_pparts', '_cached_cparts',
)
|
coeditor.encoding/truncate_section | Modified | temp-1 | 988f27c554771e639fd8c0e03b67f336c3a715dc | Change TruncateAt to a normal class. | <0>:<add> assert_eq(direction, TruncateAt.Right)
| # module: coeditor.encoding
def truncate_section(
sec: TokenSeq,
+ direction: TruncateAt.Value,
- direction: TruncateAt,
limit: int,
add_bos: bool = True,
inplace: bool = False,
) -> TokenSeq:
if len(sec) <= limit:
return sec
+ if direction == TruncateAt.Left:
- if direction.value == TruncateAt.Left.value:
if inplace:
del sec[:-limit]
else:
sec = sec[-limit:]
if add_bos and sec:
sec[0] = BOS_id
else:
- assert_eq(direction.value, TruncateAt.Right.value)
<0> if inplace:
del sec[limit:]
else:
sec = sec[:limit]
if add_bos and sec:
sec[-1] = EOS_id
return sec
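A quick illustration of the right-truncation branch above, using small integers as stand-in token ids (the concrete `BOS_id`/`EOS_id` values here are hypothetical):

BOS_id, EOS_id = 1, 2

def truncate_right(sec: list[int], limit: int) -> list[int]:
    # keep the first `limit` tokens and mark the cut with EOS,
    # mirroring the TruncateAt.Right branch of truncate_section
    if len(sec) <= limit:
        return sec
    sec = sec[:limit]
    sec[-1] = EOS_id
    return sec

assert truncate_right([5, 6, 7, 8], 3) == [5, 6, 2]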
| ===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.encoding
BOS_id = get_tk_id("<s>")
EOS_id = get_tk_id("</s>")
TruncateAt()
at: coeditor.encoding.TruncateAt
Value = int
Left = 0
Right = 1
===========changed ref 0===========
# module: coeditor.encoding
+ class TruncateAt:
- class TruncateAt(enum.Enum):
+ Value = int
+
Left = 0
Right = 1
===========changed ref 1===========
# module: coeditor.encoding
+ class TruncateAt:
- class TruncateAt(enum.Enum):
- def reversed(self) -> Self:
- if self == TruncateAt.Left:
- return TruncateAt.Right
- else:
- return TruncateAt.Left
- |
tests.test_edits/test_splitlines | Modified | temp-1 | aab89e924dbff5dc3021355d563ef87f4973efa4 | Fix test rng seeds. | <0>:<add> rand_input = [rng.choice(["a", "b", "c", "\n"]) for _ in range(n)]
| # module: tests.test_edits
def test_splitlines():
+ rng = get_rng()
for n in range(100):
- rand_input = [random.choice(["a", "b", "c", "\n"]) for _ in range(n)]
<0> input = "".join(rand_input).rstrip("\n")
lines = splitlines(input)
# basic identity
assert "\n".join(lines) == input
assert count_lines(input) == len(lines)
# encode and decode
enc = encode_lines_join(input)
assert decode_tokens(enc) == input
# split tokens
tk_lines = tk_splitlines(enc)
assert len(tk_lines) == len(lines)
assert_tks_eq(join_list(tk_lines, Newline_id), enc, "join_list(tk_lines)")
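The fix threads a seeded `random.Random` instance through the test instead of the global `random` module, so the generated inputs are reproducible across runs. A minimal demonstration of why the fixed seed matters:

import random

def get_rng():
    return random.Random(42)  # fixed seed -> deterministic test data

rng1, rng2 = get_rng(), get_rng()
# two independently created rngs with the same seed yield identical streams
assert [rng1.random() for _ in range(3)] == [rng2.random() for _ in range(3)]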
| ===========unchanged ref 0===========
at: coeditor.common
splitlines(text: str) -> list[str]
count_lines(text: str) -> int
assert_str_equal(actual: str, expect: str, name: str | None=None)
at: coeditor.encoding
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
encode_lines_join(text: str) -> TokenSeq
at: random.Random
VERSION = 3 # used by getstate/setstate
_randbelow = _randbelow_with_getrandbits
choice(seq: Sequence[_T]) -> _T
at: tests.test_edits
get_rng()
at: tests.test_edits.assert_tks_eq
actual_str = decode_tokens(actual)
===========changed ref 0===========
# module: tests.test_edits
+ def get_rng():
+ return random.Random(42)
+ |
tests.test_edits/TestChangeIdentities.test_random_subset | Modified | temp-1 | aab89e924dbff5dc3021355d563ef87f4973efa4 | Fix test rng seeds. | <0>:<add> y_map = random_subset(x_map, 20, rng)
| # module: tests.test_edits
class TestChangeIdentities:
def test_random_subset(self):
+ rng = get_rng()
+
def is_sorted(xs):
return list(xs) == list(sorted(xs))
xs = range(50)
assert is_sorted(xs)
+ for _ in range(100):
- for _ in range(50):
+ ys = random_subset(xs, 20, rng)
- ys = random_subset(xs, 20)
assert is_sorted(ys)
x_map = {i: i + 1 for i in range(50)}
assert is_sorted(x_map)
+ for _ in range(100):
- for _ in range(50):
- y_map = random_subset(x_map, 20)
<0> assert is_sorted(y_map)
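The assertions above rely on `random_subset` preserving the original ordering of the chosen elements. A simplified, hypothetical sampler with that property (the real helper also accepts mappings and an optional seed):

import random

def random_subset(xs: list, n: int, rng: random.Random) -> list:
    # sample n distinct indices, then emit elements in their original order
    idx = sorted(rng.sample(range(len(xs)), min(n, len(xs))))
    return [xs[i] for i in idx]

ys = random_subset(list(range(50)), 20, random.Random(42))
assert ys == sorted(ys)  # selection keeps the input order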
| ===========unchanged ref 0===========
at: coeditor.common
random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2]
random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1]
at: coeditor.encoding
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
at: tests.test_edits
get_rng()
===========unchanged ref 1===========
at: tests.test_edits.TestChangeIdentities
cases: dict[str, Change[str]] = {
"empty": Modified("", ""),
"generation": Modified("", "123"),
"add a new line": Modified("", "\n"),
"add a new line at end": Modified("a", "a\n"),
"added": Added("a\nb\nc\n"),
"deleted": Deleted("a\nb\nc\n"),
"no change": Modified(
dedent(
"""\
def f1():
x = 1
"""
),
dedent(
"""\
def f1():
x = 1
"""
),
),
"unchanged=True": Modified.from_unchanged(
dedent(
"""\
def f1():
x = 1
"""
),
),
# this test case cannot pass for some reason. Tokenizer bug?
# "leading_whitespace": Modified.from_unchanged(" ..."),
"replace last": Modified(
dedent(
"""\
def f1():
x = 1"""
),
dedent(
"""\
def f1():
x = 2
return x * 2"""
),
),
"no special tokens": Modified(
dedent(
"""\
def f1():
x = 1
y = 2
z = x + y
return z
def f2():
f1()"""
),
dedent(
"""\
# new comment
def f_new():
x = 1
if x > 0:
y = 2 * x
y *= 2
z = x + y
return z
def f2():
f1()
return f_new() + a
new_var = 0
"""
),
),
"with special tokens": Modified(
dedent(
"""\
def f1():
x = "<add>"
</s>
===========unchanged ref 2===========
at: tests.test_edits.TestChangeIdentities.test_apply_to_change
tk_change = tk_delta.apply_to_change(tk_before)
expect = change_to_tokens(c)
===========changed ref 0===========
# module: tests.test_edits
+ def get_rng():
+ return random.Random(42)
+
===========changed ref 1===========
# module: tests.test_edits
def test_splitlines():
+ rng = get_rng()
for n in range(100):
+ rand_input = [rng.choice(["a", "b", "c", "\n"]) for _ in range(n)]
- rand_input = [random.choice(["a", "b", "c", "\n"]) for _ in range(n)]
input = "".join(rand_input).rstrip("\n")
lines = splitlines(input)
# basic identity
assert "\n".join(lines) == input
assert count_lines(input) == len(lines)
# encode and decode
enc = encode_lines_join(input)
assert decode_tokens(enc) == input
# split tokens
tk_lines = tk_splitlines(enc)
assert len(tk_lines) == len(lines)
assert_tks_eq(join_list(tk_lines, Newline_id), enc, "join_list(tk_lines)")
|
tests.test_edits/TestChangeIdentities.test_delta_decomposition | Modified | temp-1 | aab89e924dbff5dc3021355d563ef87f4973efa4 | Fix test rng seeds. | <0>:<add> n_keys = int(len(keys) * rng.random())
| # module: tests.test_edits
class TestChangeIdentities:
def test_delta_decomposition(self):
+ rng = get_rng()
+
for name, c in self.cases.items():
original, delta = TkDelta.from_change_tks(change_to_tokens(c))
assert_tks_eq(original, encode_lines_join(get_before(c)), name)
expect = delta.apply_to_input(original)
assert_tks_eq(expect, encode_lines_join(get_after(c)), name)
keys = tuple(delta.keys())
+ for _ in range(100):
- for _ in range(50):
- n_keys = int(len(keys) * random.random())
<0> sub_keys = random_subset(keys, n_keys)
delta1, delta2 = delta.decompose_for_input(sub_keys)
step1 = delta1.apply_to_input(original)
step2 = delta2.apply_to_input(step1)
try:
assert_tks_eq(step2, expect, name)
except:
print_sections(
("change", decode_tokens(change_to_tokens(c))),
("delta", str(delta)),
("sub_keys", str(sub_keys)),
("original", decode_tokens(original)),
("delta1", str(delta1)),
("step1", decode_tokens(step1)),
("delta2", str(delta2)),
("step2", decode_tokens(step2)),
("expect", decode_tokens(expect)),
)
raise
| ===========unchanged ref 0===========
at: coeditor.common
print_sections(*, sep: str=SEP, file: TextIO=sys.stdout) -> None
random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2]
random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1]
at: coeditor.encoding
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
encode_lines_join(text: str) -> TokenSeq
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
change_to_tokens(change: Change[str]) -> TokenSeq
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
from_change_tks(change_tks: TokenSeq) -> tuple[TokenSeq, "TkDelta"]
at: random.Random
random() -> float
at: tests.test_edits
get_rng()
get_before(change: Change[str]) -> str
get_after(change: Change[str]) -> str
assert_tks_eq(actual: TokenSeq, expected: TokenSeq, name: str)
===========unchanged ref 1===========
at: tests.test_edits.TestChangeIdentities
cases: dict[str, Change[str]] = {
"empty": Modified("", ""),
"generation": Modified("", "123"),
"add a new line": Modified("", "\n"),
"add a new line at end": Modified("a", "a\n"),
"added": Added("a\nb\nc\n"),
"deleted": Deleted("a\nb\nc\n"),
"no change": Modified(
dedent(
"""\
def f1():
x = 1
"""
),
dedent(
"""\
def f1():
x = 1
"""
),
),
"unchanged=True": Modified.from_unchanged(
dedent(
"""\
def f1():
x = 1
"""
),
),
# this test case cannot pass for some reason. Tokenizer bug?
# "leading_whitespace": Modified.from_unchanged(" ..."),
"replace last": Modified(
dedent(
"""\
def f1():
x = 1"""
),
dedent(
"""\
def f1():
x = 2
return x * 2"""
),
),
"no special tokens": Modified(
dedent(
"""\
def f1():
x = 1
y = 2
z = x + y
return z
def f2():
f1()"""
),
dedent(
"""\
# new comment
def f_new():
x = 1
if x > 0:
y = 2 * x
y *= 2
z = x + y
return z
def f2():
f1()
return f_new() + a
new_var = 0
"""
),
),
"with special tokens": Modified(
dedent(
"""\
def f1():
x = "<add>"
</s>
===========unchanged ref 2===========
at: tests.test_edits.TestChangeIdentities.test_random_subset
rng = get_rng()
is_sorted(xs)
===========changed ref 0===========
# module: tests.test_edits
+ def get_rng():
+ return random.Random(42)
+
===========changed ref 1===========
# module: tests.test_edits
class TestChangeIdentities:
def test_random_subset(self):
+ rng = get_rng()
+
def is_sorted(xs):
return list(xs) == list(sorted(xs))
xs = range(50)
assert is_sorted(xs)
+ for _ in range(100):
- for _ in range(50):
+ ys = random_subset(xs, 20, rng)
- ys = random_subset(xs, 20)
assert is_sorted(ys)
x_map = {i: i + 1 for i in range(50)}
assert is_sorted(x_map)
+ for _ in range(100):
- for _ in range(50):
+ y_map = random_subset(x_map, 20, rng)
- y_map = random_subset(x_map, 20)
assert is_sorted(y_map)
===========changed ref 2===========
# module: tests.test_edits
def test_splitlines():
+ rng = get_rng()
for n in range(100):
+ rand_input = [rng.choice(["a", "b", "c", "\n"]) for _ in range(n)]
- rand_input = [random.choice(["a", "b", "c", "\n"]) for _ in range(n)]
input = "".join(rand_input).rstrip("\n")
lines = splitlines(input)
# basic identity
assert "\n".join(lines) == input
assert count_lines(input) == len(lines)
# encode and decode
enc = encode_lines_join(input)
assert decode_tokens(enc) == input
# split tokens
tk_lines = tk_splitlines(enc)
assert len(tk_lines) == len(lines)
assert_tks_eq(join_list(tk_lines, Newline_id), enc, "join_list(tk_lines)")
|
tests.test_edits/TestChangeIdentities.test_get_new_target_lines | Modified | temp-1 | aab89e924dbff5dc3021355d563ef87f4973efa4 | Fix test rng seeds. | <0>:<add> n_keys = int(len(keys) * rng.random())
| # module: tests.test_edits
class TestChangeIdentities:
def test_get_new_target_lines(self):
+ rng = get_rng()
+
for name, c in self.cases.items():
original, delta = TkDelta.from_change_tks(change_to_tokens(c))
n_origin_lines = len(tk_splitlines(original))
edit_lines = range(n_origin_lines + 1)
keys = tuple(delta.keys())
+ for _ in range(100):
- for _ in range(10):
- n_keys = int(len(keys) * random.random())
<0> sub_keys = random_subset(keys, n_keys)
sub_keys.sort()
delta1, delta2 = delta.decompose_for_change(sub_keys)
new_edit_lines = delta1.get_new_target_lines(edit_lines)
new_edit_set = set(new_edit_lines)
for l in delta2.changed_lines():
if l not in new_edit_set and l != n_origin_lines:
print_err(f"{edit_lines=}")
print_err("original", SEP)
print_err(add_line_numbers(decode_tokens(original), start=0))
print_err(SEP)
print_err(f"{delta=}")
print_err(f"{sub_keys=}")
print_err(f"{delta1=}")
print_err("step1", SEP)
step1 = delta1.apply_to_change(original)
print_err(add_line_numbers(decode_tokens(step1), start=0))
print_err(SEP)
print_err(f"{new_edit_lines=}")
print_err(f"{delta2=}")
raise AssertionError(f"{l=} not in {new_edit_lines=}")
| ===========unchanged ref 0===========
at: coeditor._utils
add_line_numbers(code: str, start: int=1)
at: coeditor.common
SEP = "-" * 80
random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2]
random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1]
    print_err(*elems, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None
at: coeditor.encoding
tk_splitlines(tks: TokenSeq)
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
change_to_tokens(change: Change[str]) -> TokenSeq
at: coeditor.encoding.TkDelta
from_change_tks(change_tks: TokenSeq) -> tuple[TokenSeq, "TkDelta"]
at: random.Random
random() -> float
at: tests.test_edits
get_rng()
===========unchanged ref 1===========
at: tests.test_edits.TestChangeIdentities
cases: dict[str, Change[str]] = {
"empty": Modified("", ""),
"generation": Modified("", "123"),
"add a new line": Modified("", "\n"),
"add a new line at end": Modified("a", "a\n"),
"added": Added("a\nb\nc\n"),
"deleted": Deleted("a\nb\nc\n"),
"no change": Modified(
dedent(
"""\
def f1():
x = 1
"""
),
dedent(
"""\
def f1():
x = 1
"""
),
),
"unchanged=True": Modified.from_unchanged(
dedent(
"""\
def f1():
x = 1
"""
),
),
# this test case cannot pass for some reason. Tokenizer bug?
# "leading_whitespace": Modified.from_unchanged(" ..."),
"replace last": Modified(
dedent(
"""\
def f1():
x = 1"""
),
dedent(
"""\
def f1():
x = 2
return x * 2"""
),
),
"no special tokens": Modified(
dedent(
"""\
def f1():
x = 1
y = 2
z = x + y
return z
def f2():
f1()"""
),
dedent(
"""\
# new comment
def f_new():
x = 1
if x > 0:
y = 2 * x
y *= 2
z = x + y
return z
def f2():
f1()
return f_new() + a
new_var = 0
"""
),
),
"with special tokens": Modified(
dedent(
"""\
def f1():
x = "<add>"
</s>
===========unchanged ref 2===========
at: tests.test_edits.TestChangeIdentities.test_delta_decomposition
original, delta = TkDelta.from_change_tks(change_to_tokens(c))
expect = delta.apply_to_input(original)
delta1, delta2 = delta.decompose_for_input(sub_keys)
delta1, delta2 = delta.decompose_for_input(sub_keys)
step1 = delta1.apply_to_input(original)
step2 = delta2.apply_to_input(step1)
===========changed ref 0===========
# module: tests.test_edits
+ def get_rng():
+ return random.Random(42)
+
===========changed ref 1===========
# module: tests.test_edits
class TestChangeIdentities:
def test_random_subset(self):
+ rng = get_rng()
+
def is_sorted(xs):
return list(xs) == list(sorted(xs))
xs = range(50)
assert is_sorted(xs)
+ for _ in range(100):
- for _ in range(50):
+ ys = random_subset(xs, 20, rng)
- ys = random_subset(xs, 20)
assert is_sorted(ys)
x_map = {i: i + 1 for i in range(50)}
assert is_sorted(x_map)
+ for _ in range(100):
- for _ in range(50):
+ y_map = random_subset(x_map, 20, rng)
- y_map = random_subset(x_map, 20)
assert is_sorted(y_map)
===========changed ref 2===========
# module: tests.test_edits
class TestChangeIdentities:
def test_delta_decomposition(self):
+ rng = get_rng()
+
for name, c in self.cases.items():
original, delta = TkDelta.from_change_tks(change_to_tokens(c))
assert_tks_eq(original, encode_lines_join(get_before(c)), name)
expect = delta.apply_to_input(original)
assert_tks_eq(expect, encode_lines_join(get_after(c)), name)
keys = tuple(delta.keys())
+ for _ in range(100):
- for _ in range(50):
+ n_keys = int(len(keys) * rng.random())
- n_keys = int(len(keys) * random.random())
sub_keys = random_subset(keys, n_keys)
delta1, delta2 = delta.decompose_for_input(sub_keys)
step1 = delta1.apply_to_input(original)
step2 = delta2.apply_to_input(step1)
try:
assert_tks_eq(step2, expect, name)
except:
print_sections(
("change", decode_tokens(change_to_tokens(c))),
("delta", str(delta)),
("sub_keys", str(sub_keys)),
("original", decode_tokens(original)),
("delta1", str(delta1)),
("step1", decode_tokens(step1)),
("delta2", str(delta2)),
("step2", decode_tokens(step2)),
("expect", decode_tokens(expect)),
)
raise
===========changed ref 3===========
# module: tests.test_edits
def test_splitlines():
+ rng = get_rng()
for n in range(100):
+ rand_input = [rng.choice(["a", "b", "c", "\n"]) for _ in range(n)]
- rand_input = [random.choice(["a", "b", "c", "\n"]) for _ in range(n)]
input = "".join(rand_input).rstrip("\n")
lines = splitlines(input)
# basic identity
assert "\n".join(lines) == input
assert count_lines(input) == len(lines)
# encode and decode
enc = encode_lines_join(input)
assert decode_tokens(enc) == input
# split tokens
tk_lines = tk_splitlines(enc)
assert len(tk_lines) == len(lines)
assert_tks_eq(join_list(tk_lines, Newline_id), enc, "join_list(tk_lines)")
|
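===========added sketch: decomposition invariant===========
Stated compactly, the property the main diff above asserts: after splitting a
delta by a key subset, the edit-line indices remapped through
`delta1.get_new_target_lines` must still cover every line `delta2` changes,
except the sentinel index one past the last original line. A sketch assuming
the coeditor package is importable; every API name below appears in the refs
above, while the toy change and the subset size of 1 are illustrative only.

from coeditor.change import Modified
from coeditor.common import random_subset
from coeditor.encoding import TkDelta, change_to_tokens, tk_splitlines

change = Modified("x = 1\n", "x = 2\ny = 3\n")
original, delta = TkDelta.from_change_tks(change_to_tokens(change))
n_origin_lines = len(tk_splitlines(original))
edit_lines = range(n_origin_lines + 1)

sub_keys = sorted(random_subset(tuple(delta.keys()), 1, rng=42))
delta1, delta2 = delta.decompose_for_change(sub_keys)
new_edit_set = set(delta1.get_new_target_lines(edit_lines))
for l in delta2.changed_lines():
    # The sentinel line just past the original is allowed to drop out.
    assert l in new_edit_set or l == n_origin_lines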