path (stringlengths 15–77) | type (stringclasses 1 value) | project (stringclasses 1 value) | commit_hash (stringlengths 40) | commit_message (stringlengths 15–198) | ground_truth (stringlengths 26–155) | main_code (stringlengths 176–2.5k) | context (stringlengths 91–9.37k) |
---|---|---|---|---|---|---|---|
coeditor.common/to_rel_path | Modified | temp-1 | cf23297cad932a75693d4fbb622a97318cc02bc2 | Fix model link. | <0>:<add> path = Path(path)
| # module: coeditor.common
+ def to_rel_path(path: os.PathLike) -> RelPath:
- def to_rel_path(path: Path) -> RelPath:
<0> if path.is_absolute():
raise ValueError(f"Expected a relative path, got: {path}")
return RelPath(path)
| ===========unchanged ref 0===========
at: coeditor.common
RelPath = NewType("RelPath", Path)
at: pathlib
Path()
at: pathlib.PurePath
__slots__ = (
'_drv', '_root', '_parts',
'_str', '_hash', '_pparts', '_cached_cparts',
)
drive = property(attrgetter('_drv'),
doc="""The drive prefix (letter or UNC path), if any.""")
root = property(attrgetter('_root'),
doc="""The root of the path, if any.""")
is_absolute() -> bool
|
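The row above widens `to_rel_path` to accept any path-like value, with the ground-truth fix normalizing via `Path(path)` before the `is_absolute()` check (a plain `os.PathLike` object need not have that method). A minimal self-contained sketch of the updated helper, mirroring the definitions in `coeditor.common`:

```python
import os
from pathlib import Path
from typing import NewType

RelPath = NewType("RelPath", Path)  # as defined in coeditor.common

def to_rel_path(path: os.PathLike | str) -> RelPath:
    path = Path(path)  # normalize first: not every os.PathLike has .is_absolute()
    if path.is_absolute():
        raise ValueError(f"Expected a relative path, got: {path}")
    return RelPath(path)

assert to_rel_path("src/app.py") == Path("src/app.py")
```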
scripts.start_server/start_server | Modified | temp-1 | cf23297cad932a75693d4fbb622a97318cc02bc2 | Fix model link. | <0>:<add> # model_path = get_model_dir() / "coeditor-xl-c3-dropout-v1.5"
| # module: scripts.start_server
def start_server(device, port: int, print_stats: bool = True):
+ model_path = "MrVPlusOne/coeditor-xl-c3-dropout-v1.5"
- # model_path = "MrVPlusOne/coeditor-xl-c3-dropout-v1.5"
- model_path = get_model_dir() / "coeditor-xl-c3-dropout-v1.5"
<0> model = RetrievalEditorModel.load(model_path)
model.to(device)
print(f"Model '{model_path}' loaded on device:", device)
dec_args = DecodingArgs(do_sample=False, num_beams=4)
services = dict[Path, EditPredictionService]()
tasks = dict[Path, LazyVal[ServiceResponse]]()
def handle_error(f, *args, **kwargs):
@wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
traceback.print_exception(e)
return Error(code=1, message=repr(e))
return wrapper
@method
@handle_error
def initialize(project: str):
target_dir = Path(project).resolve()
if target_dir not in services:
with timed_action(f"Create service for project: {target_dir}"):
detector = ChangeDetector(target_dir)
services[target_dir] = EditPredictionService(
detector,
model,
dec_args=dec_args,
)
return Success("OK")
@method
@handle_error
def submit_problem(
time: int, project: str, file: str, lines: Sequence[int] | int, writeLogs: bool
):
initialize(project)
target_dir = Path(project).resolve()
service = services[target_dir]
print(f"Suggesting edit for lines {lines} in {file}")
</s> | ===========below chunk 0===========
# module: scripts.start_server
def start_server(device, port: int, print_stats: bool = True):
# offset: 1
<s> service = services[target_dir]
print(f"Suggesting edit for lines {lines} in {file}")
path = Path(file)
if Path.is_absolute(path):
path = path.relative_to(target_dir)
path = to_rel_path(path)
service.tlogger.clear()
log_dir = service.project / ".coeditor_logs" if writeLogs else None
region, f = service._suggest_edit_two_steps(path, lines, log_dir)
if target_dir in tasks and tasks[target_dir].id > time:
return Success("Skipped")
tasks[target_dir] = LazyVal(f, time)
return Success(region.target_lines)
@method
@handle_error
def get_result(time: int, project: str):
target_dir = Path(project).resolve()
cont = tasks[target_dir]
if cont.id > time:
return Success("Skipped")
response = cont.get()
service = services[target_dir]
if print_stats:
print("Runtime stats:")
display(service.tlogger.as_dataframe())
return Success(response.to_json())
print(f"Starting suggestion server at localhost:{port}")
serve("localhost", port)
===========unchanged ref 0===========
at: IPython.core.display_functions
display(*, include=None, exclude=None, metadata=None, transient=None, display_id=None, raw=False, clear=False, source=_sentinel, **kwargs)
at: coeditor._utils
timed_action(name: str, silent: bool=False)
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
as_dataframe()
clear()
at: coeditor.common
to_rel_path(path: os.PathLike | str) -> RelPath
at: coeditor.model
DecodingArgs(max_output_tks: int=512, do_sample: bool=False, top_p: float=0.9, num_beams: Optional[int]=1, length_penalty: float=0.0, marginalize_samples: int=1)
RetrievalEditorModel(config: T5Config)
at: coeditor.model.DecodingArgs
max_output_tks: int = 512
do_sample: bool = False
top_p: float = 0.9
num_beams: Optional[int] = 1
length_penalty: float = 0.0
marginalize_samples: int = 1
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
load(save_dir: Path | str) -> "RetrievalEditorModel"
at: coeditor.service
ChangeDetector(project: Path, untracked_as_additions: bool=True, ignore_dirs: Collection[str]=field(default_factory=lambda: DefaultIgnoreDirs), max_lines_to_edit: int=30)
ServiceResponse(target_file: str, target_project: str, edit_start: tuple[int, int], edit_end: tuple[int, int], target_lines: Sequence[int], input_code: str, suggestions: list[EditSuggestion])
EditPredictionService()
===========unchanged ref 1===========
at: coeditor.service.EditPredictionService
_suggest_edit_two_steps(file: RelPath, edit_lines: Sequence[int] | int, log_dir: Path | None=Path(".coeditor_logs"), n_suggestions: int=1) -> tuple[_EditRegion, Callable[[], ServiceResponse]]
at: coeditor.service.EditPredictionService.__init__
self.project = detector.project
self.tlogger = _tlogger
at: coeditor.service.ServiceResponse
target_file: str
target_project: str
edit_start: tuple[int, int]
edit_end: tuple[int, int]
target_lines: Sequence[int]
input_code: str
suggestions: list[EditSuggestion]
to_json()
at: coeditor.service._EditRegion
current_code: str
target_lines: Sequence[int]
target_line_ids: Sequence[int]
at: functools
wraps(wrapped: _AnyCallable, assigned: Sequence[str]=..., updated: Sequence[str]=...) -> Callable[[_T], _T]
at: jsonrpcserver.methods
method(f: Optional[Method]=None, name: Optional[str]=None) -> Callable[..., Any]
at: jsonrpcserver.result
Success(fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any) -> Either[ErrorResult, SuccessResult]
Error(fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any) -> Either[ErrorResult, SuccessResult]
at: jsonrpcserver.server
serve(name: str="", port: int=5000) -> None
at: pathlib
Path()
at: pathlib.Path
__slots__ = ()
resolve(strict: bool=...) -> _P
at: pathlib.PurePath
__slots__ = (
'_drv', '_root', '_parts',
'_str', '_hash', '_pparts', '_cached_cparts',
)
===========unchanged ref 2===========
drive = property(attrgetter('_drv'),
doc="""The drive prefix (letter or UNC path), if any.""")
root = property(attrgetter('_root'),
doc="""The root of the path, if any.""")
relative_to(*other: Union[str, _PathLike]) -> _P
is_absolute(self) -> bool
at: scripts.start_server
LazyVal(task: Callable[[], T1], tag: int)
at: scripts.start_server.LazyVal
get() -> T1
at: scripts.start_server.LazyVal.__init__
self.id = tag
at: traceback
print_exception(etype: Optional[Type[BaseException]], value: Optional[BaseException], tb: Optional[TracebackType], limit: Optional[int]=..., file: Optional[IO[str]]=..., chain: bool=...) -> None
at: transformers.modeling_utils.PreTrainedModel
config_class = None
base_model_prefix = ""
main_input_name = "input_ids"
_auto_class = None
_no_split_modules = None
_skip_keys_device_placement = None
_keep_in_fp32_modules = None
_keys_to_ignore_on_load_missing = None
_keys_to_ignore_on_load_unexpected = None
_keys_to_ignore_on_save = None
_tied_weights_keys = None
is_parallelizable = False
supports_gradient_checkpointing = False
to(device: Optional[Union[int, device]]=..., dtype: Optional[Union[dtype, str]]=..., non_blocking: bool=..., /, *, device: Optional[Union[int, device]]=..., dtype: Optional[Union[dtype, str]]=..., non_blocking: bool=..., tensor: Tensor)
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor.common
+ def to_rel_path(path: os.PathLike) -> RelPath:
- def to_rel_path(path: Path) -> RelPath:
+ path = Path(path)
if path.is_absolute():
raise ValueError(f"Expected a relative path, got: {path}")
return RelPath(path)
|
coeditor.service/EditPredictionService._suggest_edit_two_steps | Modified | temp-1 | f31f06a1e1fd394b2c2b740c36df77e1548ff8eb | Update training script to avoid OOM issue. | <0>:<add> pred_str = show_prediction(problem, pred)
| # module: coeditor.service
@dataclass
class EditPredictionService:
def _suggest_edit_two_steps(
self,
file: RelPath,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> tuple[_EditRegion, Callable[[], ServiceResponse]]:
<s>").open("w") as f:
pred_tks = pred.out_tks
score = pred.score
print(f"{problem.edit_line_ids=}", file=f)
print(f"{len(input_tks)=}", file=f)
print(f"{len(references)=}", file=f)
print(f"Solution score: {score:.3g}", file=f)
print(f"Marginalized samples:", pred.n_samples, file=f)
pred = RetrievalModelPrediction(
input_ids=input_tks,
output_ids=pred_tks,
labels=output_truth,
references=references,
)
- pred_str = RetrievalDecodingResult.show_prediction(
- problem, pred
- )
<0> print(pred_str, file=f)
target_lines = target.target_lines
suggestions = list[EditSuggestion]()
for pred in predictions:
pred_change = self.apply_edit_to_elem(
target,
problem,
pred.out_tks,
)
preview = "\n".join(
compute_line_diffs_fast(
splitlines(pred_change.before),
splitlines(pred_change.after),
)
)
input_status, change_status = compute_line_status(pred_change)
input_status = [
(i + target_lines[0], tag) for i, tag in input_status.items()
]
output_status = list(change_status.items())
suggestion = EditSuggestion(
score=pred.score,
change_preview=preview,
</s> | ===========above chunk 0===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def _suggest_edit_two_steps(
self,
file: RelPath,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> tuple[_EditRegion, Callable[[], ServiceResponse]]:
# offset: -1
timed = self.tlogger.timed
with timed("get c3 problem"):
problem, span = self.detector.get_problem(file, edit_lines)
with timed("tokenize c3 problem"):
tk_prob = self.c3_tkn.tokenize_problem(problem)
target = self.get_target_code(span.code, problem, tk_prob)
def next_step():
batch = C3DataLoader.pack_batch([tk_prob])
original = problem.span.original.tolist()
with timed("run model"), torch.autocast("cuda"):
predictions = self.model.predict_on_batch(
batch, [original], self.dec_args, self.show_max_solutions
)
assert_eq(len(predictions), 1)
predictions = predictions[0]
assert predictions
if log_dir is not None:
log_dir.mkdir(exist_ok=True)
input_tks = batch["input_ids"][0]
references = batch["references"]
output_truth = batch["labels"][0]
print(f"Writing logs to: {log_dir}")
for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt").open("w") as f:
pred_tks = pred.out_tks
score = pred.score
</s>
===========below chunk 0===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def _suggest_edit_two_steps(
self,
file: RelPath,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
) -> tuple[_EditRegion, Callable[[], ServiceResponse]]:
# offset: 1
<s>_status.items())
suggestion = EditSuggestion(
score=pred.score,
change_preview=preview,
new_code=pred_change.after,
input_status=input_status,
output_status=output_status,
)
suggestions.append(suggestion)
return ServiceResponse(
target_file=str(self.project / file),
edit_start=(target_lines[0], 0),
edit_end=(target_lines[-1] + 1, 0),
target_lines=target.target_lines,
input_code=target.current_code,
suggestions=suggestions,
)
return target, next_step
===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
compute_line_diffs_fast(before: Sequence[str], after: Sequence[str])
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.7"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 16
ref_chunk_overlap: int = 32
disable_builtin_defs: bool = True
disable_unchanged_refs: bool = False
current_code_only: bool = False
tokenize_problem(problem: C3Problem) -> TkC3Problem
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
at: coeditor.common
RelPath = NewType("RelPath", Path)
splitlines(text: str) -> list[str]
===========unchanged ref 1===========
at: coeditor.model
RetrievalModelPrediction(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
RetrievalModelPrediction(**kwargs: _VT)
RetrievalModelPrediction(map: Mapping[_KT, _VT], **kwargs: _VT)
show_prediction(prob: C3Problem, pred: RetrievalModelPrediction) -> str
C3DataLoader(all_probs: Sequence[C3Problem], transform: C3ProblemTransform | None, tokenizer: C3ProblemTokenizer, batch_args: BatchArgs, shuffle: bool, desc: str, tqdm_args: dict | None=None, chunk_size: int=1000, workers: int=DefaultWorkers)
at: coeditor.model.C3DataLoader
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
tqdm_args: dict | None = None
chunk_size: int = 1000
workers: int = DefaultWorkers
pack_batch(probs: Sequence[TkC3Problem])
at: coeditor.model.PredictedChange
change: Modified[str]
out_tks: TokenSeq
score: float
n_samples: int
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
decorate_autocast(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1)
predict_on_batch(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1) -> list[list[PredictedChange]]
|
coeditor.model/RetrievalEditorModel.predict_on_batch | Modified | temp-1 | b6ecb23faa1b34ddc7e6c1bc2b1d09103ff0a7f5 | Fix marginalization bug in predict_on_batch. | <0>:<add> pred = tokens_to_change(delta.apply_to_change(original))
| # module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
@torch.autocast("cuda")
def predict_on_batch(
self,
batch: dict,
+ problems: Sequence[C3Problem],
- originals: Sequence[TokenSeq],
dec_args: DecodingArgs,
n_solutions: int = 1,
) -> list[list[PredictedChange]]:
<s>list([[x] * N for x in problems])
- originals = join_list([[x] * N for x in originals])
if (pred_scores := gen_out.get("sequences_scores", None)) is None:
pred_scores = [0.0] * len(out_tks)
if use_sampling:
pred_weights = [1.0 / N] * len(out_tks)
else:
pred_weights = [math.exp(x) for x in pred_scores]
with timed("assemble changes"):
pred_changes = list[Modified[str]]()
+ for prob, out in zip(problems, out_tks):
- for change_tks, out in zip(originals, out_tks):
+ delta = TkDelta.from_output_tks(prob.edit_line_ids, out)
+ original = prob.span.original.tolist()
- pred = tokens_to_change(inline_output_tokens(change_tks, out))
<0> pred_changes.append(pred)
assert_eq(len(pred_changes), len(out_tks), len(pred_scores))
solutions = list[list[PredictedChange]]()
for i in range(0, len(pred_changes), N):
sols = marginalize_preds(
pred_changes[i : i + N],
out_tks[i : i + N],
pred_weights[i : i + N],
pred_scores[i : i + N],
)
solutions.append(sols[:n_solutions])
return solutions
| ===========above chunk 0===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
@torch.autocast("cuda")
def predict_on_batch(
self,
batch: dict,
+ problems: Sequence[C3Problem],
- originals: Sequence[TokenSeq],
dec_args: DecodingArgs,
n_solutions: int = 1,
) -> list[list[PredictedChange]]:
# offset: -1
<s>:
N = dec_args.num_beams or 1
gen_args = dec_args.to_model_args()
input_ids = batch["input_ids"]
if not isinstance(input_ids, torch.LongTensor):
input_ids = torch.LongTensor(input_ids)
with timed("model.generate"), tqdm(total=dec_args.max_output_tks) as pbar:
gen_out = self.generate(
input_ids.to(self.device),
references=batch["references"],
query_ref_list=batch["query_ref_list"],
num_return_sequences=N,
return_dict_in_generate=True,
output_scores=True,
**gen_args,
tqdm=pbar,
)
assert not isinstance(gen_out, torch.LongTensor)
out_tks = gen_out["sequences"]
if isinstance(out_tks, torch.Tensor):
out_tks = out_tks.tolist()
out_tks = [remove_pad_ids(x) for x in out_tks]
assert isinstance(out_tks, list)
logging.debug("Max out length:", max(len(x) for x in out_tks))
+ assert_eq(len(out_tks), len(problems) * N)
- assert_eq(len(out_tks), len(originals) * N)
+ problems = join_list([[x] * N for x in problems])
- originals = join_list([[x] * N for x in</s>
===========above chunk 1===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
@torch.autocast("cuda")
def predict_on_batch(
self,
batch: dict,
+ problems: Sequence[C3Problem],
- originals: Sequence[TokenSeq],
dec_args: DecodingArgs,
n_solutions: int = 1,
) -> list[list[PredictedChange]]:
# offset: -2
<s>[TokenSeq],
weights: Sequence[float],
scores: Sequence[float],
) -> list[PredictedChange]:
"""For sampling techniques, all sample should have equal weights 1/N. For
search-based techniques, the `weights` should equal to the solutions' probabilities."""
assert preds
groups = groupby(
range(len(preds)),
keyfunc=lambda i: normalize_code_by_ast(preds[i].after),
)
groups = list(groups.values())
for group in groups:
# within each group, sort by score
group.sort(key=lambda i: scores[i], reverse=True)
groups.sort(
key=lambda g: (sum(weights[i] for i in g), scores[g[0]]), reverse=True
)
return [
PredictedChange(
preds[g[0]], out_tks[g[0]], sum(weights[i] for i in g), len(g)
)
for g in groups
]
use_sampling = dec_args.marginalize_samples > 1
if use_sampling:
assert_eq(dec_args.do_sample, True)
assert_eq(dec_args.num_beams, 1)
N = dec_args.marginalize_samples
else:
N = dec_args.num_beams or 1
gen_args = dec_args.to_model_args</s>
===========above chunk 2===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
@torch.autocast("cuda")
def predict_on_batch(
self,
batch: dict,
+ problems: Sequence[C3Problem],
- originals: Sequence[TokenSeq],
dec_args: DecodingArgs,
n_solutions: int = 1,
) -> list[list[PredictedChange]]:
# offset: -3
"""
Returns nested list of shape `(batch_size, n_solutions)`.
"""
timed = self.tlogger.timed
def marginalize_preds(
preds: Sequence[Modified[str]],
out_tks</s>
===========unchanged ref 0===========
at: coeditor._utils
groupby(iterable: Iterable[T1], keyfunc: Callable[[T1], T2]) -> dict[T2, list[T1]]
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
timed(self, name: str)
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.change
Modified(before: E1, after: E1, unchanged: bool=False)
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
at: coeditor.common
TokenSeq = list[Token]
join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
normalize_code_by_ast(code: str, sort_keyargs: bool=True, remove_doc_string: bool=True) -> str
|
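The `marginalize_preds` helper in this row groups beam outputs that are syntactically equivalent under `normalize_code_by_ast` and sums their weights, so duplicated solutions rank higher. A self-contained sketch of that idea, with whitespace stripping standing in for AST normalization:

```python
from collections import defaultdict

def marginalize(preds: list[str], weights: list[float]) -> list[tuple[str, float]]:
    """Group equivalent predictions and rank by total weight."""
    totals: dict[str, float] = defaultdict(float)
    canonical: dict[str, str] = {}
    for pred, w in zip(preds, weights):
        key = pred.replace(" ", "")  # stand-in for normalize_code_by_ast
        canonical.setdefault(key, pred)  # keep the first-seen spelling
        totals[key] += w
    return sorted(
        ((canonical[k], t) for k, t in totals.items()),
        key=lambda kv: kv[1],
        reverse=True,
    )

print(marginalize(["x = 1", "x=1", "x = 2"], [0.4, 0.2, 0.3]))
# [('x = 1', 0.6...), ('x = 2', 0.3)] -- the two spellings of `x = 1` merge
```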
coeditor.service/EditPredictionService._suggest_edit_two_steps | Modified | temp-1 | b6ecb23faa1b34ddc7e6c1bc2b1d09103ff0a7f5 | Fix marginalization bug in predict_on_batch. | <0>:<add> for pred in predictions[:n_suggestions]:
| # module: coeditor.service
@dataclass
class EditPredictionService:
def _suggest_edit_two_steps(
self,
file: RelPath,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
+ n_suggestions: int = 1,
) -> tuple[_EditRegion, Callable[[], ServiceResponse]]:
<s>_line_ids=}", file=f)
print(f"{len(input_tks)=}", file=f)
print(f"{len(references)=}", file=f)
print(f"Solution score: {score:.3g}", file=f)
print(f"Marginalized samples:", pred.n_samples, file=f)
pred = RetrievalModelPrediction(
input_ids=input_tks,
output_ids=pred_tks,
labels=output_truth,
references=references,
)
pred_str = show_prediction(problem, pred)
print(pred_str, file=f)
target_lines = target.target_lines
suggestions = list[EditSuggestion]()
- for pred in predictions:
<0> pred_change = self.apply_edit_to_elem(
target,
problem,
pred.out_tks,
)
preview = "\n".join(
compute_line_diffs_fast(
splitlines(pred_change.before),
splitlines(pred_change.after),
)
)
input_status, change_status = compute_line_status(pred_change)
input_status = [
(i + target_lines[0], tag) for i, tag in input_status.items()
]
output_status = list(change_status.items())
suggestion = EditSuggestion(
score=pred.score,
change_preview=preview,
new_code=pred_change.after,
input_status=input_status,
output_status=output_status,
)
suggestions</s> | ===========above chunk 0===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def _suggest_edit_two_steps(
self,
file: RelPath,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
+ n_suggestions: int = 1,
) -> tuple[_EditRegion, Callable[[], ServiceResponse]]:
# offset: -1
<s> timed("get c3 problem"):
problem, span = self.detector.get_problem(file, edit_lines)
with timed("tokenize c3 problem"):
tk_prob = self.c3_tkn.tokenize_problem(problem)
target = self.get_target_code(span.code, problem, tk_prob)
def next_step():
batch = C3DataLoader.pack_batch([tk_prob])
- original = problem.span.original.tolist()
with timed("run model"), torch.autocast("cuda"):
predictions = self.model.predict_on_batch(
+ batch, [problem], self.dec_args, self.show_max_solutions
- batch, [original], self.dec_args, self.show_max_solutions
)
assert_eq(len(predictions), 1)
predictions = predictions[0]
assert predictions
if log_dir is not None:
log_dir.mkdir(exist_ok=True)
input_tks = batch["input_ids"][0]
references = batch["references"]
output_truth = batch["labels"][0]
print(f"Writing logs to: {log_dir}")
for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt").open("w") as f:
pred_tks = pred.out_tks
score = pred.score
print(f"{problem.edit_line_ids=}", file=f)
print(f"{len(input_tks)=}", file=f</s>
===========above chunk 1===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def _suggest_edit_two_steps(
self,
file: RelPath,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
+ n_suggestions: int = 1,
) -> tuple[_EditRegion, Callable[[], ServiceResponse]]:
# offset: -2
timed = self.tlogger.timed
with timed("get c3 problem"):
problem, span = self.detector.get_problem(file, edit_lines)
</s>
===========below chunk 0===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def _suggest_edit_two_steps(
self,
file: RelPath,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
+ n_suggestions: int = 1,
) -> tuple[_EditRegion, Callable[[], ServiceResponse]]:
# offset: 1
<s>change.after,
input_status=input_status,
output_status=output_status,
)
suggestions.append(suggestion)
return ServiceResponse(
target_file=str(self.project / file),
edit_start=(target_lines[0], 0),
edit_end=(target_lines[-1] + 1, 0),
target_lines=target.target_lines,
input_code=target.current_code,
suggestions=suggestions,
)
return target, next_step
===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
compute_line_diffs_fast(before: Sequence[str], after: Sequence[str])
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
timed(self, name: str)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.7"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 16
ref_chunk_overlap: int = 32
disable_builtin_defs: bool = True
disable_unchanged_refs: bool = False
current_code_only: bool = False
tokenize_problem(problem: C3Problem) -> TkC3Problem
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
at: coeditor.common
RelPath = NewType("RelPath", Path)
splitlines(text: str) -> list[str]
at: coeditor.model
RetrievalModelPrediction(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
RetrievalModelPrediction(**kwargs: _VT)
RetrievalModelPrediction(map: Mapping[_KT, _VT], **kwargs: _VT)
===========unchanged ref 1===========
show_prediction(prob: C3Problem, pred: RetrievalModelPrediction) -> str
C3DataLoader(all_probs: Sequence[C3Problem], transform: C3ProblemTransform | None, tokenizer: C3ProblemTokenizer, batch_args: BatchArgs, shuffle: bool, desc: str, tqdm_args: dict | None=None, chunk_size: int=1000, workers: int=DefaultWorkers)
at: coeditor.model.C3DataLoader
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
tqdm_args: dict | None = None
chunk_size: int = 1000
workers: int = DefaultWorkers
pack_batch(probs: Sequence[TkC3Problem])
at: coeditor.model.PredictedChange
change: Modified[str]
out_tks: TokenSeq
score: float
n_samples: int
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
decorate_autocast(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1)
predict_on_batch(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1) -> list[list[PredictedChange]]
at: coeditor.service
EditSuggestion(map: Mapping[_KT, _VT], **kwargs: _VT)
EditSuggestion(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
EditSuggestion(**kwargs: _VT)
ServiceResponse(target_file: str, edit_start: tuple[int, int], edit_end: tuple[int, int], target_lines: Sequence[int], input_code: str, suggestions: list[EditSuggestion])
|
tests.test_edits/TestChangeIdentities.test_get_new_target_lines | Modified | temp-1 | fc86e1e97dcfdef8f300aa6cbc370b769496fcf8 | Implement keystroke_cost. | <0>:<add> new_edit_lines = delta1.get_new_line_ids(edit_lines)
| # module: tests.test_edits
class TestChangeIdentities:
def test_get_new_target_lines(self):
rng = get_rng()
for name, c in self.cases.items():
original, delta = TkDelta.from_change_tks(change_to_tokens(c))
n_origin_lines = len(tk_splitlines(original))
edit_lines = range(n_origin_lines + 1)
keys = tuple(delta.keys())
for _ in range(100):
n_keys = int(len(keys) * rng.random())
sub_keys = random_subset(keys, n_keys)
sub_keys.sort()
delta1, delta2 = delta.decompose_for_change(sub_keys)
- new_edit_lines = delta1.get_new_target_lines(edit_lines)
<0> new_edit_set = set(new_edit_lines)
for l in delta2.changed_lines():
if l not in new_edit_set and l != n_origin_lines:
print_err(f"{edit_lines=}")
print_err("original", SEP)
print_err(add_line_numbers(decode_tokens(original), start=0))
print_err(SEP)
print_err(f"{delta=}")
print_err(f"{sub_keys=}")
print_err(f"{delta1=}")
print_err("step1", SEP)
step1 = delta1.apply_to_change(original)
print_err(add_line_numbers(decode_tokens(step1), start=0))
print_err(SEP)
print_err(f"{new_edit_lines=}")
print_err(f"{delta2=}")
raise AssertionError(f"{l=} not in {new_edit_lines=}")
| ===========unchanged ref 0===========
at: coeditor._utils
add_line_numbers(code: str, start: int=1)
at: coeditor.common
SEP = "-" * 80
random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2]
random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1]
print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None
at: coeditor.encoding
tk_splitlines(tks: TokenSeq)
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
change_to_tokens(change: Change[str]) -> TokenSeq
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
from_change_tks(change_tks: TokenSeq) -> tuple[TokenSeq, "TkDelta"]
at: random.Random
random() -> float
at: tests.test_edits
get_rng()
===========unchanged ref 1===========
at: tests.test_edits.TestChangeIdentities
cases: dict[str, Change[str]] = {
"empty": Modified("", ""),
"generation": Modified("", "123"),
"add a new line": Modified("", "\n"),
"add a new line at end": Modified("a", "a\n"),
"added": Added("a\nb\nc\n"),
"deleted": Deleted("a\nb\nc\n"),
"no change": Modified(
dedent(
"""\
def f1():
x = 1
"""
),
dedent(
"""\
def f1():
x = 1
"""
),
),
"unchanged=True": Modified.from_unchanged(
dedent(
"""\
def f1():
x = 1
"""
),
),
# this test case cannot pass for some reason. Tokenizer bug?
# "leading_whitespace": Modified.from_unchanged(" ..."),
"replace last": Modified(
dedent(
"""\
def f1():
x = 1"""
),
dedent(
"""\
def f1():
x = 2
return x * 2"""
),
),
"no special tokens": Modified(
dedent(
"""\
def f1():
x = 1
y = 2
z = x + y
return z
def f2():
f1()"""
),
dedent(
"""\
# new comment
def f_new():
x = 1
if x > 0:
y = 2 * x
y *= 2
z = x + y
return z
def f2():
f1()
return f_new() + a
new_var = 0
"""
),
),
"with special tokens": Modified(
dedent(
"""\
def f1():
x = "<add>"
</s>
===========changed ref 0===========
# module: coeditor.common
+ def keystroke_cost(
+ input: str,
+ output: str,
+ cursor_jump_cost: int = 4,
+ init_curosr_dis: int | None = None, # default to cursor_jump_cost
+ ):
+ """
+ A string distance metric that takes the cost of moving the cursor into account.
+ This metric aims to approximate the number of keystrokes required to
+ transform the input string into the output string.
+
+ Starting with the state `i = 0, j = 0, cursor_dis = init_curosr_dis, deleting = False`,
+ the cost is computed using the optimal combination of the following operations:
+ - M: match char (cost=0), require `input[i] == output[j], not deleting`, cause
+ `i += 1, j += 1, cursor_dis += 1`
+ - D: delete input char (cost=1), require `cursor_dis == 0, not deleting`, cause `i += 1`.
+ - A: add output char (cost=1), require `cursor_dis == 0, not deleting`, cause `j += 1`.
+ - C: bring cursor here (cost=min(cursor_dis, cursor_jump_cost)), require nothing, cause `cursor_dis = 0`.
+ - S: start deleting (cost=1), require `cursor_dis == 0, not deleting`, cause `deleting = True`.
+ - K: keep deleting (cost=0), require `deleting`, cause `i += 1`.
+ - E: end deleting (cost=1), require `cursor_dis == 0, deleting`, cause `deleting = False`.
+
+ Worst-case complexity: `len(input) * len(output) * cursor_jump_cost`.
+
+ Unmodeled operations:
+ - Copy and paste
+ """
+ l_in = len(input)
+ l_out = len(output)
+ MaxCost = l_in + l_out + cursor_jump_cost + 1000
+ CacheKey = tuple[int, int, int</s>
===========changed ref 1===========
# module: coeditor.common
+ def keystroke_cost(
+ input: str,
+ output: str,
+ cursor_jump_cost: int = 4,
+ init_curosr_dis: int | None = None, # default to cursor_jump_cost
+ ):
# offset: 1
<s>Cost = l_in + l_out + cursor_jump_cost + 1000
+ CacheKey = tuple[int, int, int, bool]
+ cache = dict[CacheKey, int]()
+
+ def rec(i: int, j: int, cursor_dis: int, deleting: bool) -> int:
+ "Return the cost of matching input[i:] and output[j:]]."
+ if i > l_in or j > l_out:
+ return MaxCost
+ if i == l_in:
+ if j == l_out and not deleting:
+ return 0 # don't need to care about cursor in this case
+ # type out all remaining chars
+ return cursor_dis + int(deleting) + (l_out - j)
+
+ key = (i, j, cursor_dis, deleting)
+ if key in cache:
+ return cache[key]
+
+ if deleting:
+ # end deleting
+ if cursor_dis > 0:
+ cost0 = 1 + cursor_dis + rec(i, j, cursor_dis=0, deleting=False)
+ else:
+ cost0 = MaxCost # not an option
+ # keep deleting
+ new_dis = min(cursor_dis + 1, cursor_jump_cost)
+ cost1 = rec(i + 1, j, new_dis, deleting=True)
+
+ best_cost = min(cost0, cost1)
+ else:
+ # match char
+ if i < l_in and j < l_out and input[i] == output[j]:
+ new_dis =</s> |
tests.test_edits/test_edit_lines_transform | Modified | temp-1 | fc86e1e97dcfdef8f300aa6cbc370b769496fcf8 | Implement keystroke_cost. | <0>:<add> new_target_lines = tk_delta.get_new_line_ids(later_lines)
| # module: tests.test_edits
def test_edit_lines_transform():
ex_code = dedent(
"""\
a
b
c
d
e
"""
)
ex_delta = StrDelta(
{
1: ("+1",),
2: ("+2",),
3: ("-",),
4: ("+d1", "+d2", "+d3"),
}
)
after_expect = dedent(
"""\
a
+1
b
+2
c
-d
+d1
+d2
+d3
e
"""
)
tk_delta = ex_delta.to_tk_delta()
all_lines = range(6)
+ new_target_lines = tk_delta.get_new_line_ids(all_lines)
- new_target_lines = tk_delta.get_new_target_lines(all_lines)
expect = (0, 1, 2, 3, 4, 6, 7, 8, 9, 10)
assert_eq(new_target_lines, expect)
later_lines = range(3, 6)
- new_target_lines = tk_delta.get_new_target_lines(later_lines)
<0> # only the last 5 lines should be edited
expect = (6, 7, 8, 9, 10)
assert_eq(new_target_lines, expect)
| ===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor.encoding
StrDelta(_deltas: Mapping[int, tuple[str, ...]])
at: coeditor.encoding.StrDelta
_deltas: Mapping[int, tuple[str, ...]]
to_tk_delta() -> "TkDelta"
at: coeditor.encoding.TkDelta
get_new_line_ids(line_ids: Sequence[int]) -> Sequence[int]
at: textwrap
dedent(text: str) -> str
===========changed ref 0===========
# module: tests.test_edits
class TestChangeIdentities:
def test_get_new_target_lines(self):
rng = get_rng()
for name, c in self.cases.items():
original, delta = TkDelta.from_change_tks(change_to_tokens(c))
n_origin_lines = len(tk_splitlines(original))
edit_lines = range(n_origin_lines + 1)
keys = tuple(delta.keys())
for _ in range(100):
n_keys = int(len(keys) * rng.random())
sub_keys = random_subset(keys, n_keys)
sub_keys.sort()
delta1, delta2 = delta.decompose_for_change(sub_keys)
+ new_edit_lines = delta1.get_new_line_ids(edit_lines)
- new_edit_lines = delta1.get_new_target_lines(edit_lines)
new_edit_set = set(new_edit_lines)
for l in delta2.changed_lines():
if l not in new_edit_set and l != n_origin_lines:
print_err(f"{edit_lines=}")
print_err("original", SEP)
print_err(add_line_numbers(decode_tokens(original), start=0))
print_err(SEP)
print_err(f"{delta=}")
print_err(f"{sub_keys=}")
print_err(f"{delta1=}")
print_err("step1", SEP)
step1 = delta1.apply_to_change(original)
print_err(add_line_numbers(decode_tokens(step1), start=0))
print_err(SEP)
print_err(f"{new_edit_lines=}")
print_err(f"{delta2=}")
raise AssertionError(f"{l=} not in {new_edit_lines=}")
===========changed ref 1===========
# module: coeditor.common
+ def keystroke_cost(
+ input: str,
+ output: str,
+ cursor_jump_cost: int = 4,
+ init_curosr_dis: int | None = None, # default to cursor_jump_cost
+ ):
+ """
+ A string distance metric that takes the cost of moving the cursor into account.
+ This metric aims to approximate the number of keystrokes required to
+ transform the input string into the output string.
+
+ Starting with the state `i = 0, j = 0, cursor_dis = init_curosr_dis, deleting = False`,
+ the cost is computed using the optimal combination of the following operations:
+ - M: match char (cost=0), require `input[i] == output[j], not deleting`, cause
+ `i += 1, j += 1, cursor_dis += 1`
+ - D: delete input char (cost=1), require `cursor_dis == 0, not deleting`, cause `i += 1`.
+ - A: add output char (cost=1), require `cursor_dis == 0, not deleting`, cause `j += 1`.
+ - C: bring cursor here (cost=min(cursor_dis, cursor_jump_cost)), require nothing, cause `cursor_dis = 0`.
+ - S: start deleting (cost=1), require `cursor_dis == 0, not deleting`, cause `deleting = True`.
+ - K: keep deleting (cost=0), require `deleting`, cause `i += 1`.
+ - E: end deleting (cost=1), require `cursor_dis == 0, deleting`, cause `deleting = False`.
+
+ Worst-case complexity: `len(input) * len(output) * cursor_jump_cost`.
+
+ Unmodeled operations:
+ - Copy and paste
+ """
+ l_in = len(input)
+ l_out = len(output)
+ MaxCost = l_in + l_out + cursor_jump_cost + 1000
+ CacheKey = tuple[int, int, int</s>
===========changed ref 2===========
# module: coeditor.common
+ def keystroke_cost(
+ input: str,
+ output: str,
+ cursor_jump_cost: int = 4,
+ init_curosr_dis: int | None = None, # default to cursor_jump_cost
+ ):
# offset: 1
<s>Cost = l_in + l_out + cursor_jump_cost + 1000
+ CacheKey = tuple[int, int, int, bool]
+ cache = dict[CacheKey, int]()
+
+ def rec(i: int, j: int, cursor_dis: int, deleting: bool) -> int:
+ "Return the cost of matching input[i:] and output[j:]]."
+ if i > l_in or j > l_out:
+ return MaxCost
+ if i == l_in:
+ if j == l_out and not deleting:
+ return 0 # don't need to care about cursor in this case
+ # type out all remaining chars
+ return cursor_dis + int(deleting) + (l_out - j)
+
+ key = (i, j, cursor_dis, deleting)
+ if key in cache:
+ return cache[key]
+
+ if deleting:
+ # end deleting
+ if cursor_dis > 0:
+ cost0 = 1 + cursor_dis + rec(i, j, cursor_dis=0, deleting=False)
+ else:
+ cost0 = MaxCost # not an option
+ # keep deleting
+ new_dis = min(cursor_dis + 1, cursor_jump_cost)
+ cost1 = rec(i + 1, j, new_dis, deleting=True)
+
+ best_cost = min(cost0, cost1)
+ else:
+ # match char
+ if i < l_in and j < l_out and input[i] == output[j]:
+ new_dis =</s>
===========changed ref 3===========
# module: coeditor.common
+ def keystroke_cost(
+ input: str,
+ output: str,
+ cursor_jump_cost: int = 4,
+ init_curosr_dis: int | None = None, # default to cursor_jump_cost
+ ):
# offset: 2
<s>cursor_dis + 1, cursor_jump_cost)
+ cost0 = rec(i + 1, j + 1, new_dis, False)
+ else:
+ cost0 = MaxCost # not an option
+ # delete input char
+ cost1 = 1 + rec(i + 1, j, 0, False) + cursor_dis
+ # add output char
+ cost2 = 1 + rec(i, j + 1, 0, False) + cursor_dis
+ # start deleting
+ cost3 = 1 + rec(i, j, 0, True) + cursor_dis
+
+ best_cost = min(cost0, cost1, cost2, cost3)
+ cache[key] = best_cost
+ return best_cost
+
+ if init_curosr_dis is None:
+ init_curosr_dis = cursor_jump_cost
+
+ return rec(0, 0, init_curosr_dis, False)
+ |
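A small usage sketch for `keystroke_cost` (assuming it is importable from `coeditor.common`). Exact costs depend on `cursor_jump_cost` and the initial cursor distance, so only the identity case is asserted:

```python
from coeditor.common import keystroke_cost

# Identical strings: every char is a free match, so the cost is 0.
assert keystroke_cost("return x", "return x") == 0

# A short insertion pays to bring the cursor over plus one key per new char.
print(keystroke_cost("f(a, b)", "f(a, b, c)"))

# Replacing a long identifier is dominated by start/end-delete plus the
# retyped chars, not one delete keystroke per removed char.
print(keystroke_cost("x = compute_value()", "x = f()"))
```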
coeditor.c3problem/C3GeneratorCache.create_problem | Modified | temp-1 | 6221bbafc47d1d86eb72669e4373c1cf4cdc6d9b | Add multi-round editing gain. | <0>:<add> code_span = replace(
| # module: coeditor.c3problem
class C3GeneratorCache:
def create_problem(
self,
target: ChangedSpan,
target_lines: Sequence[int],
changed: Mapping[ModuleName, JModuleChange],
target_usages: LineUsageAnalysis,
src_info: SrcInfo,
) -> C3Problem:
<s>module)
all_mc = [target_mc] + list(changed.values())
for mc in all_mc:
is_target_mc = mc.module_change.earlier.mname == module
for cspan in mc.changed:
if not is_target_mc or cspan.line_range != target.line_range:
relevant_changes.append(self.to_code_span(cspan))
code_span = self.to_code_span(target)
changed_code = code_span.delta.apply_to_change(code_span.original.tolist())
target_set = set(target_lines)
line_ids = list[int]()
input_l = target.line_range[0]
for i, tks in enumerate(tk_splitlines(changed_code)):
if tks and tks[0] == Del_id:
continue
if input_l in target_set:
line_ids.append(i)
input_l += 1
- code_span = dataclasses.replace(
<0> code_span, original=TkArray.new(changed_code), delta=TkDelta.empty()
)
relevant_unchanged = self.get_relevant_unchanged(code_span, target_usages)
relevant_changes = self.sort_changes(
code_span, relevant_unchanged, relevant_changes
)
prob = C3Problem(
code_span,
line_ids,
relevant_changes=relevant_changes,
relevant_unchanged=relevant_unchanged,
change_type=target.change.map(lambda _: None),
src_info=src_info,
)
return prob
| ===========above chunk 0===========
# module: coeditor.c3problem
class C3GeneratorCache:
def create_problem(
self,
target: ChangedSpan,
target_lines: Sequence[int],
changed: Mapping[ModuleName, JModuleChange],
target_usages: LineUsageAnalysis,
src_info: SrcInfo,
) -> C3Problem:
# offset: -1
relevant_changes = list[ChangedCodeSpan]()
changed = dict(changed)
module = target.module
target_mc = changed.pop(module)
all_mc = [target_mc] + list(changed.values())
for mc in all_mc:
</s>
===========unchanged ref 0===========
at: coeditor.c3problem
ChangedCodeSpan(headers: Sequence[ChangedHeader], original: TkArray, delta: TkDelta, line_range: LineRange, module: ModuleName)
SrcInfo(map: Mapping[_KT, _VT], **kwargs: _VT)
SrcInfo(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
SrcInfo(**kwargs: _VT)
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
LineUsageAnalysis(line2usages: Mapping[int, Sequence[PyDefinition]])
at: coeditor.c3problem.C3GeneratorCache
get_relevant_unchanged(this_change: ChangedCodeSpan, line_usages: LineUsageAnalysis)
max_distance_penalty = 1000
usage_bonus = 2000
sort_changes(target: ChangedCodeSpan, used_defs: Mapping[PyFullName, PyDefinition], changed: Sequence[ChangedCodeSpan]) -> Sequence[ChangedCodeSpan]
to_code_span(span: ChangedSpan)
at: coeditor.c3problem.C3GeneratorCache.__init__
self._header_cache = dict[ProjectPath, ChangedHeader]()
self._pre_def_cache = dict[ProjectPath, list[ChangedCodeSpan]]()
self._cspan_cache = dict[tuple[ModuleName, LineRange], ChangedCodeSpan]()
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.change.Added
after: E1
at: coeditor.change.Deleted
before: E1
at: coeditor.change.Modified
before: E1
===========unchanged ref 1===========
after: E1
unchanged: bool = False
at: coeditor.common
ModuleName = str
at: coeditor.common.ProjectPath
module: ModuleName
path: ElemPath
at: coeditor.encoding
Del_id = get_tk_id(Del)
tk_splitlines(tks: TokenSeq)
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
apply_to_change(change: TokenSeq) -> TokenSeq
empty() -> "TkDelta"
at: coeditor.scoped_changes
ChangedSpan(change: Change[str], parent_scopes: Sequence[Change[ChangeScope]], line_range: LineRange)
JModuleChange(module_change: Change[JModule], changed: Sequence[ChangedSpan])
at: coeditor.scoped_changes.ChangedSpan
change: Change[str]
parent_scopes: Sequence[Change[ChangeScope]]
line_range: LineRange
at: coeditor.scoped_changes.JModule
mname: ModuleName
tree: ptree.Module
at: coeditor.scoped_changes.JModuleChange
module_change: Change[JModule]
changed: Sequence[ChangedSpan]
at: coeditor.tk_array
TkArray()
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
new(tks: Sequence[int]) -> "TkArray"
at: dataclasses
replace(obj: _T, **changes: Any) -> _T
at: typing
Mapping = _alias(collections.abc.Mapping, 2)
Sequence = _alias(collections.abc.Sequence, 1)
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
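The `line_ids` loop in `create_problem` converts editor line numbers (`target_lines`) into token-line indices of the changed code, skipping deleted lines because they no longer exist after the edit. A standalone trace of that bookkeeping (plain strings standing in for token lines and `Del_id`):

```python
DEL = "<del>"
changed_lines = ["a", DEL + " b", "b2", "c", "d"]  # one entry per token line
target_set = {11, 13}  # editor lines requested for editing
input_l = 10           # first editor line covered by this span

line_ids: list[int] = []
for i, line in enumerate(changed_lines):
    if line.startswith(DEL):
        continue  # deleted lines don't consume an editor line number
    if input_l in target_set:
        line_ids.append(i)
    input_l += 1

print(line_ids)  # [2, 4]: lines 11 and 13 land past the deleted token line
```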
===========changed ref 0===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class ChangedCodeSpan:
+ def get_change(self) -> Modified[str]:
+ change_tks = self.delta.apply_to_change(self.original.tolist())
+ return tokens_to_change(change_tks)
+
===========changed ref 1===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
+ def restrict_span_changes(self):
+ "restrict the changes in the span to the edit lines"
+ eids = self.edit_line_ids
+ delta = self.span.delta.for_input_range((eids[0], eids[-1] + 1))
+ span = replace(self.span, delta=delta)
+ return replace(self, span=span)
+
===========changed ref 2===========
# module: coeditor.encoding
class TokenizedEdit(ABC):
+ @classmethod
+ def show_label(cls, i: int):
+ return f" <{i}>" if i <= 9 else f"<{i}>"
+
===========changed ref 3===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
+ def get(self, key: DeltaKey) -> TokenSeq | None:
+ try:
+ return self[key]
+ except (KeyError, IndexError):
+ return None
+
===========changed ref 4===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def changed_lines(self) -> Collection[int]:
+ lines = self._deltas.keys()
- return self._deltas.keys()
+ assert all(x for x in self._deltas.values())
+ return lines
===========changed ref 5===========
# module: coeditor.encoding
class TokenizedEdit(ABC):
+ @classmethod
+ def show_line(cls, tks: TokenSeq):
+ if tks and tks[0] == Add_id:
+ return "+ " + decode_tokens(tks[1:])
+ elif tks and tks[0] == Del_id:
+ return "- " + decode_tokens(tks[1:])
+ else:
+ return " " + decode_tokens(tks)
+
===========changed ref 6===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
+ def get_new_line_ids(self, line_ids: Sequence[int]) -> Sequence[int]:
+ """Given a list of lines to edit, return the corresponding new lines to edit
+ after applying this delta."""
+ if not line_ids:
+ return tuple()
+ last_line = line_ids[-1]
+ line_set = set(line_ids)
+ new_edit_lines = list[int]()
+ offset = 0
+ for l in range(last_line + 1):
+ deleted = False
+ for act in self.get_line_change(l):
+ if act[0] == Add_id:
+ if l in line_set:
+ new_edit_lines.append(l + offset)
+ offset += 1
+ elif act[0] == Del_id:
+ deleted = True
+ if not deleted and l in line_set:
+ new_edit_lines.append(l + offset)
+ return tuple(new_edit_lines)
+ |
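The offset bookkeeping above is exactly what `test_edit_lines_transform` earlier in this dump asserts. A plain-dict sketch of the same algorithm, reproducing that test's expected outputs (string actions stand in for token sequences):

```python
def get_new_line_ids(delta: dict[int, tuple[str, ...]], line_ids: range) -> tuple[int, ...]:
    """Map line ids to their positions after applying the delta."""
    if not line_ids:
        return ()
    line_set, out, offset = set(line_ids), [], 0
    for l in range(line_ids[-1] + 1):
        deleted = False
        for act in delta.get(l, ()):
            if act.startswith("+"):
                if l in line_set:
                    out.append(l + offset)
                offset += 1  # each added line shifts everything below it
            elif act.startswith("-"):
                deleted = True
        if not deleted and l in line_set:
            out.append(l + offset)
    return tuple(out)

delta = {1: ("+1",), 2: ("+2",), 3: ("-",), 4: ("+d1", "+d2", "+d3")}
assert get_new_line_ids(delta, range(6)) == (0, 1, 2, 3, 4, 6, 7, 8, 9, 10)
assert get_new_line_ids(delta, range(3, 6)) == (6, 7, 8, 9, 10)
```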
coeditor.c3problem/C3ProblemSimpleSplit.transform | Modified | temp-1 | 6221bbafc47d1d86eb72669e4373c1cf4cdc6d9b | Add multi-round editing gain. | <0>:<add> sub_prob = replace(
| # module: coeditor.c3problem
@dataclass
class C3ProblemSimpleSplit(C3ProblemTransform):
def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
delta = prob.span.delta
l_range = prob.edit_line_ids
assert isinstance(l_range, range)
start, stop = l_range.start, l_range.stop
problems = list[C3Problem]()
new_trans = prob.transformations + ("split",)
for i in range(start, stop, self.max_lines_to_edit):
j = min(i + self.max_lines_to_edit, stop)
sub_delta = delta.for_input_range((i, j))
if sub_delta.num_changes() > 0:
- sub_prob = dataclasses.replace(
<0> prob, edit_line_ids=range(i, j), transformations=new_trans
)
problems.append(sub_prob)
if len(problems) >= self.max_split_factor:
break
return problems
| ===========unchanged ref 0===========
at: abc
abstractmethod(callable: _FuncT) -> _FuncT
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
C3ProblemTransform()
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.ChangedCodeSpan
delta: TkDelta
at: dataclasses
dataclass(_cls: Type[_T]) -> Type[_T]
dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]]
dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class ChangedCodeSpan:
+ def get_change(self) -> Modified[str]:
+ change_tks = self.delta.apply_to_change(self.original.tolist())
+ return tokens_to_change(change_tks)
+
===========changed ref 1===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
+ def restrict_span_changes(self):
+ "restrict the changes in the span to the edit lines"
+ eids = self.edit_line_ids
+ delta = self.span.delta.for_input_range((eids[0], eids[-1] + 1))
+ span = replace(self.span, delta=delta)
+ return replace(self, span=span)
+
===========changed ref 2===========
# module: coeditor.c3problem
class C3GeneratorCache:
def create_problem(
self,
target: ChangedSpan,
target_lines: Sequence[int],
changed: Mapping[ModuleName, JModuleChange],
target_usages: LineUsageAnalysis,
src_info: SrcInfo,
) -> C3Problem:
relevant_changes = list[ChangedCodeSpan]()
changed = dict(changed)
module = target.module
target_mc = changed.pop(module)
all_mc = [target_mc] + list(changed.values())
for mc in all_mc:
is_target_mc = mc.module_change.earlier.mname == module
for cspan in mc.changed:
if not is_target_mc or cspan.line_range != target.line_range:
relevant_changes.append(self.to_code_span(cspan))
code_span = self.to_code_span(target)
changed_code = code_span.delta.apply_to_change(code_span.original.tolist())
target_set = set(target_lines)
line_ids = list[int]()
input_l = target.line_range[0]
for i, tks in enumerate(tk_splitlines(changed_code)):
if tks and tks[0] == Del_id:
continue
if input_l in target_set:
line_ids.append(i)
input_l += 1
+ code_span = replace(
- code_span = dataclasses.replace(
code_span, original=TkArray.new(changed_code), delta=TkDelta.empty()
)
relevant_unchanged = self.get_relevant_unchanged(code_span, target_usages)
relevant_changes = self.sort_changes(
code_span, relevant_unchanged, relevant_changes
)
prob = C3Problem(
code_span,
line_ids,
relevant_changes=relevant_changes,
relevant</s>
===========changed ref 3===========
# module: coeditor.c3problem
class C3GeneratorCache:
def create_problem(
self,
target: ChangedSpan,
target_lines: Sequence[int],
changed: Mapping[ModuleName, JModuleChange],
target_usages: LineUsageAnalysis,
src_info: SrcInfo,
) -> C3Problem:
# offset: 1
<s> = C3Problem(
code_span,
line_ids,
relevant_changes=relevant_changes,
relevant_unchanged=relevant_unchanged,
change_type=target.change.map(lambda _: None),
src_info=src_info,
)
return prob
===========changed ref 4===========
# module: coeditor.encoding
class TokenizedEdit(ABC):
+ @classmethod
+ def show_label(cls, i: int):
+ return f" <{i}>" if i <= 9 else f"<{i}>"
+
===========changed ref 5===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
+ def get(self, key: DeltaKey) -> TokenSeq | None:
+ try:
+ return self[key]
+ except (KeyError, IndexError):
+ return None
+
===========changed ref 6===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
def changed_lines(self) -> Collection[int]:
+ lines = self._deltas.keys()
- return self._deltas.keys()
+ assert all(x for x in self._deltas.values())
+ return lines
===========changed ref 7===========
# module: coeditor.encoding
class TokenizedEdit(ABC):
+ @classmethod
+ def show_line(cls, tks: TokenSeq):
+ if tks and tks[0] == Add_id:
+ return "+ " + decode_tokens(tks[1:])
+ elif tks and tks[0] == Del_id:
+ return "- " + decode_tokens(tks[1:])
+ else:
+ return " " + decode_tokens(tks)
+
===========changed ref 8===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
+ def get_new_line_ids(self, line_ids: Sequence[int]) -> Sequence[int]:
+ """Given a list of lines to edit, return the corresponding new lines to edit
+ after applying this delta."""
+ if not line_ids:
+ return tuple()
+ last_line = line_ids[-1]
+ line_set = set(line_ids)
+ new_edit_lines = list[int]()
+ offset = 0
+ for l in range(last_line + 1):
+ deleted = False
+ for act in self.get_line_change(l):
+ if act[0] == Add_id:
+ if l in line_set:
+ new_edit_lines.append(l + offset)
+ offset += 1
+ elif act[0] == Del_id:
+ deleted = True
+ if not deleted and l in line_set:
+ new_edit_lines.append(l + offset)
+ return tuple(new_edit_lines)
+ |
coeditor.c3problem/C3ProblemChangeDropout.transform | Modified | temp-1 | 6221bbafc47d1d86eb72669e4373c1cf4cdc6d9b | Add multi-round editing gain. | <0>:<add> sub_prob = replace(
| # module: coeditor.c3problem
@dataclass
class C3ProblemChangeDropout(C3ProblemTransform):
def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
<s>.new(delta1.apply_to_change(original.tolist()))
new_trans = prob.transformations + ("split", "dropout")
- new_span = dataclasses.replace(
+ new_span = replace(prob.span, original=new_original, delta=delta2)
- prob.span, original=new_original, delta=delta2
- )
else:
new_trans = prob.transformations + ("split",)
new_span = prob.span
delta1 = None
delta2_groups = delta.change_groups()
prob_and_n = list[tuple[C3Problem, int]]()
for i in range(start, stop, self.max_lines_to_edit):
j = min(i + self.max_lines_to_edit, stop)
edit_line_ids = range(i, j)
if delta1 is not None:
+ edit_line_ids = delta1.get_new_line_ids(edit_line_ids)
- edit_line_ids = delta1.get_new_target_lines(edit_line_ids)
line_set = set(edit_line_ids)
n_groups = sum(any(key[0] in line_set for key in g) for g in delta2_groups)
if n_groups > 0:
- sub_prob = dataclasses.replace(
<0> prob,
span=new_span,
edit_line_ids=edit_line_ids,
transformations=new_trans,
)
prob_and_n.append((sub_prob, n_groups))
# return the problems with the most changes
prob_and_n.sort(key=lambda p: p[1], reverse=True)
probs = [p[0] for p in prob_and_n]
return probs[: self.max_split_factor]
| ===========above chunk 0===========
# module: coeditor.c3problem
@dataclass
class C3ProblemChangeDropout(C3ProblemTransform):
def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
# offset: -1
<s> should_dropout = len(grouped_keys) >= 2
if should_dropout:
n_to_drop = int(
len(grouped_keys) * random.random() * self.max_dropout_ratio
)
assert n_to_drop < len(grouped_keys)
keys_to_drop = join_list(
random_subset(grouped_keys, n_to_drop, rng=self._rng)
)
else:
keys_to_drop = []
if keys_to_drop:
delta1, delta2 = delta.decompose_for_change(keys_to_drop)
if random.random() < self._test_prob:
result1 = delta2.apply_to_change(
delta1.apply_to_change(original.tolist())
)
result2 = delta.apply_to_change(original.tolist())
code1 = tokens_to_change(result1).after
code2 = tokens_to_change(result2).after
if code1 != code2:
print_sections(
("result1", decode_tokens(result1)),
("result2", decode_tokens(result2)),
("delta", str(delta)),
("keys_to_drop", str(keys_to_drop)),
("delta1", str(delta1)),
("delta2", str(delta2)),
)
raise AssertionError("decompose_for_change failed.")
delta2_groups = delta2.change_groups()
if not delta2_groups:
print_err(f"{delta=}, {keys_to_drop=}, {delta1=}")
raise AssertionError("Empty delta2_groups")
new_original = TkArray.new(delta1.apply_to_change(original.tolist()))
new_trans = prob.transformations + ("</s>
===========above chunk 1===========
# module: coeditor.c3problem
@dataclass
class C3ProblemChangeDropout(C3ProblemTransform):
def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
# offset: -2
original = prob.span.original
delta = prob.span.delta
l_range = prob.edit_line_ids
assert isinstance(l_range, range)
start, stop = l_range.start, l_range.stop
grouped_keys = delta.change_groups()
should_dropout = len(grouped_keys) >= 2
if should_dropout:
n_to_drop = int(
</s>
===========unchanged ref 0===========
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.C3ProblemTransform
transform(self, prob: C3Problem) -> Sequence[C3Problem]
at: coeditor.c3problem.ChangedCodeSpan
original: TkArray
delta: TkDelta
at: coeditor.change.Modified
after: E1
at: coeditor.common
join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
print_sections(*, sep: str=SEP, file: TextIO=sys.stdout) -> None
random_subset(all: Mapping[T1, T2], n: int, rng: random.Random | int | None=None) -> dict[T1, T2]
random_subset(all: Sequence[T1], n: int, rng: random.Random | int | None=None) -> list[T1]
print_err(*, sep: Optional[str]=..., end: Optional[str]=..., flush: bool=...) -> None
at: coeditor.encoding
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
tokens_to_change(tokens: TokenSeq) -> Modified[str]
at: coeditor.encoding.TkDelta
apply_to_change(change: TokenSeq) -> TokenSeq
decompose_for_change(first_keys: Collection[DeltaKey]) -> tuple[Self, Self]
change_groups() -> Sequence[tuple[DeltaKey, ...]]
===========unchanged ref 1===========
at: coeditor.tk_array
TkArray()
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
new(tks: Sequence[int]) -> "TkArray"
at: dataclasses
replace(obj: _T, **changes: Any) -> _T
at: random
Random(x: Any=...)
random = _inst.random
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor.c3problem
@dataclass
class C3ProblemSimpleSplit(C3ProblemTransform):
def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
delta = prob.span.delta
l_range = prob.edit_line_ids
assert isinstance(l_range, range)
start, stop = l_range.start, l_range.stop
problems = list[C3Problem]()
new_trans = prob.transformations + ("split",)
for i in range(start, stop, self.max_lines_to_edit):
j = min(i + self.max_lines_to_edit, stop)
sub_delta = delta.for_input_range((i, j))
if sub_delta.num_changes() > 0:
+ sub_prob = replace(
- sub_prob = dataclasses.replace(
prob, edit_line_ids=range(i, j), transformations=new_trans
)
problems.append(sub_prob)
if len(problems) >= self.max_split_factor:
break
return problems
===========changed ref 1===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class ChangedCodeSpan:
+ def get_change(self) -> Modified[str]:
+ change_tks = self.delta.apply_to_change(self.original.tolist())
+ return tokens_to_change(change_tks)
+
===========changed ref 2===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class C3Problem:
+ def restrict_span_changes(self):
+ "restrict the changes in the span to the edit lines"
+ eids = self.edit_line_ids
+ delta = self.span.delta.for_input_range((eids[0], eids[-1] + 1))
+ span = replace(self.span, delta=delta)
+ return replace(self, span=span)
+ |
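===========editor note: delta decomposition sketch===========
The `decompose_for_change` round-trip asserted above (apply `delta1`, then `delta2`, and compare against applying the full delta) can be illustrated with a much smaller model of a delta. The sketch below is a hypothetical insertions-only stand-in for `TkDelta`, written to show why the second half of the split must be line-shifted; it is not the real implementation.

def apply(lines: list[str], delta: dict[int, list[str]]) -> list[str]:
    # insert delta[i] immediately before original line i
    out: list[str] = []
    for i, line in enumerate(lines):
        out.extend(delta.get(i, []))
        out.append(line)
    out.extend(delta.get(len(lines), []))  # trailing insertions
    return out

def decompose(delta: dict[int, list[str]], first_keys: set[int]):
    d1 = {k: v for k, v in delta.items() if k in first_keys}
    d2 = {}
    for k, v in delta.items():
        if k in first_keys:
            continue
        # shift by the number of lines d1 inserts above position k
        shift = sum(len(v1) for k1, v1 in d1.items() if k1 < k)
        d2[k + shift] = v
    return d1, d2

lines = ["a", "b", "c"]
delta = {0: ["x"], 2: ["y", "z"]}
d1, d2 = decompose(delta, {0})
assert apply(apply(lines, d1), d2) == apply(lines, delta)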
coeditor.model/RetrievalEditorModel.predict_on_batch | Modified | temp-1 | 6221bbafc47d1d86eb72669e4373c1cf4cdc6d9b | Add multi-round editing gain. | <0>:<add> assert not isinstance(gen_out, LongTensor)
| # module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
@torch.autocast("cuda")
def predict_on_batch(
self,
batch: dict,
problems: Sequence[C3Problem],
dec_args: DecodingArgs,
n_solutions: int = 1,
) -> list[list[PredictedChange]]:
<s>_ids = torch.LongTensor(input_ids)
+ with timed("model.generate"), tqdm(
+ total=dec_args.max_output_tks, disable=not dec_args.do_sample
+ ) as pbar:
- with timed("model.generate"), tqdm(total=dec_args.max_output_tks) as pbar:
gen_out = self.generate(
input_ids.to(self.device),
references=batch["references"],
query_ref_list=batch["query_ref_list"],
num_return_sequences=N,
return_dict_in_generate=True,
output_scores=True,
**gen_args,
tqdm=pbar,
)
- assert not isinstance(gen_out, torch.LongTensor)
<0> out_tks = gen_out["sequences"]
if isinstance(out_tks, torch.Tensor):
out_tks = out_tks.tolist()
out_tks = [remove_pad_ids(x) for x in out_tks]
assert isinstance(out_tks, list)
logging.debug("Max out length:", max(len(x) for x in out_tks))
assert_eq(len(out_tks), len(problems) * N)
problems = join_list([[x] * N for x in problems])
if (pred_scores := gen_out.get("sequences_scores", None)) is None:
pred_scores = [0.0] * len(out_tks)
if use_sampling:
pred_weights = [1.0 / N] * len(out_tks)
</s> | ===========above chunk 0===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
@torch.autocast("cuda")
def predict_on_batch(
self,
batch: dict,
problems: Sequence[C3Problem],
dec_args: DecodingArgs,
n_solutions: int = 1,
) -> list[list[PredictedChange]]:
# offset: -1
<s> search-based techniques, the `weights` should equal the solutions' probabilities.
+ """
assert preds
groups = groupby(
range(len(preds)),
keyfunc=lambda i: normalize_code_by_ast(preds[i].after),
)
groups = list(groups.values())
for group in groups:
# within each group, sort by score
group.sort(key=lambda i: scores[i], reverse=True)
groups.sort(
key=lambda g: (sum(weights[i] for i in g), scores[g[0]]), reverse=True
)
return [
PredictedChange(
preds[g[0]], out_tks[g[0]], sum(weights[i] for i in g), len(g)
)
for g in groups
]
use_sampling = dec_args.marginalize_samples > 1
if use_sampling:
assert_eq(dec_args.do_sample, True)
assert_eq(dec_args.num_beams, 1)
N = dec_args.marginalize_samples
else:
N = dec_args.num_beams or 1
gen_args = dec_args.to_model_args()
input_ids = batch["input_ids"]
+ if not isinstance(input_ids, LongTensor):
- if not isinstance(input_ids, torch.LongTensor):
+ input_ids = LongTensor(input_ids)
- input_ids = torch.LongTensor(input_ids)
+ with timed("model.generate"), tqdm(
+ total=</s>
===========above chunk 1===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
@torch.autocast("cuda")
def predict_on_batch(
self,
batch: dict,
problems: Sequence[C3Problem],
dec_args: DecodingArgs,
n_solutions: int = 1,
) -> list[list[PredictedChange]]:
# offset: -2
"""
Returns nested list of shape `(batch_size, n_solutions)`.
"""
timed = self.tlogger.timed
def marginalize_preds(
preds: Sequence[Modified[str]],
out_tks: Sequence[TokenSeq],
weights: Sequence[float],
scores: Sequence[float],
) -> list[PredictedChange]:
"""For sampling techniques, all sample should have equal weights 1/N. For
+ search-based techniques, the `weights` should equal to the solutions' probabilities.
- search-based techniques, the `weights` should equal to the solutions' probabilities."""
+ """
assert preds
</s>
===========below chunk 0===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
@torch.autocast("cuda")
def predict_on_batch(
self,
batch: dict,
problems: Sequence[C3Problem],
dec_args: DecodingArgs,
n_solutions: int = 1,
) -> list[list[PredictedChange]]:
# offset: 1
<s>
if use_sampling:
pred_weights = [1.0 / N] * len(out_tks)
else:
pred_weights = [math.exp(x) for x in pred_scores]
with timed("assemble changes"):
pred_changes = list[Modified[str]]()
for prob, out in zip(problems, out_tks):
delta = TkDelta.from_output_tks(prob.edit_line_ids, out)
original = prob.span.original.tolist()
pred = tokens_to_change(delta.apply_to_change(original))
pred_changes.append(pred)
assert_eq(len(pred_changes), len(out_tks), len(pred_scores))
solutions = list[list[PredictedChange]]()
for i in range(0, len(pred_changes), N):
sols = marginalize_preds(
pred_changes[i : i + N],
out_tks[i : i + N],
pred_weights[i : i + N],
pred_scores[i : i + N],
)
solutions.append(sols[:n_solutions])
return solutions
===========unchanged ref 0===========
at: coeditor._utils
groupby(iterable: Iterable[T1], keyfunc: Callable[[T1], T2]) -> dict[T2, list[T1]]
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
timed(name: str)
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.change
Modified(before: E1, after: E1, unchanged: bool=False)
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
at: coeditor.common
TokenSeq = list[Token]
join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
normalize_code_by_ast(code: str, sort_keyargs: bool=True, remove_doc_string: bool=True) -> str
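===========editor note: marginalization sketch===========
The grouping logic in `marginalize_preds` is independent of the model: sampled outputs that normalize to the same code are merged, their weights summed, and groups ranked by total weight (ties broken by best single score in the real code). A self-contained sketch, using whitespace stripping as a crude stand-in for `normalize_code_by_ast`:

from collections import defaultdict

def marginalize(preds: list[str], weights: list[float]) -> list[tuple[str, float]]:
    groups: dict[str, list[int]] = defaultdict(list)
    for i, p in enumerate(preds):
        groups["".join(p.split())].append(i)  # normalization stand-in
    merged = [(preds[g[0]], sum(weights[i] for i in g)) for g in groups.values()]
    merged.sort(key=lambda t: t[1], reverse=True)
    return merged

print(marginalize(["x = 1", "x=1", "x = 2"], [1 / 3] * 3))
# [('x = 1', 0.666...), ('x = 2', 0.333...)]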
|
coeditor.model/RetrievalEditorModel.forward | Modified | temp-1 | 6221bbafc47d1d86eb72669e4373c1cf4cdc6d9b | Add multi-round editing gain. | <0>:<add> loss_fct = nn.CrossEntropyLoss(ignore_index=-100, reduction=loss_reduction)
| <s> decoder_input_ids: LongTensor | None = None,
decoder_inputs_embeds: Tensor | None = None,
decoder_attention_mask: Tensor | None = None,
past_key_values=None,
use_cache=None,
+ loss_reduction: LossReduction = "mean",
# not used args below
output_attentions=None,
output_hidden_states=None,
return_dict=None,
tqdm=None,
) -> Seq2SeqLMOutput:
<s>.py#L586
sequence_output = sequence_output * (self.model_dim**-0.5)
lm_logits = self.lm_head(sequence_output)
except torch.cuda.OutOfMemoryError: # type: ignore
total_ref_len = sum(len(x) for x in references) if references else 0
n_references = len(references) if references else 0
if input_ids is not None:
print(f"{input_ids.shape = }")
if labels is not None:
print(f"{labels.shape = }")
print(f"{n_references = }, {total_ref_len = }")
raise
loss = None
if labels is not None:
- loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
<0> loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
return Seq2SeqLMOutput(
loss=loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=cast(Any, encoder_outputs.last_hidden_state),
# encoder_hidden_states=encoder_outputs.hidden_states,
# encoder_attentions=encoder_outputs.attentions,
)
| ===========above chunk 0===========
<s> LongTensor | None = None,
decoder_inputs_embeds: Tensor | None = None,
decoder_attention_mask: Tensor | None = None,
past_key_values=None,
use_cache=None,
+ loss_reduction: LossReduction = "mean",
# not used args below
output_attentions=None,
output_hidden_states=None,
return_dict=None,
tqdm=None,
) -> Seq2SeqLMOutput:
# offset: -1
<s>.forward"):
dec_hidden_states = batched_map(
last_states,
group_key=decode_group,
f=run_decoder,
)
decoder_outputs = BaseModelOutputWithPastAndCrossAttentions(
cast(FloatTensor, stack_pad_tensors(dec_hidden_states)[0])
)
else:
# use simple batching for decoding
with self.tlogger.timed("decoder.forward"):
decoder_outputs = self.decoder.forward(
input_ids=decoder_input_ids,
inputs_embeds=decoder_inputs_embeds,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs.last_hidden_state,
encoder_attention_mask=encoder_outputs.hidden_state_mask,
past_key_values=past_key_values,
use_cache=use_cache,
return_dict=True,
)
assert isinstance(
decoder_outputs, BaseModelOutputWithPastAndCrossAttentions
)
sequence_output = decoder_outputs[0]
if self.config.tie_word_embeddings:
# Rescale output before projecting on vocab
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
sequence_output = sequence_output * (self.model_dim**-0.5)
</s>
===========above chunk 1===========
<s> LongTensor | None = None,
decoder_inputs_embeds: Tensor | None = None,
decoder_attention_mask: Tensor | None = None,
past_key_values=None,
use_cache=None,
+ loss_reduction: LossReduction = "mean",
# not used args below
output_attentions=None,
output_hidden_states=None,
return_dict=None,
tqdm=None,
) -> Seq2SeqLMOutput:
# offset: -2
<s> dict)
s1 = _round_length_group(enc_r["decoder_input_ids"].size(0))
s2 = _round_length_group(enc_r["encoder_hidden_states"].size(0))
return (s1, s2)
try:
if encoder_outputs is None:
assert input_ids is not None
encoder = self.get_encoder()
with self.tlogger.timed("encoder.forward"):
encoder_outputs = encoder.forward(
input_ids, references, query_ref_list
)
if labels is not None and decoder_input_ids is None:
# get decoder inputs from shifting lm labels to the right
assert_eq(labels.dtype, torch.long)
last_hidden = encoder_outputs.last_hidden_state
last_mask = not_none(encoder_outputs.hidden_state_mask)
last_states = [
{
"encoder_hidden_states": last_hidden[i][last_mask[i]],
"decoder_input_ids": self._shift_right(
labels[i : i + 1]
).squeeze(0),
}
for i in range(last_hidden.size(0))
]
with self.tlogger.timed("decoder.forward"):
dec_hidden_states = batched_map(
last_states,
group_key=decode</s>
===========above chunk 2===========
<s> LongTensor | None = None,
decoder_inputs_embeds: Tensor | None = None,
decoder_attention_mask: Tensor | None = None,
past_key_values=None,
use_cache=None,
+ loss_reduction: LossReduction = "mean",
# not used args below
output_attentions=None,
output_hidden_states=None,
return_dict=None,
tqdm=None,
) -> Seq2SeqLMOutput:
# offset: -3
<s> 2)
def run_decoder(enc_results: Sequence[dict]):
if len(enc_results) == 1:
decoder_input_ids = enc_results[0]["decoder_input_ids"].unsqueeze(0)
encoder_hidden_states = enc_results[0][
"encoder_hidden_states"
].unsqueeze(0)
decoder_attention_mask = None
encoder_attention_mask = None
else:
decoder_input_ids, decoder_attention_mask = stack_pad_tensors(
[x["decoder_input_ids"] for x in enc_results]
)
encoder_hidden_states, encoder_attention_mask = stack_pad_tensors(
[x["encoder_hidden_states"] for x in enc_results]
)
decoder_outputs = self.decoder.forward(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
return_dict=True,
)
assert isinstance(
decoder_outputs, BaseModelOutputWithPastAndCrossAttentions
)
n = len(enc_results)
return [decoder_outputs.last_hidden_state[i] for i in range(n)]
def decode_group(enc_r: dict):
assert isinstance(enc_</s>
===========above chunk 3===========
<s> LongTensor | None = None,
decoder_inputs_embeds: Tensor | None = None,
decoder_attention_mask: Tensor | None = None,
past_key_values=None,
use_cache=None,
+ loss_reduction: LossReduction = "mean",
# not used args below
output_attentions=None,
output_hidden_states=None,
return_dict=None,
tqdm=None,
) -> Seq2SeqLMOutput:
# offset: -4
"""
Shapes
- input_ids: (n_queries, query_len)
- labels: (n_queries, label_len)
"""
if labels is not None:
assert_eq(labels.</s> |
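===========editor note: loss_reduction sketch===========
The new `loss_reduction` argument matters later for scoring individual change groups: with `reduction="none"`, `nn.CrossEntropyLoss` returns one loss value per target position instead of a scalar, so a slice of the result can be summed over one group's output range. A minimal PyTorch sketch with made-up shapes:

import torch
from torch import nn

vocab, seq_len = 10, 6
logits = torch.randn(1, seq_len, vocab)          # (batch, seq, vocab)
labels = torch.tensor([[2, 5, -100, 1, 7, 3]])   # -100 positions are ignored

loss_fct = nn.CrossEntropyLoss(ignore_index=-100, reduction="none")
per_token = loss_fct(logits.view(-1, vocab), labels.view(-1))  # shape (seq_len,)
group_loss = per_token[3:5].sum()  # loss restricted to one output range
print(per_token.shape, float(group_loss))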
coeditor.model/RetrievalEditorModel.sample | Modified | temp-1 | 6221bbafc47d1d86eb72669e4373c1cf4cdc6d9b | Add multi-round editing gain. | <0>:<add> fake_input = LongTensor([[1] * t]).to(device)
| <s> max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: Optional[bool] = False,
**model_kwargs,
):
<s>id
subset_ids = torch.arange(len(unfinished_ids), device=device)[next_subset]
unfinished_ids = unfinished_ids[next_subset]
# update generated ids, model inputs, and length for next step
input_ids = cast(
+ LongTensor,
- torch.LongTensor,
next_tokens[next_subset].unsqueeze(-1),
)
assert_eq(input_ids.ndim, 2)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
if model_kwargs["past"] is not None:
model_kwargs["past"] = self._reorder_cache(
model_kwargs["past"], subset_ids
)
if (pbar := model_kwargs.get("tqdm")) is not None:
pbar = cast(tqdm, pbar)
pbar.set_postfix({"unfinished": len(unfinished_ids)})
pbar.update()
# stop when each sentence is finished, or if we exceed the maximum length
- fake_input = torch.LongTensor([[1] * t]).to(device)
<0> t += 1
if len(unfinished_ids) == 0 or stopping_criteria(fake_input, None): # type: ignore
break
if return_dict_in_generate:
return {"sequences": sequences, "sequences_scores": sequences_scores}
else:
return sequences
| ===========above chunk 0===========
<s>int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: Optional[bool] = False,
**model_kwargs,
):
# offset: -1
<s>input_ids.shape[0])]
# TODO: reduce cost using particle weights
# auto-regressive generation
t = 0
while True:
# prepare model inputs
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
# forward pass to get next token
outputs = self.forward(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
next_token_logits = cast(FloatTensor, outputs.logits[:, -1, :])
# pre-process distribution
next_token_scores = logits_processor(input_ids, next_token_logits)
next_token_scores = logits_warper(input_ids, next_token_scores)
# sample
probs = nn.functional.softmax(next_token_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
assert_eq(next_tokens.ndim, 1)
for i, id in enumerate(unfinished_ids.tolist()):
sequences_scores[id] += math.log(probs[i, next_tokens[i]].item())
sequences[id].append(next_tokens[i].item())
next_subset = next_tokens != eos_token_id
subset_ids = torch.arange(len(unfinished_ids), device=device)[next_subset]
un</s>
===========above chunk 1===========
<s>int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: Optional[bool] = False,
**model_kwargs,
):
# offset: -2
"""An optimized sample implementation that does not waste computation
on finished sequences."""
# init values
if logits_processor is None:
logits_processor = LogitsProcessorList()
if stopping_criteria is None:
stopping_criteria = StoppingCriteriaList()
if logits_warper is None:
logits_warper = LogitsProcessorList()
if pad_token_id is None:
pad_token_id = self.config.pad_token_id
if eos_token_id is None:
eos_token_id = self.config.eos_token_id
device = self.device
# keep track of which sequences are already finished
+ unfinished_ids = LongTensor(range(input_ids.shape[0])).to(device)
- unfinished_ids = torch.LongTensor(range(input_ids.shape[0])).to(device)
sequences = input_ids.int().tolist()
sequences_scores = [0.0 for _ in range(input_ids.shape[0])]
# TODO: reduce cost using particle weights
# auto-regressive generation
</s>
===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor.model.RetrievalEditorModel
forward(input_ids: LongTensor | None=None, references: Sequence[TokenSeq] | None=None, query_ref_list: Sequence[Sequence[int]] | None=None, labels: LongTensor | None=None, encoder_outputs: "RetrivalEncoderOutputs | None"=None, decoder_input_ids: LongTensor | None=None, decoder_inputs_embeds: Tensor | None=None, decoder_attention_mask: Tensor | None=None, past_key_values=None, use_cache=None, loss_reduction: LossReduction="mean", output_attentions=None, output_hidden_states=None, return_dict=None, tqdm=None) -> Seq2SeqLMOutput
prepare_inputs_for_generation(input_ids, encoder_outputs=None, past=None, use_cache=None, **kwargs)
_reorder_cache(past, beam_idx)
at: coeditor.model.RetrievalEditorModel._reorder_cache
reordered_decoder_past = ()
reordered_decoder_past = reordered_decoder_past + (
reordered_layer_past_states,
)
reordered_layer_past_states = reordered_layer_past_states + (
layer_past_state.index_select(
0, beam_idx.to(layer_past_state.device)
),
)
at: math
log(x: SupportsFloat, base: SupportsFloat=...) -> float
===========unchanged ref 1===========
at: torch._C._VariableFunctions
arange(end: Union[Number, _complex], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
arange(start: Number, end: Number, step: Number, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
arange(start: Number, end: Number, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
arange(start: Union[Number, _complex], end: Union[Number, _complex], step: Union[Number, _complex]=1, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
arange(end: Number, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
arange(start: Union[Number, _complex], end: Union[Number, _complex], *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
multinomial(input: Tensor, num_samples: _int, replacement: _bool=False, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor
at: torch.autograd.grad_mode
no_grad()
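===========editor note: sampling-loop sketch===========
The point of the custom `sample` above is bookkeeping: finished sequences are removed from the working batch rather than padded along, so later forward passes shrink. The toy loop below shows only that pattern; `EOS` and the random "sampler" are placeholders for the real model call.

import random

EOS = 0

def fake_next_token(_seq: list[int]) -> int:
    return random.randint(0, 4)  # placeholder for model + logits warping

seqs = [[1], [2], [3]]               # generated ids, one list per sequence
unfinished = list(range(len(seqs)))  # indices of sequences still running
for _step in range(20):
    next_tokens = [fake_next_token(seqs[i]) for i in unfinished]
    for i, tok in zip(unfinished, next_tokens):
        seqs[i].append(tok)
    # drop sequences that emitted EOS; the working batch shrinks
    unfinished = [i for i, tok in zip(unfinished, next_tokens) if tok != EOS]
    if not unfinished:
        break
print(seqs)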
|
coeditor.common/keystroke_cost | Modified | temp-1 | cb1121e6d3570256308a1199501817b911ebaa90 | Improve multi-round edit gain evaluation. | <0>:<add> return costs[(l_in, l_out, init_curosr_dis, False)]
| # module: coeditor.common
def keystroke_cost(
input: str,
output: str,
cursor_jump_cost: int = 4,
init_curosr_dis: int | None = None, # default to cursor_jump_cost
):
<s> = rec(i + 1, j, new_dis, deleting=True)
+ cost3 = costs[(i, j, 0, True)]
- best_cost = min(cost0, cost1)
- else:
+ best_cost = min(cost1, cost2, cost3) + 1 + cursor_dis
+ # match char
- # match char
+ if i_char == j_char:
- if i < l_in and j < l_out and input[i] == output[j]:
+ new_dis = min(cursor_dis + 1, cursor_jump_cost)
- new_dis = min(cursor_dis + 1, cursor_jump_cost)
- cost0 = rec(i + 1, j + 1, new_dis, False)
- else:
- cost0 = MaxCost # not an option
- # delete input char
- cost1 = 1 + rec(i + 1, j, 0, False) + cursor_dis
- # add output char
- cost2 = 1 + rec(i, j + 1, 0, False) + cursor_dis
- # start deleting
- cost3 = 1 + rec(i, j, 0, True) + cursor_dis
-
- best_cost = min(cost0, cost1, cost2, cost3)
- cache[key] = best_cost
- return best_cost
+ best_cost = min(best_cost, costs[(i - 1, j - 1, new_dis, False)])
+ costs[(i, j, cursor_dis, False)] = best_cost
if init_curosr_dis is None:
init_curosr_dis = cursor_jump_cost
- return rec(0, 0, init_curosr_dis, False)
<0>
| ===========above chunk 0===========
# module: coeditor.common
def keystroke_cost(
input: str,
output: str,
cursor_jump_cost: int = 4,
init_curosr_dis: int | None = None, # default to cursor_jump_cost
):
# offset: -1
<s>i] if i > 0 else None
+ for j in j_range:
+ j_char = output[-j] if j > 0 else None
+ for cursor_dis in range(cursor_jump_cost + 1):
+ # --- if deleting ---
+ # 1: keep deleting
+ new_dis = min(cursor_dis + 1, cursor_jump_cost)
+ best_cost = costs[(i - 1, j, new_dis, True)] if i > 0 else MaxCost
+ # 2: end deleting
+ if cursor_dis > 0:
+ best_cost = min(best_cost, 1 + cursor_dis + costs[(i, j, 0, False)])
+ costs[(i, j, cursor_dis, True)] = best_cost
- key = (i, j, cursor_dis, deleting)
- if key in cache:
- return cache[key]
+ # --- if not deleting ---
+ # 1: delete input char
+ cost1 = costs[(i - 1, j, 0, False)] if i > 0 else MaxCost
+ # 2: add output char
+ cost2 = costs[(i, j - 1, 0, False)] if j > 0 else MaxCost
- if deleting:
+ # 3: start deleting
- # end deleting
- if cursor_dis > 0:
- cost0 = 1 + cursor_dis + rec(i, j, cursor_dis=0, deleting=False)
- else:
- cost0 = MaxCost # not an option
- # keep deleting
- new_dis = min(cursor_dis + 1, cursor_jump_cost)
- cost1 = rec(i + 1, j, new_dis, deleting=True)
+ cost3 = costs[(i, j,</s>
===========above chunk 1===========
# module: coeditor.common
def keystroke_cost(
input: str,
output: str,
cursor_jump_cost: int = 4,
init_curosr_dis: int | None = None, # default to cursor_jump_cost
):
# offset: -2
<s> cause`deleting = False`.
Worst-case complexity: `len(input) * len(output) * cursor_jump_cost`.
Unmodeled operations:
- Copy and paste
"""
l_in = len(input)
l_out = len(output)
MaxCost = l_in + l_out + cursor_jump_cost + 1000
CacheKey = tuple[int, int, int, bool]
+ costs = dict[CacheKey, int]()
- cache = dict[CacheKey, int]()
- def rec(i: int, j: int, cursor_dis: int, deleting: bool) -> int:
- "Return the cost of matching input[i:] and output[j:]]."
- if i > l_in or j > l_out:
- return MaxCost
- if i == l_in:
- if j == l_out and not deleting:
- return 0 # don't need to care about curosr in this case
- # type out all remaining chars
- return cursor_dis + int(deleting) + (l_out - j)
+ for c in range(cursor_jump_cost + 1):
+ costs[(0, 0, c, False)] = 0
+ costs[(0, 0, c, True)] = c + 1
+ for i in range(l_in + 1):
+ j_range = range(l_out + 1) if i != 0 else range(1, l_out + 1)
+ i_char = input[-i] if i > 0 else None
+ for j in j_range:
+ j_char = output[-j] if</s>
===========above chunk 2===========
# module: coeditor.common
def keystroke_cost(
input: str,
output: str,
cursor_jump_cost: int = 4,
init_curosr_dis: int | None = None, # default to cursor_jump_cost
):
# offset: -3
<s>_dis, deleting = False`,
- Starting with the state `i = 0, j = 0, cursor_dis = init_curosr_dis, deleting = False`,
the cost is computed using the optimal combination of the following operations:
+ - M: match char (cost=0), require `input[-i] == output[-j], not deleting`, cause
- - M: match char (cost=0), require `input[i] == output[j], not deleting`, cause
+ `i -= 1, j -= 1, cursor_dis += 1`
- `i += 1, j += 1, cursor_dis += 1`
+ - D: delete input char (cost=1), require `cursor_dis == 0, not deleting`, cause`i -= 1`.
- - D: delete input char (cost=1), require `cursor_dis == 0, not deleting`, cause`i += 1`.
+ - A: add output char (cost=1), require `cursor_dis == 0, not deleting`, cause`j -= 1`.
- - A: add output char (cost=1), require `cursor_dis == 0, not deleting`, cause`j += 1`.
- C: bring cursor here (cost=min(curosr_dis, cursor_jump_cost)), require nothing, cause`cursor_dis = 0`.
- S: start deleting (cost=1), require `cursor_dis == 0, not deleting`, cause `deleting = True`.
+ - K: keep deleting (cost=0), require `deleting`, cause`i -= 1`.
- - K: keep deleting (cost=0), require `deleting`, cause`i += 1`.
- E: end deleting (cost=1), require `cursor_dis == 0,</s>
===========above chunk 3===========
# module: coeditor.common
def keystroke_cost(
input: str,
output: str,
cursor_jump_cost: int = 4,
init_curosr_dis: int | None = None, # default to cursor_jump_cost
):
# offset: -4
"""
A string distance metric that takes the cost of moving the cursor into account.
    This metric aims to approximate the number of keystrokes required to
transform the input string into the output string.
+ Starting with the state `i = len(input), j = len(output), cursor_dis = init_cur</s> |
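===========editor note: keystroke-cost baseline===========
For intuition: if cursor movement were free and there were no batch "deleting" mode, the DP above would collapse to something close to classic Levenshtein distance over characters (note `keystroke_cost` has no 1-cost substitution, so even then the two are not identical). The standard recurrence, for comparison only:

def levenshtein(a: str, b: str) -> int:
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,                 # delete a[i-1]
                           cur[j - 1] + 1,              # insert b[j-1]
                           prev[j - 1] + (ca != cb)))   # match or substitute
        prev = cur
    return prev[-1]

assert levenshtein("kitten", "sitting") == 3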
coeditor.c3problem/C3ProblemGenerator.process_change | Modified | temp-1 | cb1121e6d3570256308a1199501817b911ebaa90 | Improve multi-round edit gain evaluation. | <0>:<add> and (len(span.change.later) <= self.max_span_chars)
| # module: coeditor.c3problem
class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
def process_change(
self,
pchange: JProjectChange,
mod2usages: Mapping[ModuleName, LineUsageAnalysis],
module_order: Sequence[ModuleName],
) -> Sequence[C3Problem]:
<s>map)
processed_cspans = list[ChangedCodeSpan]()
problems = list[C3Problem]()
for m in module_order:
if (mchange := pchange.changed.get(m)) is None:
continue
if not (usages := mod2usages.get(m)):
usages = LineUsageAnalysis({})
warnings.warn("Unexpected: usages missing for module: " + str(m))
for span in mchange.changed:
code_span = cache.to_code_span(span)
should_mk_problem = (
(span.change.as_char() == Modified.as_char())
and (self._is_training or span._is_func_body())
+ and (len(span.change.earlier) <= self.max_span_chars)
<0> and (count_lines(span.change.earlier) <= self.max_span_lines)
and (count_lines(span.change.later) <= self.max_span_lines)
)
if should_mk_problem:
# latest changes are more relevant
relevant_unchanged = cache.get_relevant_unchanged(code_span, usages)
relevant_changes = list(reversed(processed_cspans))
relevant_changes = cache.sort_changes(
code_span, relevant_unchanged, relevant_changes
)
src_info: SrcInfo = {
"project": pchange.project_name,
"commit": pchange.commit_info,
}
n_lines = span.line_range[1] - span.line_range[0]
prob = C3Problem(
code_span,
range(0, n_lines + 1), # one additional line for</s> | ===========above chunk 0===========
# module: coeditor.c3problem
class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
def process_change(
self,
pchange: JProjectChange,
mod2usages: Mapping[ModuleName, LineUsageAnalysis],
module_order: Sequence[ModuleName],
) -> Sequence[C3Problem]:
# offset: -1
before_mod_map = {m.mname: m for m in pchange.all_modules.before}
cache = C3GeneratorCache(before_mod_map)
processed_cspans = list[ChangedCodeSpan]()
problems = list[C3Problem]()
</s>
===========below chunk 0===========
# module: coeditor.c3problem
class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
def process_change(
self,
pchange: JProjectChange,
mod2usages: Mapping[ModuleName, LineUsageAnalysis],
module_order: Sequence[ModuleName],
) -> Sequence[C3Problem]:
# offset: 1
<s> prob = C3Problem(
code_span,
range(0, n_lines + 1), # one additional line for appending
relevant_changes=relevant_changes,
relevant_unchanged=relevant_unchanged,
change_type=span.change.map(lambda _: None),
src_info=src_info,
)
problems.append(prob)
processed_cspans.append(code_span)
return problems
===========unchanged ref 0===========
at: _warnings
warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
at: coeditor.c3problem
ChangedCodeSpan(headers: Sequence[ChangedHeader], original: TkArray, delta: TkDelta, line_range: LineRange, module: ModuleName)
SrcInfo(map: Mapping[_KT, _VT], **kwargs: _VT)
SrcInfo(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
SrcInfo(**kwargs: _VT)
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
LineUsageAnalysis(line2usages: Mapping[int, Sequence[PyDefinition]])
C3GeneratorCache(pre_module_map: Mapping[ModuleName, JModule])
at: coeditor.c3problem.C3GeneratorCache
get_relevant_unchanged(this_change: ChangedCodeSpan, line_usages: LineUsageAnalysis)
max_distance_penalty = 1000
usage_bonus = 2000
sort_changes(target: ChangedCodeSpan, used_defs: Mapping[PyFullName, PyDefinition], changed: Sequence[ChangedCodeSpan]) -> Sequence[ChangedCodeSpan]
to_code_span(span: ChangedSpan)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
===========unchanged ref 1===========
at: coeditor.c3problem.C3ProblemGenerator
VERSION = "2.9"
max_span_lines: int = 500
max_span_chars: int = 6000
at: coeditor.c3problem.C3ProblemGenerator.__init__
self._is_training: bool = False
at: coeditor.c3problem.C3ProblemGenerator.post_edit_analysis
module_order = sort_modules_by_imports(module_deps)
at: coeditor.c3problem.C3ProblemGenerator.set_training
self._is_training = is_training
at: coeditor.change
Modified(before: E1, after: E1, unchanged: bool=False)
at: coeditor.change.Added
after: E1
map(f: Callable[[E1], T2]) -> "Added[T2]"
as_char()
at: coeditor.change.Deleted
before: E1
map(f: Callable[[E1], T2]) -> "Deleted[T2]"
as_char()
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
map(f: Callable[[E1], T2]) -> "Modified[T2]"
as_char()
at: coeditor.common
count_lines(text: str) -> int
ModuleName = str
at: coeditor.scoped_changes
JProjectChange(project_name: str, changed: Mapping[ModuleName, JModuleChange], all_modules: Modified[Collection[JModule]], commit_info: "CommitInfo | None")
at: coeditor.scoped_changes.ChangedSpan
change: Change[str]
parent_scopes: Sequence[Change[ChangeScope]]
line_range: LineRange
_is_func_body() -> bool
at: coeditor.scoped_changes.JModule
mname: ModuleName
tree: ptree.Module
===========unchanged ref 2===========
at: coeditor.scoped_changes.JModuleChange
module_change: Change[JModule]
changed: Sequence[ChangedSpan]
at: coeditor.scoped_changes.JProjectChange
project_name: str
changed: Mapping[ModuleName, JModuleChange]
all_modules: Modified[Collection[JModule]]
commit_info: "CommitInfo | None"
at: coeditor.scoped_changes.ProjectChangeProcessor
process_change(self, pchange: "JProjectChange", pre_analysis: Any, post_analysis: Any) -> Sequence[TProb]
at: typing
Mapping = _alias(collections.abc.Mapping, 2)
Sequence = _alias(collections.abc.Sequence, 1)
at: typing.Mapping
get(key: _KT) -> Optional[_VT_co]
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
===========changed ref 0===========
# module: coeditor.c3problem
class C3ProblemGenerator(ProjectChangeProcessor[C3Problem]):
"""
### Change log
- v2.9: Add sibling usages for class members. Improve statement signatures.
- v2.8: Fix module usages in `pre_edit_analysis`. Sort changes using heuristic.
    - v2.7: Use new PyDefinition that includes signatures.
- v2.6: fix missing changes in `JModuleChanges`. Rename to edit_line_ids.
- v2.5: fix newline encoding bug.
- v2.4: fix buggy encoding of `Added` and `Deleted` changes.
- v2.3: always generate problems with full editing range and move the problem
splitting logic elsewhere. Also changed the data format of `ChangedCodeSpan`.
"""
VERSION = "2.9"
# change spans with more than this many lines will be ignored
max_span_lines: int = 500
+ # change spans with more than this many characters will be ignored
+ max_span_chars: int = 6000
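===========editor note: span-filter sketch===========
The new `max_span_chars` bound above, together with `max_span_lines`, is just a size filter on both sides of a change; isolated, the predicate looks like the sketch below (the `Span` container and the line counting are hypothetical stand-ins for the real `ChangedSpan` and `count_lines`):

from dataclasses import dataclass

MAX_SPAN_LINES, MAX_SPAN_CHARS = 500, 6000

@dataclass
class Span:
    earlier: str  # code before the change
    later: str    # code after the change

def within_budget(s: Span) -> bool:
    return (
        len(s.earlier) <= MAX_SPAN_CHARS
        and len(s.later) <= MAX_SPAN_CHARS
        and s.earlier.count("\n") + 1 <= MAX_SPAN_LINES
        and s.later.count("\n") + 1 <= MAX_SPAN_LINES
    )

print(within_budget(Span("a = 1\n", "a = 2\n")))  # True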
|
coeditor.model/RetrievalEditorModel.multi_round_edit_gain | Modified | temp-1 | cb1121e6d3570256308a1199501817b911ebaa90 | Improve multi-round edit gain evaluation. | <0>:<add> accept_keys.extend(group)
| # module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
def multi_round_edit_gain(
self,
problem: C3Problem,
tokenizer: C3ProblemTokenizer,
dec_args: DecodingArgs,
+ max_rounds: int = 8,
- max_rounds: int = 6,
print_steps: bool = True,
) -> "MultiRoundEditStats":
<s>
- if code_equal(pred.change.after, gold_change.after):
- accept_keys = list(pred_delta.keys())
- else:
+ accept_keys = list[DeltaKey]()
- accept_keys = list[DeltaKey]()
+ for group in pred_delta.change_groups():
- for group in pred_delta.change_groups():
+ expected = [span.delta.get(k) for k in group]
- expected = [span.delta.get(k) for k in group]
+ actual = [pred_delta.get(k) for k in group]
- actual = [pred_delta.get(k) for k in group]
+ if expected == actual:
- if expected == actual:
- accept_keys.extend(group)
<0> if accept_keys:
accept_delta, rest_delta = span.delta.decompose_for_change(accept_keys)
if print_steps:
cprint("green", "Accepted changes:")
print(accept_delta)
if rounds == 1:
first_gain = cost_model.get_edit_gain(
original, accept_delta, print_steps
)
else:
delta_keys = self._get_most_uncertain_edit(
batch, span.delta, problem.edit_line_ids, print_steps=print_steps
)
accept_delta, rest_delta = span.delta.decompose_for_change(delta_keys)
if print_steps:
cprint("red", "No accepted changes.")
print("Most uncertain changes:")
print(accept_</s> | ===========above chunk 0===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
def multi_round_edit_gain(
self,
problem: C3Problem,
tokenizer: C3ProblemTokenizer,
dec_args: DecodingArgs,
+ max_rounds: int = 8,
- max_rounds: int = 6,
print_steps: bool = True,
) -> "MultiRoundEditStats":
# offset: -1
<s> (since it's unclear how to define normalization for partial edits)."""
+
cost_model = EditCostModel()
problem = problem.restrict_span_changes()
span = problem.span
gold_change = span.get_change()
original = span.original.tolist()
if print_steps:
print_sections(("gold_change", show_change(gold_change)))
print("Remaining changes:")
print(span.delta)
gain = labe_gain = cost_model.get_edit_gain(original, span.delta, print_steps)
first_gain = 0
rounds = 0
for rounds in range(1, max_rounds + 1):
tk_prob = tokenizer.tokenize_problem(problem)
batch = C3DataLoader.pack_batch([tk_prob])
pred = self.predict_on_batch(batch, [problem], dec_args)[0][0]
pred_delta = TkDelta.from_output_tks(problem.edit_line_ids, pred.out_tks)
main_segs = output_ids_as_seqs(tk_prob.main_tks)
pred_str = TkC3Problem.show_predictions(pred.out_tks, main_segs)
if print_steps:
print_sections(("round", str(rounds)))
print(tk_prob.show(skip_ctx=True))
print("pred change:")
print(pred_str)
- if code_equal(pred.change.after, gold_change.after):
- accept_keys = list(pred</s>
===========above chunk 1===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
def multi_round_edit_gain(
self,
problem: C3Problem,
tokenizer: C3ProblemTokenizer,
dec_args: DecodingArgs,
+ max_rounds: int = 8,
- max_rounds: int = 6,
print_steps: bool = True,
) -> "MultiRoundEditStats":
# offset: -2
+ """Compute the total edit gain via multi-round interaction.
+ Note that this is a strict metric that does not perform code normalization
+ (since it's unclear how to define normalization for partial edits)."""
+
cost_model = EditCostModel()
</s>
===========below chunk 0===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
def multi_round_edit_gain(
self,
problem: C3Problem,
tokenizer: C3ProblemTokenizer,
dec_args: DecodingArgs,
+ max_rounds: int = 8,
- max_rounds: int = 6,
print_steps: bool = True,
) -> "MultiRoundEditStats":
# offset: 1
<s>
cprint("red", "No accepted changes.")
print("Most uncertain changes:")
print(accept_delta)
gain -= cost_model.get_edit_gain(original, accept_delta, print_steps)
original = accept_delta.apply_to_change(original)
span = replace(span, original=TkArray.new(original), delta=rest_delta)
if print_steps:
print("Remaining changes:")
print(rest_delta)
if not rest_delta or rounds == max_rounds:
break
first_line = next(iter(rest_delta._deltas))
# shrink the edit range
edit_line_ids = accept_delta.get_new_line_ids(problem.edit_line_ids)
edit_line_ids = [l for l in edit_line_ids if l >= first_line]
problem = replace(problem, span=span, edit_line_ids=edit_line_ids)
# the remaining changes (if any) need to be applied manually
gain -= cost_model.get_edit_gain(original, span.delta, print_steps)
return MultiRoundEditStats(
label_edit_gain=labe_gain,
first_edit_gain=first_gain,
rounds=rounds,
total_edit_gain=gain,
)
===========unchanged ref 0===========
at: coeditor._utils
cprint(color: str, *elems, sep: Optional[str]=..., end: Optional[str]=..., file: Optional[SupportsWrite[str]]=..., flush: bool=...)
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
TkC3Problem(main_input: TkArray, header: TkArray, output: TkArray, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TkArray]], project: str, commit: CommitInfo | None, truncated: bool)
C3ProblemTokenizer(max_ref_tks: int=512, max_query_tks: int=512, max_output_tks: int=256, max_scope_tks: int=128, max_ref_tks_sum: int=512 * 16, ref_chunk_overlap: int=32, disable_builtin_defs: bool=True, disable_unchanged_refs: bool=False, current_code_only: bool=False)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
restrict_span_changes()
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.7"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
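===========editor note: group-acceptance sketch===========
The acceptance rule in the loop above is exact agreement per change group: a predicted group counts only if every one of its keys maps to the same action as in the gold delta. With deltas modeled as plain dicts from `(line, index)` keys to action strings (keys and values made up for illustration):

gold = {(0, 0): "+import os", (3, 0): "+x = 1", (3, 1): "+y = 2"}
pred = {(0, 0): "+import os", (3, 0): "+x = 9", (3, 1): "+y = 2"}
groups = [((0, 0),), ((3, 0), (3, 1))]  # keys grouped into contiguous edits

accepted = [g for g in groups if all(pred.get(k) == gold.get(k) for k in g)]
print(accepted)  # [((0, 0),)] -- the second group fails on (3, 0)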
|
coeditor.model/EditCostModel.get_edit_gain | Modified | temp-1 | ee22b08bc99bec226c5796368571fb20eda7af88 | Add least_effort multi-round strategy. | <0>:<add> delta = delta.shifted(-a)
| # module: coeditor.model
@dataclass
class EditCostModel:
def get_edit_gain(
self, original: TokenSeq, delta: TkDelta, print_steps: bool = False
):
if not delta:
return 0
+ edit_lines = list(k[0] for k in delta.keys())
+ a, b = edit_lines[0], edit_lines[-1]
+ original = tk_get_lines(original, a, b + 1)
<0> old_change = tokens_to_change(original)
new_change = tokens_to_change(delta.apply_to_change(original))
old_lines = old_change.after.splitlines()
new_lines = new_change.after.splitlines()
matcher = SequenceMatcher(None, old_lines, new_lines)
total = 0
for opcode in matcher.get_opcodes():
tag, i1, i2, j1, j2 = opcode
if tag == "equal":
continue
if tag == "delete":
# delete line-by-line
direct_cost = (i2 - i1) * self.delete_line_cost
# delete the selected range
batch_cost = self.cursor_jump_cost + 2
cost = min(direct_cost, batch_cost) + self.cursor_jump_cost
if print_steps:
print(f"delete lines: ({i1},{i2}), {cost=}")
elif tag == "insert":
cost = len("\n".join(new_lines[j1:j2])) + self.cursor_jump_cost
if print_steps:
print(f"insert lines: ({j1},{j2}), {cost=}")
else:
assert_eq(tag, "replace")
old_text = "\n".join(old_lines[i1:i2])
new_text = "\n".join(new_lines[j1:j2])
cost = keystroke_cost(old_text, new_text, self.cursor_jump_cost)
</s> | ===========below chunk 0===========
# module: coeditor.model
@dataclass
class EditCostModel:
def get_edit_gain(
self, original: TokenSeq, delta: TkDelta, print_steps: bool = False
):
# offset: 1
<s>:j2])
cost = keystroke_cost(old_text, new_text, self.cursor_jump_cost)
if print_steps:
print(f"replace lines: ({i1},{i2}) -> ({j1},{j2}), {cost=}")
for l in Differ().compare(
old_text.splitlines(), new_text.splitlines()
):
print(" " + l)
total += cost
if print_steps:
print("total cost:", total)
return total
===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.TkC3Problem
main_input: TkArray
header: TkArray
output: TkArray
path: ProjectPath
change_type: Change[None]
named_references: Sequence[tuple[str, TkArray]]
project: str
commit: CommitInfo | None
truncated: bool
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.encoding
tk_get_lines(tks: TokenSeq, start_line: int, until_line: int) -> TokenSeq
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
tokens_to_change(tokens: TokenSeq) -> Modified[str]
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
keys() -> Iterable[DeltaKey]
shifted(shift_lines: int) -> Self
at: coeditor.encoding.TokenizedEdit
input_tks: TokenSeq
output_tks: TokenSeq
main_tks: TokenSeq
path: ProjectPath
change_type: Change[None]
BAD_DELETE = encode_single_line("((bad delete))")
show(pred_tks: TokenSeq | None=None, skip_ctx: bool=False, skip_meta: bool=False) -> str
===========unchanged ref 1===========
at: coeditor.model.show_prediction
tk_prob = TkC3Problem(
main_input=TkArray.new(pred["input_ids"]),
header=TkArray.new([]),
output=TkArray.new(pred["labels"]),
path=span.headers[-1].path,
change_type=prob.change_type,
named_references=[
(f"reference-{i}", TkArray.new(ref))
for i, ref in enumerate(pred["references"])
],
project=prob.src_info["project"],
commit=prob.src_info["commit"],
)
at: coeditor.tk_array
TkArray()
at: coeditor.tk_array.TkArray
new(tks: Sequence[int]) -> "TkArray"
at: dataclasses
dataclass(_cls: Type[_T]) -> Type[_T]
dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]]
dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]
at: difflib
SequenceMatcher(isjunk: Optional[Callable[[_T], bool]]=..., a: Sequence[_T]=..., b: Sequence[_T]=..., autojunk: bool=...)
Differ(linejunk: Optional[_JunkCallback]=..., charjunk: Optional[_JunkCallback]=...)
===========changed ref 0===========
# module: coeditor.encoding
+ def tk_get_lines(tks: TokenSeq, start_line: int, until_line: int) -> TokenSeq:
+ """Get the token sequence for the lines between `start_line` and `until_line`."""
+ # use a loop to implement this
+ # line breaks are represented by the Newline_id token.
+ newline_pos = [-1]
+ for i, tk in enumerate(tks):
+ if tk == Newline_id:
+ newline_pos.append(i)
+ newline_pos.append(len(tks))
+ start_line = max(0, min(start_line, len(newline_pos) - 1))
+ until_line = max(0, min(until_line, len(newline_pos) - 1))
+ start = newline_pos[start_line] + 1
+ end = max(0, newline_pos[until_line])
+ return tks[start:end]
+
===========changed ref 1===========
# module: coeditor.model
+ @dataclass
+ class MultiRoundEvaluator:
+ def _get_least_effort_edit(
+ self,
+ original: TokenSeq,
+ delta: TkDelta,
+ cost_model: "EditCostModel",
+ print_steps: bool,
+ ) -> Sequence[DeltaKey]:
+ tk_lines = tk_splitlines(original)
+ group_costs = list[tuple]()
+ for group in delta.change_groups():
+ edit_lines = [k[0] for k in group]
+ a, b = edit_lines[0], edit_lines[-1]
+ subdelta = delta.for_keys(group).shifted(-a)
+ sub_input = join_list(tk_lines[a:b], Newline_id)
+ cost = cost_model.get_edit_gain(sub_input, subdelta, print_steps)
+ group_costs.append((group, cost))
+ return min(group_costs, key=lambda x: x[1])[0]
+
===========changed ref 2===========
# module: coeditor.model
+ @dataclass
+ class MultiRoundEvaluator:
+ model: RetrievalEditorModel
+ tokenizer: C3ProblemTokenizer
+ dec_args: DecodingArgs
+ strategy: Literal["most_uncertain", "least_effort"] = "most_uncertain"
+ max_rounds: int = 8
+ |
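===========editor note: opcode-pricing sketch===========
`get_edit_gain` prices a line diff through `difflib.SequenceMatcher` opcodes: deletions cost the cheaper of per-line or range deletion, insertions cost their character count, and replacements fall back to the keystroke metric. A stripped-down version of the same accounting, with illustrative constants and plain retyping standing in for `keystroke_cost`:

from difflib import SequenceMatcher

CURSOR_JUMP, DELETE_LINE = 4, 2  # illustrative, not the tuned values

def line_diff_cost(old: str, new: str) -> int:
    a, b = old.splitlines(), new.splitlines()
    total = 0
    for tag, i1, i2, j1, j2 in SequenceMatcher(None, a, b).get_opcodes():
        if tag == "equal":
            continue
        if tag == "delete":
            total += min((i2 - i1) * DELETE_LINE, CURSOR_JUMP + 2) + CURSOR_JUMP
        else:  # "insert" or "replace": type out the new block
            total += len("\n".join(b[j1:j2])) + CURSOR_JUMP
    return total

print(line_diff_cost("a\nb\nc", "a\nx\nc"))  # 5 with these constants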
coeditor.service/EditPredictionService.apply_edit_to_elem | Modified | temp-1 | ee22b08bc99bec226c5796368571fb20eda7af88 | Add least_effort multi-round strategy. | <0>:<add> problem.span.original.tolist(), edit_start, edit_stop
| # module: coeditor.service
@dataclass
class EditPredictionService:
@staticmethod
def apply_edit_to_elem(
target: _EditRegion,
problem: C3Problem,
out_tks: TokenSeq,
) -> Modified[str]:
edit_line_ids = target.target_line_ids
edit_start = edit_line_ids[0]
edit_stop = edit_line_ids[-1] + 1
delta = (
TkDelta.from_output_tks(problem.edit_line_ids, out_tks)
.for_input_range((edit_start, edit_stop + 1))
.shifted(-edit_start)
)
+ change1_tks = tk_get_lines(
- change1_tks = get_tk_lines(
- problem.span.original.tolist(), range(edit_start, edit_stop)
<0> )
change1 = tokens_to_change(change1_tks)
change2_tks = delta.apply_to_change(change1_tks)
change2 = tokens_to_change(change2_tks)
# change2 is supposed to be the change we want. However, the tokenizer
# sometimes does not perfectly encode the input, hence we extract the
# delta and directly apply it to the current code to avoid unnecessary
# tokenization.
_, delta2 = StrDelta.from_change(Modified(change1.after, change2.after))
new_code = delta2.apply_to_input(target.current_code)
return Modified(target.current_code, new_code)
| ===========unchanged ref 0===========
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
original: TkArray
delta: TkDelta
line_range: LineRange
module: ModuleName
at: coeditor.change
Modified(before: E1, after: E1, unchanged: bool=False)
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
at: coeditor.common
TokenSeq = list[Token]
at: coeditor.encoding
tk_get_lines(tks: TokenSeq, start_line: int, until_line: int) -> TokenSeq
StrDelta(_deltas: Mapping[int, tuple[str, ...]])
TkDelta(_deltas: Mapping[int, tuple[TokenSeq, ...]])
tokens_to_change(tokens: TokenSeq) -> Modified[str]
at: coeditor.encoding.StrDelta
_deltas: Mapping[int, tuple[str, ...]]
from_change(change: Change[str]) -> tuple[str, "StrDelta"]
at: coeditor.encoding.TkDelta
_deltas: Mapping[int, tuple[TokenSeq, ...]]
apply_to_change(change: TokenSeq) -> TokenSeq
===========unchanged ref 1===========
for_input_range(line_range: tuple[int, int]) -> Self
shifted(shift_lines: int) -> Self
from_output_tks(edit_line_ids: Sequence[int], tks: TokenSeq, allow_truncated_tks: bool=True) -> "TkDelta"
at: coeditor.service
_EditRegion(current_code: str, target_lines: Sequence[int], target_line_ids: Sequence[int])
at: coeditor.service._EditRegion
current_code: str
target_lines: Sequence[int]
target_line_ids: Sequence[int]
at: coeditor.tk_array.TkArray
tolist() -> TokenSeq
===========changed ref 0===========
# module: coeditor.encoding
+ def tk_get_lines(tks: TokenSeq, start_line: int, until_line: int) -> TokenSeq:
+ """Get the token sequence for the lines between `start_line` and `until_line`."""
+ # use a loop to implement this
+ # line breaks are represented by the Newline_id token.
+ newline_pos = [-1]
+ for i, tk in enumerate(tks):
+ if tk == Newline_id:
+ newline_pos.append(i)
+ newline_pos.append(len(tks))
+ start_line = max(0, min(start_line, len(newline_pos) - 1))
+ until_line = max(0, min(until_line, len(newline_pos) - 1))
+ start = newline_pos[start_line] + 1
+ end = max(0, newline_pos[until_line])
+ return tks[start:end]
+
===========changed ref 1===========
# module: coeditor.model
+ @dataclass
+ class MultiRoundEvaluator:
+ model: RetrievalEditorModel
+ tokenizer: C3ProblemTokenizer
+ dec_args: DecodingArgs
+ strategy: Literal["most_uncertain", "least_effort"] = "most_uncertain"
+ max_rounds: int = 8
+
===========changed ref 2===========
# module: coeditor.scoped_changes
- def _to_decorated(tree: ptree.ClassOrFunc):
- decorated = not_none(tree.parent)
- if decorated.type == "async_funcdef":
- decorated = not_none(decorated.parent)
-
- if decorated.type == "decorated":
- return cast(ptree.PythonNode, decorated)
- else:
- return tree
-
===========changed ref 3===========
# module: coeditor.scoped_changes
- def _search_in_scope(
- self: ptree.Scope, filter: Callable[[ptree.PythonBaseNode], bool]
- ) -> Iterable[ptree.PythonBaseNode]:
- def scan(children: Sequence[ptree.PythonBaseNode]):
- for element in children:
- if filter(element):
- yield element
- if element.type in ptree._FUNC_CONTAINERS:
- yield from scan(element.children) # type: ignore
-
- return scan(self.children) # type: ignore
-
===========changed ref 4===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
+ def for_keys(self, keys: Collection[DeltaKey]) -> Self:
+ """Compute the delta for the given line range."""
+ key_set = set(keys)
+ acts1 = dict[int, list[TokenSeq]]()
+ for l, acts in self._deltas.items():
+ for i, act in enumerate(acts):
+ key = DeltaKey((l, i))
+ if key in key_set:
+ acts1.setdefault(l, []).append(act)
+ return TkDelta({k: tuple(v) for k, v in acts1.items()})
+
===========changed ref 5===========
# module: coeditor.model
+ @dataclass
+ class MultiRoundEvaluator:
+ def _get_least_effort_edit(
+ self,
+ original: TokenSeq,
+ delta: TkDelta,
+ cost_model: "EditCostModel",
+ print_steps: bool,
+ ) -> Sequence[DeltaKey]:
+ tk_lines = tk_splitlines(original)
+ group_costs = list[tuple]()
+ for group in delta.change_groups():
+ edit_lines = [k[0] for k in group]
+ a, b = edit_lines[0], edit_lines[-1]
+ subdelta = delta.for_keys(group).shifted(-a)
+ sub_input = join_list(tk_lines[a:b], Newline_id)
+ cost = cost_model.get_edit_gain(sub_input, subdelta, print_steps)
+ group_costs.append((group, cost))
+ return min(group_costs, key=lambda x: x[1])[0]
+ |
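===========editor note: line-slicing sketch===========
`tk_get_lines`, shown in full above, is the token-level analog of slicing `split("\n")` output with a half-open `[start_line, until_line)` range; the string version below shows the same convention (minus the index clamping the token version performs):

def get_lines(text: str, start_line: int, until_line: int) -> str:
    return "\n".join(text.split("\n")[start_line:until_line])

src = "a\nb\nc\nd"
assert get_lines(src, 1, 3) == "b\nc"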
tests.test_edits/test_splitlines | Modified | temp-1 | ee22b08bc99bec226c5796368571fb20eda7af88 | Add least_effort multi-round strategy. | <0>:<add> assert_tks_eq(join_list(tk_lines, Newline_id), enc, "join_list(tk_lines)")
| # module: tests.test_edits
def test_splitlines():
rng = get_rng()
+ for n in range(60):
- for n in range(100):
+ for _ in range(10):
+ rand_input = [rng.choice(["a", "b", "c", "\n"]) for _ in range(n)]
- rand_input = [rng.choice(["a", "b", "c", "\n"]) for _ in range(n)]
+ input = "".join(rand_input).rstrip("\n")
- input = "".join(rand_input).rstrip("\n")
+ lines = splitlines(input)
- lines = splitlines(input)
+ # basic identity
- # basic identity
+ assert "\n".join(lines) == input
- assert "\n".join(lines) == input
+ assert count_lines(input) == len(lines)
- assert count_lines(input) == len(lines)
+ # encode and decode
- # encode and decode
+ enc = encode_lines_join(input)
- enc = encode_lines_join(input)
+ assert decode_tokens(enc) == input
- assert decode_tokens(enc) == input
+ # split tokens
- # split tokens
+ tk_lines = tk_splitlines(enc)
- tk_lines = tk_splitlines(enc)
+ assert len(tk_lines) == len(lines)
- assert len(tk_lines) == len(lines)
- assert_tks_eq(join_list(tk_lines, Newline_id), enc, "join_list(tk_lines)")
<0>
| ===========unchanged ref 0===========
at: coeditor.common
splitlines(text: str) -> list[str]
count_lines(text: str) -> int
at: coeditor.encoding
tk_splitlines(tks: TokenSeq)
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
encode_lines_join(text: str) -> TokenSeq
at: random.Random
VERSION = 3 # used by getstate/setstate
_randbelow = _randbelow_with_getrandbits
choice(seq: Sequence[_T]) -> _T
at: tests.test_edits
get_rng()
===========changed ref 0===========
# module: coeditor.service
- def get_tk_lines(tks: TokenSeq, line_ids: Sequence[int]) -> TokenSeq:
- lines = tk_splitlines(tks)
- return join_list((lines[i] for i in line_ids), Newline_id)
-
===========changed ref 1===========
# module: coeditor.model
+ @dataclass
+ class MultiRoundEvaluator:
+ model: RetrievalEditorModel
+ tokenizer: C3ProblemTokenizer
+ dec_args: DecodingArgs
+ strategy: Literal["most_uncertain", "least_effort"] = "most_uncertain"
+ max_rounds: int = 8
+
===========changed ref 2===========
# module: coeditor.scoped_changes
- def _to_decorated(tree: ptree.ClassOrFunc):
- decorated = not_none(tree.parent)
- if decorated.type == "async_funcdef":
- decorated = not_none(decorated.parent)
-
- if decorated.type == "decorated":
- return cast(ptree.PythonNode, decorated)
- else:
- return tree
-
===========changed ref 3===========
# module: coeditor.scoped_changes
- def _search_in_scope(
- self: ptree.Scope, filter: Callable[[ptree.PythonBaseNode], bool]
- ) -> Iterable[ptree.PythonBaseNode]:
- def scan(children: Sequence[ptree.PythonBaseNode]):
- for element in children:
- if filter(element):
- yield element
- if element.type in ptree._FUNC_CONTAINERS:
- yield from scan(element.children) # type: ignore
-
- return scan(self.children) # type: ignore
-
===========changed ref 4===========
# module: coeditor.encoding
@dataclass(frozen=True)
class TkDelta:
+ def for_keys(self, keys: Collection[DeltaKey]) -> Self:
+ """Compute the delta for the given line range."""
+ key_set = set(keys)
+ acts1 = dict[int, list[TokenSeq]]()
+ for l, acts in self._deltas.items():
+ for i, act in enumerate(acts):
+ key = DeltaKey((l, i))
+ if key in key_set:
+ acts1.setdefault(l, []).append(act)
+ return TkDelta({k: tuple(v) for k, v in acts1.items()})
+
===========changed ref 5===========
# module: coeditor.model
+ @dataclass
+ class MultiRoundEvaluator:
+ def _get_least_effort_edit(
+ self,
+ original: TokenSeq,
+ delta: TkDelta,
+ cost_model: "EditCostModel",
+ print_steps: bool,
+ ) -> Sequence[DeltaKey]:
+ tk_lines = tk_splitlines(original)
+ group_costs = list[tuple]()
+ for group in delta.change_groups():
+ edit_lines = [k[0] for k in group]
+ a, b = edit_lines[0], edit_lines[-1]
+ subdelta = delta.for_keys(group).shifted(-a)
+ sub_input = join_list(tk_lines[a:b], Newline_id)
+ cost = cost_model.get_edit_gain(sub_input, subdelta, print_steps)
+ group_costs.append((group, cost))
+ return min(group_costs, key=lambda x: x[1])[0]
+
===========changed ref 6===========
# module: coeditor.encoding
+ def tk_get_lines(tks: TokenSeq, start_line: int, until_line: int) -> TokenSeq:
+ """Get the token sequence for the lines between `start_line` and `until_line`."""
+ # use a loop to implement this
+ # line breaks are represented by the Newline_id token.
+ newline_pos = [-1]
+ for i, tk in enumerate(tks):
+ if tk == Newline_id:
+ newline_pos.append(i)
+ newline_pos.append(len(tks))
+ start_line = max(0, min(start_line, len(newline_pos) - 1))
+ until_line = max(0, min(until_line, len(newline_pos) - 1))
+ start = newline_pos[start_line] + 1
+ end = max(0, newline_pos[until_line])
+ return tks[start:end]
+
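A quick worked example of the index arithmetic above (the `Newline_id` value here is a stand-in; the real id comes from the tokenizer):

    Newline_id = 0  # placeholder for illustration only
    tks = [11, 12, 0, 21, 0, 31, 32]  # three lines: [11, 12] / [21] / [31, 32]
    # newline_pos becomes [-1, 2, 4, 7], hence:
    # tk_get_lines(tks, 0, 1) -> tks[0:2] == [11, 12]
    # tk_get_lines(tks, 1, 3) -> tks[3:7] == [21, 0, 31, 32]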
===========changed ref 7===========
# module: coeditor.model
+ @dataclass
+ class MultiRoundEvaluator:
+ @torch.autocast("cuda")
+ def _get_most_uncertain_edit(
+ self,
+ batch: dict,
+ delta: TkDelta,
+ edit_line_ids: Sequence[int],
+ print_steps: bool,
+ ) -> Sequence[DeltaKey]:
+ device = self.model.device
+ input_ids = cast(LongTensor, LongTensor(batch["input_ids"]).to(device))
+ labels = [wrap_bos(batch["labels"][0])]
+ labels = cast(LongTensor, LongTensor(labels).to(device))
+ assert_eq(input_ids.size(0), 1)
+
+ output = self.model.forward(
+ input_ids,
+ references=batch["references"],
+ query_ref_list=batch["query_ref_list"],
+ labels=labels,
+ loss_reduction="none",
+ )
+ loss = not_none(output.loss)
+
+ out_ranges = delta.change_groups_as_output_ranges(edit_line_ids)
+ group2loss = dict[Sequence[DeltaKey], float]()
+ for r, group in zip(out_ranges, delta.change_groups()):
+ r_loss = float(loss[r].sum())
+ group2loss[group] = r_loss
+ if print_steps:
+ tks = repr(decode_tokens(labels[0, r].tolist()))
+ print(f"range={(r.start, r.stop)}, loss={r_loss:.4g},\n\ttokens={tks}")
+
+ return max(group2loss.keys(), key=lambda k: group2loss[k])
+ |
coeditor._utils/scalar_stats | Modified | temp-1 | 166ab213bb5646914915871ebd647ee2f8510f4f | Report under more edit cost models. | <0>:<add> "max": float(x.max()),
| # module: coeditor._utils
def scalar_stats(xs) -> dict[str, Any]:
x = np.array(xs)
return {
+ "mean": float(x.mean()),
- "mean": x.mean(),
+ "median": float(np.median(x)),
- "median": np.median(x),
+ "min": float(x.min()),
- "min": x.min(),
- "max": x.max(),
<0> }
| ===========unchanged ref 0===========
at: numpy._ArrayOrScalarCommon
mean(axis: None | _ShapeLike=..., dtype: DTypeLike=..., out: _NdArraySubClass=..., keepdims: bool=..., *, where: _ArrayLikeBool_co=...) -> _NdArraySubClass
mean(axis: None | _ShapeLike=..., dtype: DTypeLike=..., out: None=..., keepdims: bool=..., *, where: _ArrayLikeBool_co=...) -> Any
min(axis: None | _ShapeLike=..., out: _NdArraySubClass=..., keepdims: bool=..., initial: _NumberLike_co=..., where: _ArrayLikeBool_co=...) -> _NdArraySubClass
min(axis: None | _ShapeLike=..., out: None=..., keepdims: bool=..., initial: _NumberLike_co=..., where: _ArrayLikeBool_co=...) -> Any
at: numpy.core._multiarray_umath
array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, like=None, /)
===========unchanged ref 1===========
at: numpy.lib.function_base
median(a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, axis: None | _ShapeLike=..., out: _ArrayType=..., overwrite_input: bool=..., keepdims: bool=...) -> _ArrayType
median(a: _ArrayLikeObject_co, axis: None=..., out: None=..., overwrite_input: bool=..., keepdims: L[False]=...) -> Any
median(a: _ArrayLikeFloat_co, axis: None=..., out: None=..., overwrite_input: bool=..., keepdims: L[False]=...) -> floating[Any]
median(a: _ArrayLikeTD64_co, axis: None=..., out: None=..., overwrite_input: bool=..., keepdims: L[False]=...) -> timedelta64
median(a: _ArrayLikeComplex_co, axis: None=..., out: None=..., overwrite_input: bool=..., keepdims: L[False]=...) -> complexfloating[Any, Any]
median(a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, axis: None | _ShapeLike=..., out: None=..., overwrite_input: bool=..., keepdims: bool=...) -> Any
===========changed ref 0===========
# module: coeditor._utils
def assert_eq(x: T1, *xs: T1, extra_message: Callable[[], str] = lambda: "") -> None:
for i in range(len(xs)):
x = xs[i - 1] if i > 0 else x
y = xs[i]
+ if x != y:
+ raise AssertionError(
- assert x == y, (
+ f"{x} (of type {type(x).__name__}) != {y} (of type {type(y).__name__}) at equality {i}.\n"
- f"{x} (of type {type(x).__name__}) != {y} (of type {type(y).__name__}) at equality {i}.\n"
+ + extra_message()
- + extra_message()
+ )
- )
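One plausible motivation for the `float(...)` casts above, sketched in isolation: numpy scalar types such as `np.float32` are not JSON-serializable, so returning plain Python floats keeps downstream logging and serialization safe.

    import json
    import numpy as np

    x = np.array([1.0, 2.0, 4.0], dtype=np.float32)
    stats = {"mean": float(x.mean()), "min": float(x.min()), "max": float(x.max())}
    print(json.dumps(stats))  # works; the raw np.float32 scalars would raise TypeError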
|
coeditor._utils/pretty_print_dict | Modified | temp-1 | 166ab213bb5646914915871ebd647ee2f8510f4f | Report under more edit cost models. | <0>:<add> elif level >= max_show_level:
| # module: coeditor._utils
def pretty_print_dict(
d: dict,
level: int = 0,
max_show_level: int = 1000,
float_precision: int = 5,
):
+ def show_float(x: float):
+ return f"%.{float_precision}g" % x
+
for k, v in d.items():
print(" " * level, end="")
if isinstance(v, float):
+ print(f"{k}: {show_float(v)}")
- print(f"{k}: %.{float_precision}g" % v)
elif isinstance(v, dict) or isinstance(v, list):
+ if isinstance(v, dict) and all(isinstance(x, float) for x in v.values()):
+ dict_s = (
+ "{" + ", ".join(f"{k}: {show_float(v)}" for k, v in v.items()) + "}"
+ )
+ print(f"{k}: {dict_s}")
- if level >= max_show_level:
<0> print(f"{k}: ...")
else:
print(f"{k}:")
if isinstance(v, list):
v = {f"[{i}]": e for i, e in enumerate(v)}
pretty_print_dict(v, level=level + 1, max_show_level=max_show_level)
else:
print(f"{k}: {v}")
| ===========changed ref 0===========
# module: coeditor._utils
def scalar_stats(xs) -> dict[str, Any]:
x = np.array(xs)
return {
+ "mean": float(x.mean()),
- "mean": x.mean(),
+ "median": float(np.median(x)),
- "median": np.median(x),
+ "min": float(x.min()),
- "min": x.min(),
+ "max": float(x.max()),
- "max": x.max(),
}
===========changed ref 1===========
# module: coeditor._utils
def assert_eq(x: T1, *xs: T1, extra_message: Callable[[], str] = lambda: "") -> None:
for i in range(len(xs)):
x = xs[i - 1] if i > 0 else x
y = xs[i]
+ if x != y:
+ raise AssertionError(
- assert x == y, (
+ f"{x} (of type {type(x).__name__}) != {y} (of type {type(y).__name__}) at equality {i}.\n"
- f"{x} (of type {type(x).__name__}) != {y} (of type {type(y).__name__}) at equality {i}.\n"
+ + extra_message()
- + extra_message()
+ )
- )
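A usage sketch of the new inline rendering (assuming the `pretty_print_dict` defined above is in scope): a nested dict whose values are all floats now prints on one line, formatted by `show_float` with 5 significant digits.

    stats = {"loss": {"mean": 0.123456, "max": 1.0}, "note": "ok"}
    pretty_print_dict(stats)
    # output:
    # loss: {mean: 0.12346, max: 1}
    # note: ok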
|
coeditor.dataset/make_or_load_dataset | Modified | temp-1 | 5d8aca1f03b3059648b2006f8015a1a72612cfc0 | Implement the C3ToCodeCompletion transformation. | <0>:<add> remake=remake_problems,
| # module: coeditor.dataset
def make_or_load_dataset(
dataset_name: str,
change_processor: ProjectChangeProcessor[C3Problem],
problem_transformer: C3ProblemTransform,
remake_problems: bool = False,
workers: int = DefaultWorkers,
) -> C3ProblemDataset:
<s>Problem]]()
for split, probs in problems.items():
if split == "train":
continue
prob_lists = pmap(
problem_transformer.transform,
probs,
desc=f"transform({split})",
chunksize=1000,
)
results[split] = join_list(prob_lists)
return results
prob_config = repr_modified_args(change_processor)
processed_dir = get_dataset_dir(dataset_name) / "processed"
cache = PickleCache(processed_dir)
with timed_action("Making or loading C3 problems"):
problems = cache.cached(
prob_config,
lambda: datasets_from_repos(
get_dataset_dir(dataset_name) / "repos",
change_processor,
workers=workers,
),
remake=remake_problems,
)
size_mb = (processed_dir / prob_config).stat().st_size / (1024**2)
print(f"Problems total size: {size_mb:.2f} MB")
trans_config = repr_modified_args(problem_transformer)
transformed_dir = get_dataset_dir(dataset_name) / "transformed"
cache = PickleCache(transformed_dir)
with timed_action("Making or loading transformed C3 problems for eval"):
eval_probs = cache.cached(
f"{prob_config}-{trans_config}",
lambda: transform_eval_problems(problems),
<0> )
return C3ProblemDataset(
train=problems.get("train", []),
valid=eval_probs.get("valid", []),
test=eval_probs.get("test", []),
)
| ===========above chunk 0===========
# module: coeditor.dataset
def make_or_load_dataset(
dataset_name: str,
change_processor: ProjectChangeProcessor[C3Problem],
problem_transformer: C3ProblemTransform,
remake_problems: bool = False,
workers: int = DefaultWorkers,
) -> C3ProblemDataset:
# offset: -1
def transform_eval_problems(
problems: dict[str, Sequence[C3Problem]]
) -> dict[str, Sequence[C3Problem]]:
results = dict[str, Sequence[C3Problem]]()
for split, probs in problems.items():
if split == "train":
continue
prob_lists</s>
===========unchanged ref 0===========
at: coeditor._utils
DefaultWorkers: int = multiprocessing.cpu_count() // 2
global DefaultWorkers
pmap(f: Callable[..., T1], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], /, *iterables: Iterable[Any], desc: str | None=None, key_args: Mapping[str, Any] | None=None, max_workers: int | None=None, chunksize: int | None=None, tqdm_args: Mapping[str, Any] | None=None) -> list[T1]
timed_action(name: str, silent: bool=False)
PickleCache(cache_dir: Path)
repr_modified_args(instance, flatten: bool=False) -> str
at: coeditor._utils.PickleCache
cached(rel_path: Path | str, func: Callable[[], T1], remake: bool=False) -> T1
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
C3ProblemTransform()
at: coeditor.c3problem.C3ProblemTransform
transform(prob: C3Problem) -> Sequence[C3Problem]
at: coeditor.common
get_dataset_dir(dataname: str) -> Path
join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
at: coeditor.dataset
datasets_from_repos(repos_root: Path, change_processor: ProjectChangeProcessor[C3Problem], max_history_per_repo: int=1000, workers: int=DefaultWorkers) -> dict[str, Sequence[C3Problem]]
===========unchanged ref 1===========
C3ProblemDataset(map: Mapping[_KT, _VT], **kwargs: _VT)
C3ProblemDataset(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
C3ProblemDataset(**kwargs: _VT)
at: coeditor.scoped_changes
ProjectChangeProcessor()
at: os.stat_result
st_mode: int # protection bits,
st_ino: int # inode number,
st_dev: int # device,
st_nlink: int # number of hard links,
st_uid: int # user id of owner,
st_gid: int # group id of owner,
st_size: int # size of file, in bytes,
st_atime: float # time of most recent access,
st_mtime: float # time of most recent content modification,
st_ctime: float # platform dependent (time of most recent metadata change on Unix, or the time of creation on Windows)
st_atime_ns: int # time of most recent access, in nanoseconds
st_mtime_ns: int # time of most recent content modification in nanoseconds
st_ctime_ns: int # platform dependent (time of most recent metadata change on Unix, or the time of creation on Windows) in nanoseconds
st_reparse_tag: int
st_file_attributes: int
st_blocks: int # number of blocks allocated for file
st_blksize: int # filesystem blocksize
st_rdev: int # type of device if an inode device
st_flags: int # user defined flags for file
st_gen: int # file generation number
st_birthtime: int # time of file creation
st_rsize: int
st_creator: int
st_type: int
at: pathlib.Path
__slots__ = ()
stat() -> os.stat_result
===========unchanged ref 2===========
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
at: typing.Mapping
get(key: _KT) -> Optional[_VT_co]
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
===========changed ref 0===========
# module: coeditor.c3problem
+ @dataclass
+ class C3ToCodeCompletion(C3ProblemTransform):
+ def __post_init__(self):
+ self._rng = random.Random()
+
===========changed ref 1===========
# module: coeditor.c3problem
+ @dataclass
+ class C3ToCodeCompletion(C3ProblemTransform):
+ """Convert the C3 problem into an edit-oriented code completion problem by
+ randomly picking a changed line as the completion target, deleting its
+ old version, and treating the new version as the desired output.
+
+ ### Change log
+ - empty
+ """
+
+ VERSION = "1.0"
+ min_target_size = 6
+
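The selection logic this transform applies can be sketched standalone (token ids are placeholders; the real `Add_id` comes from the encoding module): a change group qualifies as a completion target only if it starts with an addition and is large enough.

    Add_id = 100  # placeholder id for the <add> action token
    min_target_size = 6

    def qualifies(segs: list[list[int]]) -> bool:
        return segs[0][0] == Add_id and sum(len(s) for s in segs) >= min_target_size

    print(qualifies([[100, 1, 2, 3], [100, 4, 5]]))  # True: starts with <add>, total size 7
    print(qualifies([[100, 1]]))                     # False: total size 2 < min_target_size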
===========changed ref 2===========
# module: coeditor.c3problem
+ @dataclass
+ class C3ToCodeCompletion(C3ProblemTransform):
+ def transform(self, prob: C3Problem) -> Sequence[C3Problem]:
+ original = prob.span.original
+ delta = prob.span.delta
+
+ def group_filter(group: tuple) -> bool:
+ segs = [delta[k] for k in group]
+ return (
+ segs[0][0] == Add_id
+ and sum(len(s) for s in segs) >= self.min_target_size
+ )
+
+ add_groups = [ks for ks in delta.change_groups() if group_filter(ks)]
+ if not add_groups:
+ return []
+ # sample the completion target
+ target = self._rng.choice(add_groups)
+
+ prev_changes = [k for k in delta.keys() if k < target[0]]
+ if delta[target[-1]][0] == Del_id:
+             # if the last change is a deletion, move it into prev_changes
+ prev_changes.append(target[-1])
+ target = target[:-1]
+ assert target
+ prev_delta, rest_delta = delta.decompose_for_change(prev_changes)
+ new_original = prev_delta.apply_to_change(original.tolist())
+ new_delta_keys = tuple(rest_delta.keys())[: len(target)]
+ new_delta = rest_delta.for_keys(new_delta_keys)
+ assert new_delta, "the remaining delta should not be empty"
+ new_span = replace(
+ prob.span, original=TkArray.new(new_original), delta=new_delta
+ )
+ new_trans = prob.transformations + ("code_completion",)
+ new_lines = tuple(set(k[0] for k in new_delta.keys()))
+ new_prob = replace(
+ prob,
+ span=new</s> |
coeditor.c3problem/C3ProblemTokenizer.tokenize_problem | Modified | temp-1 | ad583b6e5c1cda2fb322aea90f34605c9d3f3643 | Use a new 3-stage training pipeline. - Add `filter` to C3DataLoader. - Add `truncated` to TkC3Problem. | <0>:<add> truncated=truncated,
| # module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
<s> in enumerate(below_chunks)
]
all_refs = above_chunks + below_chunks
ref_size_sum = sum(len(ref) for _, ref in all_refs)
+ truncated = False
if ref_size_sum < self.max_ref_tks_sum:
unchanged = self._group_encode_unchanged_refs(problem.relevant_unchanged)
for i, chunk in enumerate(unchanged):
all_refs.append((f"unchanged ref {i}", chunk))
+ else:
+ truncated = True
if ref_size_sum < self.max_ref_tks_sum:
changed = self._group_encode_changed_refs(problem.relevant_changes)
for i, chunk in enumerate(changed):
all_refs.append((f"changed ref {i}", chunk))
ref_size_sum += sum(len(x) for x in changed)
+ else:
+ truncated = True
# take until we hit the limit
ref_size_sum = 0
kept_refs = list[tuple[str, TkArray]]()
for name, ref in all_refs:
if ref_size_sum + len(ref) > self.max_ref_tks_sum:
+ truncated = True
+ break
- continue
ref_size_sum += len(ref)
kept_refs.append((name, ref))
return TkC3Problem(
TkArray.new(chunk_input),
TkArray.new(scope_tks),
TkArray.new(chunk_output),
path=span.headers[-1].path,
change_type=problem.change_type,
named_references=kept_refs,
project=problem.src_info["project"],
commit=problem.src_info["commit"],
<0> )
| ===========above chunk 0===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: -1
<s>_lines[:edit_start] + [TokenSeq()], Newline_id)
above_tks = tk_delta.for_input_range((0, edit_start)).apply_to_change(above_tks)
below_tks = join_list(origin_lines[edit_stop:] + [TokenSeq()], Newline_id)
chunk_input, above_tks, below_tks = self._inline_some_context(
chunk_input, above_tks, below_tks, input_limit
)
chunk_output = truncate_section(
chunk_output,
TruncateAt.Right,
self.max_output_tks,
add_bos=False,
inplace=True,
)
above_chunks = break_into_chunks(
above_tks,
lambda i: self._encode_headers(span.headers, -1 - i),
chunk_size=self.max_ref_tks,
overlap=self.ref_chunk_overlap,
right_to_left=True,
)
if not below_tks:
below_chunks = []
else:
below_chunks = break_into_chunks(
below_tks,
lambda i: self._encode_headers(span.headers, i + 1),
chunk_size=self.max_ref_tks,
overlap=self.ref_chunk_overlap,
)
above_chunks = [
(f"above chunk {i}", TkArray.new(chunk))
for i, chunk in enumerate(above_chunks)
]
below_chunks = [
(f"below chunk {i}", TkArray.new(chunk))
for i, chunk in enumerate(below_chunks)
]
all_refs = above_chunks + below_chunks
ref_size_sum</s>
===========above chunk 1===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: -2
<s> scope_tks = self._encode_headers(span.headers, 0)
input_limit = self.max_query_tks - len(scope_tks)
chunk_input = TokenSeq()
chunk_output = TokenSeq()
last_line = edit_start
for i, l in enumerate(problem.edit_line_ids):
for line in origin_lines[last_line + 1 : l]:
chunk_input.extend(line)
chunk_input.append(Newline_id)
chunk_input.append(get_extra_id(i))
if l < len(origin_lines):
chunk_input.extend(origin_lines[l])
chunk_input.append(Newline_id)
last_line = l
line_change = join_list(tk_delta.get_line_change(l), Newline_id)
chunk_output.append(get_extra_id(i))
chunk_output.extend(line_change)
if line_change and line_change[-1] != Del_id:
chunk_output.append(Newline_id)
if len(chunk_input) > input_limit:
break
edit_stop = last_line + 1
# limit the input size if it's too long
chunk_input = truncate_section(
chunk_input, TruncateAt.Right, input_limit, inplace=True
)
chunk_output = truncate_output_tks(chunk_input, chunk_output)
# try move some prev_change_tks into the input
above_tks = join_list(origin_lines[:edit_start] + [TokenSeq()], Newline_id)
above_tks = tk_delta.for</s>
===========above chunk 2===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: -3
span = problem.span
original: TokenSeq = span.original.tolist()
tk_delta: TkDelta = span.delta
origin_lines = tk_splitlines(original)
edit_start = problem.edit_line_ids[0</s>
===========unchanged ref 0===========
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
TkC3Problem(main_input: TkArray, header: TkArray, output: TkArray, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TkArray]], project: str, commit: CommitInfo | None, truncated: bool)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.6"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 16
ref_chunk_overlap: int = 32
_encode_headers(scope_changes: Sequence[ChangedHeader], offset: int) -> TokenSeq
_inline_some_context(input: TokenSeq, above_ctx: TokenSeq, below_ctx: TokenSeq, size_limit: int) -> tuple[TokenSeq, TokenSeq, TokenSeq]
_group_encode_unchanged_refs(elems: Mapping[PyFullName, PyDefinition]) -> Sequence[TkArray]
_group_encode_changed_refs(changes: Sequence[ChangedCodeSpan]) -> Sequence[TkArray]
at: coeditor.c3problem.ChangedCodeSpan
headers: Sequence[ChangedHeader]
|
coeditor.dataset/make_or_load_dataset | Modified | temp-1 | ad583b6e5c1cda2fb322aea90f34605c9d3f3643 | Use a new 3-stage training pipeline. - Add `filter` to C3DataLoader. - Add `truncated` to TkC3Problem. | <0>:<add> f"eval-{prob_config}-{trans_config}",
| # module: coeditor.dataset
def make_or_load_dataset(
dataset_name: str,
change_processor: ProjectChangeProcessor[C3Problem],
+ eval_transformer: C3ProblemTransform,
- problem_transformer: C3ProblemTransform,
remake_problems: bool = False,
workers: int = DefaultWorkers,
) -> C3ProblemDataset:
<s>
probs,
desc=f"transform({split})",
chunksize=1000,
)
results[split] = join_list(prob_lists)
return results
prob_config = repr_modified_args(change_processor)
processed_dir = get_dataset_dir(dataset_name) / "processed"
cache = PickleCache(processed_dir)
with timed_action("Making or loading C3 problems"):
problems = cache.cached(
prob_config,
lambda: datasets_from_repos(
get_dataset_dir(dataset_name) / "repos",
change_processor,
workers=workers,
),
remake=remake_problems,
)
size_mb = (processed_dir / prob_config).stat().st_size / (1024**2)
print(f"Problems total size: {size_mb:.2f} MB")
+ trans_config = repr_modified_args(eval_transformer)
- trans_config = repr_modified_args(problem_transformer)
transformed_dir = get_dataset_dir(dataset_name) / "transformed"
cache = PickleCache(transformed_dir)
with timed_action("Making or loading transformed C3 problems for eval"):
eval_probs = cache.cached(
- f"{prob_config}-{trans_config}",
<0> lambda: transform_eval_problems(problems),
remake=remake_problems,
)
return C3ProblemDataset(
train=problems.get("train", []),
valid=eval_probs.get("valid", []),
test=eval_probs.get("test", []),
)
| ===========above chunk 0===========
# module: coeditor.dataset
def make_or_load_dataset(
dataset_name: str,
change_processor: ProjectChangeProcessor[C3Problem],
+ eval_transformer: C3ProblemTransform,
- problem_transformer: C3ProblemTransform,
remake_problems: bool = False,
workers: int = DefaultWorkers,
) -> C3ProblemDataset:
# offset: -1
def transform_eval_problems(
problems: dict[str, Sequence[C3Problem]]
) -> dict[str, Sequence[C3Problem]]:
results = dict[str, Sequence[C3Problem]]()
for split, probs in problems.items():
if split == "train":
continue
prob_lists = pmap(
+ eval_transformer.transform,
- problem_transformer.transform,
probs,
desc=f"transform({split})",
chunksize=1000,
)
results[split]</s>
===========unchanged ref 0===========
at: coeditor._utils
DefaultWorkers: int = multiprocessing.cpu_count() // 2
global DefaultWorkers
pmap(f: Callable[..., T1], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], /, *iterables: Iterable[Any], desc: str | None=None, key_args: Mapping[str, Any] | None=None, max_workers: int | None=None, chunksize: int | None=None, tqdm_args: Mapping[str, Any] | None=None) -> list[T1]
timed_action(name: str, silent: bool=False)
PickleCache(cache_dir: Path)
repr_modified_args(instance, flatten: bool=False) -> str
at: coeditor._utils.PickleCache
cached(rel_path: Path | str, func: Callable[[], T1], remake: bool=False) -> T1
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
C3ProblemTransform()
at: coeditor.c3problem.C3ProblemTransform
transform(prob: C3Problem) -> Sequence[C3Problem]
at: coeditor.common
get_dataset_dir(dataname: str) -> Path
join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
at: coeditor.dataset
datasets_from_repos(repos_root: Path, change_processor: ProjectChangeProcessor[C3Problem], max_history_per_repo: int=1000, workers: int=DefaultWorkers) -> dict[str, Sequence[C3Problem]]
===========unchanged ref 1===========
C3ProblemDataset(map: Mapping[_KT, _VT], **kwargs: _VT)
C3ProblemDataset(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
C3ProblemDataset(**kwargs: _VT)
at: coeditor.scoped_changes
ProjectChangeProcessor()
at: os.stat_result
st_mode: int # protection bits,
st_ino: int # inode number,
st_dev: int # device,
st_nlink: int # number of hard links,
st_uid: int # user id of owner,
st_gid: int # group id of owner,
st_size: int # size of file, in bytes,
st_atime: float # time of most recent access,
st_mtime: float # time of most recent content modification,
st_ctime: float # platform dependent (time of most recent metadata change on Unix, or the time of creation on Windows)
st_atime_ns: int # time of most recent access, in nanoseconds
st_mtime_ns: int # time of most recent content modification in nanoseconds
st_ctime_ns: int # platform dependent (time of most recent metadata change on Unix, or the time of creation on Windows) in nanoseconds
st_reparse_tag: int
st_file_attributes: int
st_blocks: int # number of blocks allocated for file
st_blksize: int # filesystem blocksize
st_rdev: int # type of device if an inode device
st_flags: int # user defined flags for file
st_gen: int # file generation number
st_birthtime: int # time of file creation
st_rsize: int
st_creator: int
st_type: int
at: pathlib.Path
__slots__ = ()
stat() -> os.stat_result
===========unchanged ref 2===========
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
at: typing.Mapping
get(key: _KT) -> Optional[_VT_co]
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
===========changed ref 0===========
# module: coeditor.c3problem
+ @dataclass
+ class C3ProblemChangeInlining(C3ProblemTransform):
+ def __post_init__(self):
+ self._rng = random.Random()
+
===========changed ref 1===========
# module: coeditor.c3problem
- @dataclass
- class C3ProblemChangeDropout(C3ProblemTransform):
- def __post_init__(self):
- self._rng = random.Random()
-
===========changed ref 2===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class ChangedCodeSpan:
+ def change_size(self) -> int:
+ return len(self.original) + self.delta.change_size()
+
===========changed ref 3===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class TkC3Problem(TokenizedEdit):
"Tokenized contextual code change prediction problem."
main_input: TkArray
header: TkArray
output: TkArray
path: ProjectPath
change_type: Change[None]
# most relevant to least relevant
named_references: Sequence[tuple[str, TkArray]]
project: str
commit: CommitInfo | None
+ truncated: bool
===========changed ref 4===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
"""
## Change log
+ - 2.6: increase max_ref_tks_sum from 512 * 12 to 512 * 16.
- 2.5: Sort used references by path.
- 2.4: Encode each changed reference individually. Encode signatures for unchanged.
"""
+ VERSION = "2.6"
- VERSION = "2.5"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
+ max_ref_tks_sum: int = 512 * 16
- max_ref_tks_sum: int = 512 * 12
ref_chunk_overlap: int = 32
|
coeditor.model/show_prediction | Modified | temp-1 | ad583b6e5c1cda2fb322aea90f34605c9d3f3643 | Use a new 3-stage training pipeline. - Add `filter` to C3DataLoader. - Add `truncated` to TkC3Problem. | <0>:<add> truncated=False,
| # module: coeditor.model
def show_prediction(prob: C3Problem, pred: RetrievalModelPrediction) -> str:
span = prob.span
tk_prob = TkC3Problem(
main_input=TkArray.new(pred["input_ids"]),
header=TkArray.new([]),
output=TkArray.new(pred["labels"]),
path=span.headers[-1].path,
change_type=prob.change_type,
named_references=[
(f"reference-{i}", TkArray.new(ref))
for i, ref in enumerate(pred["references"])
],
project=prob.src_info["project"],
commit=prob.src_info["commit"],
<0> )
return tk_prob.show(pred["output_ids"])
| ===========unchanged ref 0===========
at: coeditor.encoding.TokenizedEdit
input_tks: TokenSeq
output_tks: TokenSeq
main_tks: TokenSeq
path: ProjectPath
change_type: Change[None]
BAD_DELETE = encode_single_line("((bad delete))")
show(pred_tks: TokenSeq | None=None, skip_ctx: bool=False, skip_meta: bool=False) -> str
at: coeditor.model
MultiRoundEditStats(label_edit_gain: int, first_edit_gain: int, total_edit_gain: int, rounds: int)
RetrivalEncoderOutputs(last_hidden_state: Tensor, hidden_state_mask: Tensor | None=None)
at: coeditor.model.MultiRoundEditStats
label_edit_gain: int
first_edit_gain: int
total_edit_gain: int
rounds: int
at: coeditor.model.RetrivalEncoderOutputs
last_hidden_state: Tensor
hidden_state_mask: Tensor | None = None
at: coeditor.model.show_prediction
tk_prob = TkC3Problem(
main_input=TkArray.new(pred["input_ids"]),
header=TkArray.new([]),
output=TkArray.new(pred["labels"]),
path=span.headers[-1].path,
change_type=prob.change_type,
named_references=[
(f"reference-{i}", TkArray.new(ref))
for i, ref in enumerate(pred["references"])
],
project=prob.src_info["project"],
commit=prob.src_info["commit"],
truncated=False,
)
===========unchanged ref 1===========
at: dataclasses
dataclass(_cls: Type[_T]) -> Type[_T]
dataclass(*, init: bool=..., repr: bool=..., eq: bool=..., order: bool=..., unsafe_hash: bool=..., frozen: bool=...) -> Callable[[Type[_T]], Type[_T]]
dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]
at: transformers.utils.generic
ModelOutput(**kwargs: _VT)
ModelOutput(map: Mapping[_KT, _VT], **kwargs: _VT)
ModelOutput(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
===========changed ref 0===========
# module: coeditor.c3problem
+ @dataclass
+ class C3ProblemChangeInlining(C3ProblemTransform):
+ def __post_init__(self):
+ self._rng = random.Random()
+
===========changed ref 1===========
# module: coeditor.c3problem
- @dataclass
- class C3ProblemChangeDropout(C3ProblemTransform):
- def __post_init__(self):
- self._rng = random.Random()
-
===========changed ref 2===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class ChangedCodeSpan:
+ def change_size(self) -> int:
+ return len(self.original) + self.delta.change_size()
+
===========changed ref 3===========
# module: coeditor.c3problem
@dataclass(frozen=True)
class TkC3Problem(TokenizedEdit):
"Tokenized contextual code change prediction problem."
main_input: TkArray
header: TkArray
output: TkArray
path: ProjectPath
change_type: Change[None]
# most relevant to least relevant
named_references: Sequence[tuple[str, TkArray]]
project: str
commit: CommitInfo | None
+ truncated: bool
===========changed ref 4===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
"""
## Change log
+ - 2.6: increase max_ref_tks_sum from 512 * 12 to 512 * 16.
- 2.5: Sort used references by path.
- 2.4: Encode each changed reference individually. Encode signatures for unchanged.
"""
+ VERSION = "2.6"
- VERSION = "2.5"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
+ max_ref_tks_sum: int = 512 * 16
- max_ref_tks_sum: int = 512 * 12
ref_chunk_overlap: int = 32
===========changed ref 5===========
# module: coeditor.c3problem
+ @dataclass
+ class C3ProblemChangeInlining(C3ProblemTransform):
+ """Split the problem into fixed-sized editing ranges like `C3ProblemSimpleSplit`,
+ but also randomly keep some subset of changes in the input.
+
+ ### Change log
+     - v1.3: make `random_subset` truly random.
+ - v1.2: fix newline encoding bug.
+ - v1.1
+ - Dropout changes using change groups instead of individual change actions.
+         - Perform dropout at entire problem level rather than chunk level. This way,
+ changes in later chunks will be visible as well.
+ - Removed `dropout_prob`.
+ """
+
+ VERSION = "1.3"
+
+ max_lines_to_edit: int = 25
+ max_split_factor: int = 4
+ # when dropping the changes into the input, the biggest ratio of changes to drop
+ max_dropout_ratio: float = 0.5
+ _test_prob: float = 0.01
+
===========changed ref 6===========
# module: coeditor.c3problem
- @dataclass
- class C3ProblemChangeDropout(C3ProblemTransform):
- """Split the problem into fixed-sized editing ranges like `C3ProblemSimpleSplit`,
- but also randomly keep some subset of changes in the input.
-
- ### Change log
-     - v1.3: make `random_subset` truly random.
- - v1.2: fix newline encoding bug.
- - v1.1
- - Dropout changes using change groups instead of individual change actions.
-         - Perform dropout at entire problem level rather than chunk level. This way,
- changes in later chunks will be visible as well.
- - Removed `dropout_prob`.
- """
-
- VERSION = "1.3"
-
- max_lines_to_edit: int = 25
- max_split_factor: int = 4
- # when dropping the changes into the input, the biggest ratio of changes to drop
- max_dropout_ratio: float = 0.5
- _test_prob: float = 0.01
- |
scripts.train_model/train_model | Modified | temp-1 | ad583b6e5c1cda2fb322aea90f34605c9d3f3643 | Use a new 3-stage training pipeline. - Add `filter` to C3DataLoader. - Add `truncated` to TkC3Problem. | <0>:<add> test_loader,
| # module: scripts.train_model
def train_model(
model_name: str,
dataset_name: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
<s>, train_loader, valid_loader, train_args)
- model.train_on_data(model_name, train_loader, eval_loader, train_args)
model.to("cuda")
+ test_loader = C3DataLoader(
+ datasets["test"], None, eval_tkn, eval_batch_args, shuffle=False, desc="test"
+ )
+ print(f"{len(test_loader)}")
+ print(f"{len(test_loader.all_probs)}")
with timed_action("Loss Evaluation"):
+ eval_result = model.eval_loss_on_loader(test_loader)
- eval_result = model.eval_loss_on_loader(eval_loader)
eval_dict = {f"test/{k}": v.average() for k, v in eval_result.items()}
wandb.log(eval_dict)
with timed_action("Accuracy Evaluation"):
out_dir = get_model_dir() / model_name / "exact_match_samples"
exact_acc = model.eval_on_data(
datasets["test"],
- eval_tkn,
- eval_batch_args,
<0> dec_args,
out_dir,
probs_to_save=300,
)
print("Exact-match accuracy:", exact_acc)
wandb.log({"test/exact-acc": exact_acc.average()})
cprint("blue", "Exact-match samples saved to:", out_dir)
return model
| ===========above chunk 0===========
<s>train_model
def train_model(
model_name: str,
dataset_name: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: -1
<s>Loader(
+ random_subset(datasets["train"], len(datasets["train"]) // 2),
+ encoder.problem_tranform,
+ warmup_tkn,
+ batch_args,
+ filter=_not_truncated,
+ shuffle=True,
+ desc="stage 2 training",
+ )
+ warmup_targs = copy.deepcopy(train_args)
+ warmup_targs.learning_rate *= 2
+ warmup_targs.max_train_epochs = 1
+ model.train_on_data(model_name, warmup_loader, valid_loader, warmup_targs)
+
- if not eval_only:
+ with timed_action("final stage training"):
- with timed_action("Fine-tune Training"):
- # we attach the problem transform to the dataloader to generate data on-the-fly
train_loader = C3DataLoader(
+ random_subset(datasets["train"], len(datasets["train"]) // 4),
- datasets["train"],
encoder.problem_tranform,
train_tkn,
batch_args,
shuffle=True,
+ desc="final stage training",
- desc="training",
)
- print("Fine-tune batch stats:")
- pprint(train_loader.get_batch_stats())
+ model.train_on_data(model_name, train_loader, valid_loader, train_args)
- model.train_on_data(model_name, train_</s>
===========above chunk 1===========
<s>train_model
def train_model(
model_name: str,
dataset_name: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: -2
<s>(
+ datasets["train"],
- warm_up_data,
encoder.problem_tranform,
warmup_tkn,
+ batch_args,
- warmup_bargs,
+ filter=_not_truncated,
shuffle=True,
+ desc="stage 1 training",
- desc="warm-up training",
)
- print("Warmup batch stats:")
- pprint(warmup_loader.get_batch_stats())
-
warmup_targs = copy.deepcopy(train_args)
warmup_targs.learning_rate *= 4
warmup_targs.max_train_epochs = 1
+ model.train_on_data(model_name, warmup_loader, valid_loader, warmup_targs)
- model.train_on_data(model_name, warmup_loader, eval_loader, warmup_targs)
+ with timed_action("stage 2 training"):
+ warmup_bargs = copy.deepcopy(batch_args)
+ warmup_bargs.min_queries *= 2
+ warmup_tkn = copy.copy(train_tkn)
+ warmup_tkn.max_ref_tks_sum //= 2
+ warmup_loader = C3DataLoader(
+ random_subset(datasets["train"], len(datasets["train"]) // 2),
+ encoder.problem</s>
===========above chunk 2===========
<s>train_model
def train_model(
model_name: str,
dataset_name: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: -3
<s>"
train_tkn = encoder.edit_tokenizer
eval_tkn = copy.deepcopy(train_tkn)
eval_tkn.max_query_tks *= 2
eval_tkn.max_output_tks *= 2
eval_tkn.max_ref_tks_sum *= 2
+ valid_loader = C3DataLoader(
- eval_loader = C3DataLoader(
datasets["valid"], None, eval_tkn, eval_batch_args, shuffle=False, desc="eval"
)
+ if not eval_only:
+ # follow a 3-stage training pipeline
- if not eval_only and resumed_from is None:
+ with timed_action("stage 1 training"):
- with timed_action("Warm-up Training"):
warmup_bargs = copy.deepcopy(batch_args)
warmup_bargs.min_queries *= 4
- warmup_bargs.max_queries *= 2
-
- warm_up_data = random_subset(
- datasets["train"], len(datasets["train"]) // 4, rng=42
- )
warmup_tkn = copy.copy(train_tkn)
+ warmup_tkn.max_ref_tks_sum //= 4
- warmup_tkn.max_ref_tks_sum //= 3
warmup_loader = C3</s>
===========above chunk 3===========
<s>train_model
def train_model(
model_name: str,
dataset_name: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: -4
<s>izer": encoder.edit_tokenizer.get_args(),
"batch_args": batch_args,
"train_args": train_args,
"dec_args": dec_args,
}.items()
}
project = "Coeditor" if not quicktest else "Coeditor-quicktest"
if eval_only:
project = "eval-" + project
wandb.init(dir="..", project=project, name=model_name, config=config_dict)
if quicktest:
print("Using fewer data for quick test.")
n_quick_exs = 20
datasets = C3ProblemDataset(
train=datasets["train"][:n_quick_exs],
valid=datasets["valid"][:n_quick_exs],
test=datasets["test"][:n_quick_exs],
)
if resumed_from is None:
model = RetrievalEditorModel.from_code_t5("base", reuse_embed=True)
else:
model = RetrievalEditorModel.load(resumed_from)
if os.getenv("CUDA_VISIBLE_DEVICES") is None:
warnings.warn(
"CUDA_VISIBLE_DEVICES not set, using 0. Note that "
"the Huggingface Trainer will use all visible GPUs for training."
)
os.environ["CUDA_VISIBLE_DEVICES"] =</s> |
coeditor.dataset/make_or_load_dataset | Modified | temp-1 | 105bd32bd2520f951e8003a3e08c82d784029b2d | Organize training code. | <0>:<add> test=problems.get("test", []),
| # module: coeditor.dataset
def make_or_load_dataset(
dataset_name: str,
change_processor: ProjectChangeProcessor[C3Problem],
- eval_transformer: C3ProblemTransform,
remake_problems: bool = False,
workers: int = DefaultWorkers,
) -> C3ProblemDataset:
<s>,
- desc=f"transform({split})",
- chunksize=1000,
- )
- results[split] = join_list(prob_lists)
- return results
-
prob_config = repr_modified_args(change_processor)
processed_dir = get_dataset_dir(dataset_name) / "processed"
cache = PickleCache(processed_dir)
with timed_action("Making or loading C3 problems"):
problems = cache.cached(
prob_config,
lambda: datasets_from_repos(
get_dataset_dir(dataset_name) / "repos",
change_processor,
workers=workers,
),
remake=remake_problems,
)
size_mb = (processed_dir / prob_config).stat().st_size / (1024**2)
print(f"Problems total size: {size_mb:.2f} MB")
- trans_config = repr_modified_args(eval_transformer)
- transformed_dir = get_dataset_dir(dataset_name) / "transformed"
- cache = PickleCache(transformed_dir)
-
- with timed_action("Making or loading transformed C3 problems for eval"):
- eval_probs = cache.cached(
- f"eval-{prob_config}-{trans_config}",
- lambda: transform_eval_problems(problems),
- remake=remake_problems,
- )
-
return C3ProblemDataset(
train=problems.get("train", []),
+ valid=problems.get("valid", []),
- valid=eval_probs.get("valid", []),
- test=eval_probs.get("test", []),
<0> )
| ===========above chunk 0===========
# module: coeditor.dataset
def make_or_load_dataset(
dataset_name: str,
change_processor: ProjectChangeProcessor[C3Problem],
- eval_transformer: C3ProblemTransform,
remake_problems: bool = False,
workers: int = DefaultWorkers,
) -> C3ProblemDataset:
# offset: -1
- def transform_eval_problems(
- problems: dict[str, Sequence[C3Problem]]
- ) -> dict[str, Sequence[C3Problem]]:
- results = dict[str, Sequence[C3Problem]]()
- for split, probs in problems.items():
- if split == "train":
- continue
- prob_lists = pmap(
- eval_transformer.transform,
- probs,
- desc=f"transform({split})",
- chunksize=1000,
- )
- results[split</s>
===========unchanged ref 0===========
at: coeditor._utils
DefaultWorkers: int = multiprocessing.cpu_count() // 2
global DefaultWorkers
pmap(f: Callable[..., T1], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], /, *iterables: Iterable[Any], desc: str | None=None, key_args: Mapping[str, Any] | None=None, max_workers: int | None=None, chunksize: int | None=None, tqdm_args: Mapping[str, Any] | None=None) -> list[T1]
not_none(x: Optional[T1]) -> T1
timed_action(name: str, silent: bool=False)
PickleCache(cache_dir: Path)
repr_modified_args(instance, flatten: bool=False) -> str
at: coeditor._utils.PickleCache
cached(rel_path: Path | str, func: Callable[[], T1], remake: bool=False) -> T1
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
at: coeditor.c3problem.C3ProblemTransform
transform(prob: C3Problem) -> Sequence[C3Problem]
at: coeditor.common
get_dataset_dir(dataname: str) -> Path
join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
at: coeditor.dataset
C3CombinedEncoder(change_processor: ProjectChangeProcessor[C3Problem]=field(
default_factory=C3ProblemGenerator
), problem_tranform: C3ProblemTransform=field(default_factory=C3ProblemSimpleSplit), edit_tokenizer: C3ProblemTokenizer=field(default_factory=C3ProblemTokenizer))
===========unchanged ref 1===========
datasets_from_repos(repos_root: Path, change_processor: ProjectChangeProcessor[C3Problem], max_history_per_repo: int=1000, workers: int=DefaultWorkers) -> dict[str, Sequence[C3Problem]]
C3ProblemDataset(map: Mapping[_KT, _VT], **kwargs: _VT)
C3ProblemDataset(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
C3ProblemDataset(**kwargs: _VT)
at: coeditor.dataset.C3CombinedEncoder
change_processor: ProjectChangeProcessor[C3Problem] = field(
default_factory=C3ProblemGenerator
)
problem_tranform: C3ProblemTransform = field(default_factory=C3ProblemSimpleSplit)
edit_tokenizer: C3ProblemTokenizer = field(default_factory=C3ProblemTokenizer)
at: os.stat_result
st_mode: int # protection bits,
st_ino: int # inode number,
st_dev: int # device,
st_nlink: int # number of hard links,
st_uid: int # user id of owner,
st_gid: int # group id of owner,
st_size: int # size of file, in bytes,
st_atime: float # time of most recent access,
st_mtime: float # time of most recent content modification,
st_ctime: float # platform dependent (time of most recent metadata change on Unix, or the time of creation on Windows)
st_atime_ns: int # time of most recent access, in nanoseconds
st_mtime_ns: int # time of most recent content modification in nanoseconds
st_ctime_ns: int # platform dependent (time of most recent metadata change on Unix, or the time of creation on Windows) in nanoseconds
st_reparse_tag: int
st_file_attributes: int
st_blocks: int # number of blocks allocated for file
===========unchanged ref 2===========
st_blksize: int # filesystem blocksize
st_rdev: int # type of device if an inode device
st_flags: int # user defined flags for file
st_gen: int # file generation number
st_birthtime: int # time of file creation
st_rsize: int
st_creator: int
st_type: int
at: pathlib.Path
__slots__ = ()
stat() -> os.stat_result
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
at: typing.Mapping
get(key: _KT) -> Optional[_VT_co]
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
===========changed ref 0===========
# module: coeditor.dataset
class C3ProblemDataset(TypedDict):
train: Sequence[C3Problem]
+ valid: Sequence[C3Problem]
- valid: Sequence[TransformedC3Problem]
+ test: Sequence[C3Problem]
- test: Sequence[TransformedC3Problem]
===========changed ref 1===========
# module: coeditor.dataset
- TransformedC3Problem = C3Problem
-
===========changed ref 2===========
# module: coeditor.dataset
time_limit_per_commit = 10.0
- |
scripts.train_model/train_model | Modified | temp-1 | 105bd32bd2520f951e8003a3e08c82d784029b2d | Organize training code. | <0>:<add> model.train_on_data(model_name, s_loader, valid_loader, s_targs)
| # module: scripts.train_model
def train_model(
model_name: str,
dataset_name: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
<s> copy.deepcopy(train_args)
- warmup_targs.learning_rate *= 2
- warmup_targs.max_train_epochs = 1
- model.train_on_data(model_name, warmup_loader, valid_loader, warmup_targs)
-
- with timed_action("final stage training"):
- train_loader = C3DataLoader(
- random_subset(datasets["train"], len(datasets["train"]) // 4),
- encoder.problem_tranform,
- train_tkn,
- batch_args,
- shuffle=True,
- desc="final stage training",
- )
- model.train_on_data(model_name, train_loader, valid_loader, train_args)
<0>
model.to("cuda")
test_loader = C3DataLoader(
datasets["test"], None, eval_tkn, eval_batch_args, shuffle=False, desc="test"
)
print(f"{len(test_loader)}")
print(f"{len(test_loader.all_probs)}")
with timed_action("Loss Evaluation"):
eval_result = model.eval_loss_on_loader(test_loader)
eval_dict = {f"test/{k}": v.average() for k, v in eval_result.items()}
wandb.log(eval_dict)
with timed_action("Accuracy Evaluation"):
out_dir = get_model_dir() / model_name / "exact_match_samples"
</s> | ===========above chunk 0===========
<s>train_model
def train_model(
model_name: str,
dataset_name: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: -1
<s>,
+ desc=f"stage {stage} training",
- desc="stage 1 training",
)
+ s_targs = copy.deepcopy(train_args)
- warmup_targs = copy.deepcopy(train_args)
+ s_targs.learning_rate *= scale
- warmup_targs.learning_rate *= 4
+ s_targs.max_train_epochs = 1
- warmup_targs.max_train_epochs = 1
- model.train_on_data(model_name, warmup_loader, valid_loader, warmup_targs)
-
+ with timed_action(f"stage {stage} training"):
- with timed_action("stage 2 training"):
- warmup_bargs = copy.deepcopy(batch_args)
- warmup_bargs.min_queries *= 2
- warmup_tkn = copy.copy(train_tkn)
- warmup_tkn.max_ref_tks_sum //= 2
- warmup_loader = C3DataLoader(
- random_subset(datasets["train"], len(datasets["train"]) // 2),
- encoder.problem_tranform,
- warmup_tkn,
- batch_args,
- filter=_not_truncated,
- shuffle=True,
- desc="stage 2 training",
- )
- warmup_targs = copy.deepcopy(train_args)
- warmup_targs.learning_rate *= 2
- warmup_t</s>
===========above chunk 1===========
<s>train_model
def train_model(
model_name: str,
dataset_name: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: -2
<s>action("stage 1 training"):
+ s_bargs = copy.deepcopy(batch_args)
- warmup_bargs = copy.deepcopy(batch_args)
+ s_bargs.min_queries *= scale
- warmup_bargs.min_queries *= 4
+ s_tkn = copy.copy(train_tkn)
- warmup_tkn = copy.copy(train_tkn)
+ s_tkn.max_ref_tks_sum //= scale
- warmup_tkn.max_ref_tks_sum //= 4
+ s_probs = [
+ x
+ for x in datasets["train"]
+ if sum(c.change_size() for c in x.relevant_changes)
+ <= s_tkn.max_ref_tks_sum
+ ]
+ s_probs = random_subset(s_probs, len(s_probs) // 4 * scale)
+ s_loader = C3DataLoader(
- warmup_loader = C3DataLoader(
+ s_probs,
- datasets["train"],
encoder.problem_tranform,
+ s_tkn,
- warmup_tkn,
batch_args,
filter=_not_truncated,
shuffle=True,
+ desc=f"stage {stage} training",
- desc="stage 1 training",
)
+ s</s>
===========above chunk 2===========
<s>train_model
def train_model(
model_name: str,
dataset_name: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: -3
<s>exs],
valid=datasets["valid"][:n_quick_exs],
test=datasets["test"][:n_quick_exs],
)
if resumed_from is None:
model = RetrievalEditorModel.from_code_t5("base", reuse_embed=True)
else:
model = RetrievalEditorModel.load(resumed_from)
if os.getenv("CUDA_VISIBLE_DEVICES") is None:
warnings.warn(
"CUDA_VISIBLE_DEVICES not set, using 0. Note that "
"the Huggingface Trainer will use all visible GPUs for training."
)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
train_tkn = encoder.edit_tokenizer
eval_tkn = copy.deepcopy(train_tkn)
eval_tkn.max_query_tks *= 2
eval_tkn.max_output_tks *= 2
eval_tkn.max_ref_tks_sum *= 2
valid_loader = C3DataLoader(
datasets["valid"], None, eval_tkn, eval_batch_args, shuffle=False, desc="eval"
)
if not eval_only:
# follow a 3-stage training pipeline
+ scales = [4, 2, 1]
+ for stage, scale in enumerate(scales):
- with</s>
===========above chunk 3===========
<s>train_model
def train_model(
model_name: str,
dataset_name: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: -4
<s> datasets,
+ encoder,
+ remake_problems=recreate_data,
+ workers=multiprocessing.cpu_count(),
+ )
+
# limit the number of examples for faster testing
+ datasets["valid"] = random_subset(eval_probs["valid"], 10000, rng=42)
- datasets["valid"] = random_subset(datasets["valid"], 10000, rng=42)
+ datasets["test"] = random_subset(eval_probs["test"], 10000, rng=42)
- datasets["test"] = random_subset(datasets["test"], 10000, rng=42)
config_dict = {
k: get_modified_args(v)
for k, v in {
"edit_tokenizer": encoder.edit_tokenizer.get_args(),
"batch_args": batch_args,
"train_args": train_args,
"dec_args": dec_args,
}.items()
}
project = "Coeditor" if not quicktest else "Coeditor-quicktest"
if eval_only:
project = "eval-" + project
wandb.init(dir="..", project=project, name=model_name, config=config_dict)
if quicktest:
print("Using fewer data for quick test.")
n_quick_exs = 20
datasets = C3ProblemDataset(
train=datasets["train"][:n_</s> |
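The per-stage arithmetic in the training loop above reduces to a simple schedule; a standalone sketch (the base values are illustrative, not the project's actual defaults):

    base_lr, base_ref_sum = 1e-4, 512 * 16
    for stage, scale in enumerate([4, 2, 1]):
        lr = base_lr * scale             # s_targs.learning_rate *= scale
        ref_sum = base_ref_sum // scale  # s_tkn.max_ref_tks_sum //= scale
        print(f"stage {stage}: lr={lr:g}, max_ref_tks_sum={ref_sum}")
    # stage 0: lr=0.0004, max_ref_tks_sum=2048
    # stage 1: lr=0.0002, max_ref_tks_sum=4096
    # stage 2: lr=0.0001, max_ref_tks_sum=8192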
scripts.train_model/train_new_model | Modified | temp-1 | 105bd32bd2520f951e8003a3e08c82d784029b2d | Organize training code. | <0>:<add> model_name="coeditor-perm2k-c3-multi-v1.7.1",
| # module: scripts.train_model
def train_new_model():
train_model(
- model_name="coeditor-perm2k-c3-multi-v1.7",
<0> dataset_name="perm2k",
train_args=TrainingArgs(
max_train_epochs=1,
),
encoder=C3CombinedEncoder(
problem_tranform=C3ProblemChangeInlining(),
),
recreate_data=False,
quicktest=False,
)
| ===========unchanged ref 0===========
at: coeditor.c3problem
C3ProblemChangeInlining(max_lines_to_edit: int=30, max_split_factor: int=4, max_inline_ratio: float=1.0, _test_prob: float=0.01, allow_empty_problems: bool=True)
at: coeditor.common
proj_root() -> Path
at: coeditor.dataset
C3CombinedEncoder(change_processor: ProjectChangeProcessor[C3Problem]=field(
default_factory=C3ProblemGenerator
), problem_tranform: C3ProblemTransform=field(default_factory=C3ProblemSimpleSplit), edit_tokenizer: C3ProblemTokenizer=field(default_factory=C3ProblemTokenizer))
at: os
chdir(path: _FdOrAnyPath) -> None
===========changed ref 0===========
# module: coeditor.dataset
- TransformedC3Problem = C3Problem
-
===========changed ref 1===========
# module: coeditor.dataset
time_limit_per_commit = 10.0
-
===========changed ref 2===========
# module: coeditor.dataset
class C3ProblemDataset(TypedDict):
train: Sequence[C3Problem]
+ valid: Sequence[C3Problem]
- valid: Sequence[TransformedC3Problem]
+ test: Sequence[C3Problem]
- test: Sequence[TransformedC3Problem]
===========changed ref 3===========
# module: coeditor.dataset
+ def make_or_load_transformed_dataset(
+ dataset_name: str,
+ dataset: C3ProblemDataset | None,
+ encoder: C3CombinedEncoder,
+ remake_problems: bool = False,
+ workers: int = DefaultWorkers,
+ ) -> dict[str, Sequence[C3Problem]]:
+ def transform_eval_problems(
+ dataset: C3ProblemDataset,
+ ) -> dict[str, Sequence[C3Problem]]:
+ results = dict[str, Sequence[C3Problem]]()
+ for split in ("valid", "test"):
+ prob_lists = pmap(
+ encoder.problem_tranform.transform,
+ dataset[split],
+ desc=f"transform({split})",
+ chunksize=1000,
+ max_workers=workers,
+ )
+ results[split] = join_list(prob_lists)
+ return results
+
+ proc_config = repr_modified_args(encoder.change_processor)
+ trans_config = repr_modified_args(encoder.problem_tranform)
+ transformed_dir = get_dataset_dir(dataset_name) / "transformed"
+ cache = PickleCache(transformed_dir)
+ return cache.cached(
+ f"eval-{proc_config}-{trans_config}",
+ lambda: transform_eval_problems(not_none(dataset)),
+ remake=remake_problems,
+ )
+
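The caching pattern above in isolation (semantics inferred from usage, not verified against `PickleCache`'s source: `cached` runs the thunk once, pickles the result under the given name, and reloads it on later calls unless `remake=True`):

    from pathlib import Path

    cache = PickleCache(Path("transformed"))
    # `build_problems` is a hypothetical expensive thunk for illustration
    probs = cache.cached("eval-cfg-v1", lambda: build_problems(), remake=False)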
===========changed ref 4===========
# module: coeditor.dataset
def make_or_load_dataset(
dataset_name: str,
change_processor: ProjectChangeProcessor[C3Problem],
- eval_transformer: C3ProblemTransform,
remake_problems: bool = False,
workers: int = DefaultWorkers,
) -> C3ProblemDataset:
- def transform_eval_problems(
- problems: dict[str, Sequence[C3Problem]]
- ) -> dict[str, Sequence[C3Problem]]:
- results = dict[str, Sequence[C3Problem]]()
- for split, probs in problems.items():
- if split == "train":
- continue
- prob_lists = pmap(
- eval_transformer.transform,
- probs,
- desc=f"transform({split})",
- chunksize=1000,
- )
- results[split] = join_list(prob_lists)
- return results
-
prob_config = repr_modified_args(change_processor)
processed_dir = get_dataset_dir(dataset_name) / "processed"
cache = PickleCache(processed_dir)
with timed_action("Making or loading C3 problems"):
problems = cache.cached(
prob_config,
lambda: datasets_from_repos(
get_dataset_dir(dataset_name) / "repos",
change_processor,
workers=workers,
),
remake=remake_problems,
)
size_mb = (processed_dir / prob_config).stat().st_size / (1024**2)
print(f"Problems total size: {size_mb:.2f} MB")
- trans_config = repr_modified_args(eval_transformer)
- transformed_dir = get_dataset_dir(dataset_name) / "transformed"
- cache = PickleCache(transformed_dir)
-
- with timed_action("Making or loading transformed C3 problems for eval"):
- eval_probs = cache.cached(
- f"eval-{prob_config}-{</s>
===========changed ref 5===========
# module: coeditor.dataset
def make_or_load_dataset(
dataset_name: str,
change_processor: ProjectChangeProcessor[C3Problem],
- eval_transformer: C3ProblemTransform,
remake_problems: bool = False,
workers: int = DefaultWorkers,
) -> C3ProblemDataset:
# offset: 1
<s> transformed C3 problems for eval"):
- eval_probs = cache.cached(
- f"eval-{prob_config}-{trans_config}",
- lambda: transform_eval_problems(problems),
- remake=remake_problems,
- )
-
return C3ProblemDataset(
train=problems.get("train", []),
+ valid=problems.get("valid", []),
- valid=eval_probs.get("valid", []),
+ test=problems.get("test", []),
- test=eval_probs.get("test", []),
)
===========changed ref 6===========
# module: scripts.train_model
def train_model(
model_name: str,
dataset_name: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
dec_args = DecodingArgs()
if quicktest:
model_name = "quicktest-" + model_name
if not eval_only:
check_save_dir(model_name)
# problems will be transformed and saved for valid and test but not train.
datasets = make_or_load_dataset(
dataset_name,
encoder.change_processor,
- encoder.problem_tranform,
remake_problems=recreate_data,
+ workers=multiprocessing.cpu_count(),
)
+
+ with timed_action("Making or loading transformed C3 problems for eval"):
+ # it's important to cache these due to randomness in the transformations
+ eval_probs = make_or_load_transformed_dataset(
+ dataset_name,
+ datasets,
+ encoder,
+ remake_problems=recreate_data,
+ workers=multiprocessing.cpu_count(),
+ )
+
# limit the number of examples for faster testing
+ datasets["valid"] = random_subset(eval_probs["valid"], 10000, rng=42)
- datasets["valid"] = random_subset(datasets["valid"], 10000, rng=42)
+ datasets["test"] = random_subset(eval_probs["test"], 10000, rng=42)
- datasets["test"] = random_subset(datasets["test"], 10000, rng=42)
config_dict = {
k: get_modified_args(v)
for k, v in {
"edit_tokenizer": encoder.edit_</s> |
coeditor._utils/pmap | Modified | temp-1 | 31e1ac706f81baaf059aaee0787edab67bda6f3b | Improve C3DataLoader performance. - move _post_process into pmap. - support `disable_unchanged_refs`. | <0>:<add> chunksize = max(1, chunksize)
| # module: coeditor._utils
def pmap(
f: Callable[..., T1],
*f_args: Any,
desc: str | None = None,
key_args: Mapping[str, Any] | None = None,
max_workers: int | None = None,
chunksize: int | None = None,
tqdm_args: Mapping[str, Any] | None = None,
) -> list[T1]:
"""
Parallel map with progress display.
"""
n = len(f_args[0])
assert_eq(n, *(len(xs) for xs in f_args))
tqdm_args = dict(tqdm_args) if tqdm_args else {}
tqdm_args.setdefault("smoothing", 0.0)
if desc is None:
desc = "pmap: " + f.__name__
if key_args is None:
key_args = {}
if max_workers is None:
max_workers = DefaultWorkers
if max_workers <= 1:
outs = list[T1]()
for i in tqdm(range(n), desc=desc, **tqdm_args):
outs.append(f(*(a[i] for a in f_args), **key_args))
return outs
if chunksize is None:
+ chunksize = n // (20 * max_workers)
- chunksize = max(1, n // (20 * max_workers))
<0>
tag_f = _TaggedFunc(f, key_args)
arg_tuples = zip(range(n), *f_args)
with (
multiprocessing.Pool(max_workers) as pool,
tqdm(total=n, desc=desc, **tqdm_args) as pbar,
):
results = dict[int, T1]()
for i, r in pool.imap_unordered(tag_f, arg_tuples, chunksize=chunksize):
results[i] = r
pbar.update()
return [results[i] for i in range(n)]
| ===========unchanged ref 0===========
at: coeditor._utils
T1 = TypeVar("T1")
DefaultWorkers: int = multiprocessing.cpu_count() // 2
global DefaultWorkers
_TaggedFunc(f: Callable[..., T1], key_args: Mapping[str, Any])
assert_eq(x: T1, *xs: T1, extra_message: Callable[[], str]=lambda: "") -> None
at: multiprocessing
Pool(processes: Optional[int]=..., initializer: Optional[Callable[..., Any]]=..., initargs: Iterable[Any]=..., maxtasksperchild: Optional[int]=...) -> pool.Pool
at: tqdm.std
tqdm(iterable=None, desc=None, total=None, leave=True, file=None, ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None, ascii=None, disable=False, unit='it', unit_scale=False, dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0, position=None, postfix=None, unit_divisor=1000, write_bytes=False, lock_args=None, nrows=None, colour=None, delay=0, gui=False, **kwargs)
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
Mapping = _alias(collections.abc.Mapping, 2)
|
coeditor.c3problem/C3ProblemTokenizer.tokenize_problem | Modified | temp-1 | 31e1ac706f81baaf059aaee0787edab67bda6f3b | Improve C3DataLoader performance. - move _post_process into pmap. - support `disable_unchanged_refs`. | <0>:<add> all_refs.append((f"unchanged ref {i}", chunk))
| # module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
<s> chunk in enumerate(above_chunks)
]
below_chunks = [
(f"below chunk {i}", TkArray.new(chunk))
for i, chunk in enumerate(below_chunks)
]
all_refs = above_chunks + below_chunks
ref_size_sum = sum(len(ref) for _, ref in all_refs)
truncated = False
if ref_size_sum < self.max_ref_tks_sum:
+ if not self.disable_unchanged_refs:
+ unchanged = self._group_encode_unchanged_refs(
- unchanged = self._group_encode_unchanged_refs(problem.relevant_unchanged)
+ problem.relevant_unchanged
+ )
+ for i, chunk in enumerate(unchanged):
- for i, chunk in enumerate(unchanged):
- all_refs.append((f"unchanged ref {i}", chunk))
<0> else:
truncated = True
if ref_size_sum < self.max_ref_tks_sum:
changed = self._group_encode_changed_refs(problem.relevant_changes)
for i, chunk in enumerate(changed):
all_refs.append((f"changed ref {i}", chunk))
ref_size_sum += sum(len(x) for x in changed)
else:
truncated = True
# take until we hit the limit
ref_size_sum = 0
kept_refs = list[tuple[str, TkArray]]()
for name, ref in all_refs:
if ref_size_sum + len(ref) > self.max_ref_tks_sum:
truncated = True
break
ref_size_sum += len(ref)
kept_refs.append((name, ref))
return TkC3Problem(
TkArray.new(chunk_input</s> | ===========above chunk 0===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: -1
<s> truncate_output_tks(chunk_input, chunk_output)
# try to move some prev_change_tks into the input
above_tks = join_list(origin_lines[:edit_start] + [TokenSeq()], Newline_id)
above_tks = tk_delta.for_input_range((0, edit_start)).apply_to_change(above_tks)
below_tks = join_list(origin_lines[edit_stop:] + [TokenSeq()], Newline_id)
chunk_input, above_tks, below_tks = self._inline_some_context(
chunk_input, above_tks, below_tks, input_limit
)
chunk_output = truncate_section(
chunk_output,
TruncateAt.Right,
self.max_output_tks,
add_bos=False,
inplace=True,
)
above_chunks = break_into_chunks(
above_tks,
lambda i: self._encode_headers(span.headers, -1 - i),
chunk_size=self.max_ref_tks,
overlap=self.ref_chunk_overlap,
right_to_left=True,
)
if not below_tks:
below_chunks = []
else:
below_chunks = break_into_chunks(
below_tks,
lambda i: self._encode_headers(span.headers, i + 1),
chunk_size=self.max_ref_tks,
overlap=self.ref_chunk_overlap,
)
above_chunks = [
(f"above chunk {i}", TkArray.new(chunk))
for i, chunk in enumerate(above_chunks)
]
below_chunks = [
(f"below chunk {i}", T</s>
===========above chunk 1===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: -2
()
tk_delta: TkDelta = span.delta
origin_lines = tk_splitlines(original)
edit_start = problem.edit_line_ids[0]
scope_tks = self._encode_headers(span.headers, 0)
input_limit = self.max_query_tks - len(scope_tks)
chunk_input = TokenSeq()
chunk_output = TokenSeq()
last_line = edit_start
for i, l in enumerate(problem.edit_line_ids):
for line in origin_lines[last_line + 1 : l]:
chunk_input.extend(line)
chunk_input.append(Newline_id)
chunk_input.append(get_extra_id(i))
if l < len(origin_lines):
chunk_input.extend(origin_lines[l])
chunk_input.append(Newline_id)
last_line = l
line_change = join_list(tk_delta.get_line_change(l), Newline_id)
chunk_output.append(get_extra_id(i))
chunk_output.extend(line_change)
if line_change and line_change[-1] != Del_id:
chunk_output.append(Newline_id)
if len(chunk_input) > input_limit:
break
edit_stop = last_line + 1
# limit the input size if it's too long
chunk_input = truncate_section(
chunk_input, TruncateAt.Right, input_limit, inplace=True
)
chunk_output = truncate_output_tks(chunk_input, chunk_output)
# try to move some prev_change_tks into the</s>
===========below chunk 0===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: 1
<s>_refs.append((name, ref))
return TkC3Problem(
TkArray.new(chunk_input),
TkArray.new(scope_tks),
TkArray.new(chunk_output),
path=span.headers[-1].path,
change_type=problem.change_type,
named_references=kept_refs,
project=problem.src_info["project"],
commit=problem.src_info["commit"],
truncated=truncated,
)
===========unchanged ref 0===========
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
TkC3Problem(main_input: TkArray, header: TkArray, output: TkArray, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TkArray]], project: str, commit: CommitInfo | None, truncated: bool)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.6"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 16
ref_chunk_overlap: int = 32
disable_unchanged_refs: bool = False
_encode_headers(scope_changes: Sequence[ChangedHeader], offset: int) -> TokenSeq
_inline_some_context(input: TokenSeq, above_ctx: TokenSeq, below_ctx: TokenSeq, size_limit: int) -> tuple[TokenSeq, TokenSeq, TokenSeq]
_group_encode_unchanged_refs(elems: Mapping[PyFullName, PyDefinition]) -> Sequence[TkArray]
_group_encode_changed_refs(changes: Sequence[ChangedCodeSpan]) -> Sequence[TkArray]
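The above/below context windows come from `break_into_chunks`, which slides
a fixed-size window with a small overlap (the real helper also prepends
per-chunk headers and supports right-to-left chunking, omitted here). A
standalone sketch of just the windowing, under that assumption:

def break_into_chunks_sketch(tks: list[int], chunk_size: int, overlap: int) -> list[list[int]]:
    chunks, start = [], 0
    while True:
        chunks.append(tks[start : start + chunk_size])
        if start + chunk_size >= len(tks):
            break
        start += chunk_size - overlap  # neighboring chunks share `overlap` tokens
    return chunks

assert break_into_chunks_sketch(list(range(10)), chunk_size=4, overlap=1) == [
    [0, 1, 2, 3], [3, 4, 5, 6], [6, 7, 8, 9],
]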
|
coeditor.model/RetrievalEditorModel.profile_run | Modified | temp-1 | 31e1ac706f81baaf059aaee0787edab67bda6f3b | Improve C3DataLoader performance. - move _post_process into pmap. - support `disable_unchanged_refs`. | <0>:<add> labels = 5 * torch.ones(1, len_out, dtype=torch.long, device=self.device)
| # module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
+ def profile_run(self, repeats: int = 10, max_refs: int = 20):
- def profile_run(self, repeats: int = 10, max_refs: int = 10):
rand = random.Random(42)
for i in tqdm(range(repeats), "test run"):
+ len_in = rand.randint(64, 512)
+ len_out = rand.randint(14, 256)
+ input_ids = 5 * torch.ones(1, len_in, dtype=torch.long, device=self.device)
- input_ids = 5 * torch.ones(
- 1, rand.randint(64, 512), dtype=torch.long, device=self.device
- )
n_refs = rand.randint(max_refs // 2, max_refs)
references = [[5] * rand.randint(64, 512) for _ in range(n_refs)]
- labels = 5 * torch.ones(1, 128, dtype=torch.long, device=self.device)
<0> with torch.autocast("cuda"):
self.forward(as_any(input_ids), references, labels=as_any(labels))
| ===========unchanged ref 0===========
at: coeditor.common
TokenSeq = list[Token]
at: torch.nn.modules.module.Module
dump_patches: bool = False
_version: int = 1
training: bool
_parameters: Dict[str, Optional[Parameter]]
_buffers: Dict[str, Optional[Tensor]]
_non_persistent_buffers_set: Set[str]
_backward_pre_hooks: Dict[int, Callable]
_backward_hooks: Dict[int, Callable]
_is_full_backward_hook: Optional[bool]
_forward_hooks: Dict[int, Callable]
_forward_hooks_with_kwargs: Dict[int, bool]
_forward_hooks_always_called: Dict[int, bool]
_forward_pre_hooks: Dict[int, Callable]
_forward_pre_hooks_with_kwargs: Dict[int, bool]
_state_dict_hooks: Dict[int, Callable]
_load_state_dict_pre_hooks: Dict[int, Callable]
_state_dict_pre_hooks: Dict[int, Callable]
_load_state_dict_post_hooks: Dict[int, Callable]
_modules: Dict[str, Optional['Module']]
call_super_init: bool = False
_compiled_call_impl : Optional[Callable] = None
forward: Callable[..., Any] = _forward_unimplemented
__call__ : Callable[..., Any] = _wrapped_call_impl
T_destination = TypeVar('T_destination', bound=Dict[str, Any])
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
- def encode_token_seqs(
- self, references: Sequence[TokenSeq] | Sequence[str], pad_id=None
- ) -> LongTensor:
- references = [
- encode_lines_join(ref) if isinstance(ref, str) else ref
- for ref in references
- ]
- out = pad_token_seqs(references, pad_id=pad_id)
- out = out.to(self.device)
- return cast(LongTensor, out)
-
===========changed ref 1===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
"""
## Change log
- 2.6: increase max_ref_tks_sum from 512 * 12 to 512 * 16.
- 2.5: Sort used references by path.
- 2.4: Encode each changed reference individually. Encode signatures for unchanged.
"""
VERSION = "2.6"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 16
ref_chunk_overlap: int = 32
+ disable_unchanged_refs: bool = False
===========changed ref 2===========
# module: coeditor._utils
def pmap(
f: Callable[..., T1],
*f_args: Any,
desc: str | None = None,
key_args: Mapping[str, Any] | None = None,
max_workers: int | None = None,
chunksize: int | None = None,
tqdm_args: Mapping[str, Any] | None = None,
) -> list[T1]:
"""
Parallel map with progress display.
"""
n = len(f_args[0])
assert_eq(n, *(len(xs) for xs in f_args))
tqdm_args = dict(tqdm_args) if tqdm_args else {}
tqdm_args.setdefault("smoothing", 0.0)
if desc is None:
desc = "pmap: " + f.__name__
if key_args is None:
key_args = {}
if max_workers is None:
max_workers = DefaultWorkers
if max_workers <= 1:
outs = list[T1]()
for i in tqdm(range(n), desc=desc, **tqdm_args):
outs.append(f(*(a[i] for a in f_args), **key_args))
return outs
if chunksize is None:
+ chunksize = n // (20 * max_workers)
- chunksize = max(1, n // (20 * max_workers))
+ chunksize = max(1, chunksize)
tag_f = _TaggedFunc(f, key_args)
arg_tuples = zip(range(n), *f_args)
with (
multiprocessing.Pool(max_workers) as pool,
tqdm(total=n, desc=desc, **tqdm_args) as pbar,
):
results = dict[int, T1]()
for i, r in pool.imap_unordered(tag_f, arg_tuples, chunksize=chunksize):
results[i] = r
pbar.update()
return [results[i] for i in range</s>
===========changed ref 3===========
# module: coeditor._utils
def pmap(
f: Callable[..., T1],
*f_args: Any,
desc: str | None = None,
key_args: Mapping[str, Any] | None = None,
max_workers: int | None = None,
chunksize: int | None = None,
tqdm_args: Mapping[str, Any] | None = None,
) -> list[T1]:
# offset: 1
<s>=chunksize):
results[i] = r
pbar.update()
return [results[i] for i in range(n)]
===========changed ref 4===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
span = problem.span
original: TokenSeq = span.original.tolist()
tk_delta: TkDelta = span.delta
origin_lines = tk_splitlines(original)
edit_start = problem.edit_line_ids[0]
scope_tks = self._encode_headers(span.headers, 0)
input_limit = self.max_query_tks - len(scope_tks)
chunk_input = TokenSeq()
chunk_output = TokenSeq()
last_line = edit_start
for i, l in enumerate(problem.edit_line_ids):
for line in origin_lines[last_line + 1 : l]:
chunk_input.extend(line)
chunk_input.append(Newline_id)
chunk_input.append(get_extra_id(i))
if l < len(origin_lines):
chunk_input.extend(origin_lines[l])
chunk_input.append(Newline_id)
last_line = l
line_change = join_list(tk_delta.get_line_change(l), Newline_id)
chunk_output.append(get_extra_id(i))
chunk_output.extend(line_change)
if line_change and line_change[-1] != Del_id:
chunk_output.append(Newline_id)
if len(chunk_input) > input_limit:
break
edit_stop = last_line + 1
# limit the input size if it's too long
chunk_input = truncate_section(
chunk_input, TruncateAt.Right, input_limit, inplace=True
)
chunk_output = truncate_output_tks(chunk_input, chunk_output)
# try to move some prev_change_tks into the input
above_tks = join_list(origin_lines[:edit_</s> |
coeditor.model/C3DataLoader._to_tokenized | Modified | temp-1 | 31e1ac706f81baaf059aaee0787edab67bda6f3b | Improve C3DataLoader performance. - move _post_process into pmap. - support `disable_unchanged_refs`. | <0>:<add> post.tokenize,
| # module: coeditor.model
@dataclass
class C3DataLoader:
def _to_tokenized(self, probs: Sequence[C3Problem]) -> Iterable[TkC3Problem]:
probs = list(probs)
if self.transform is not None:
# we can afford to store all transformed problems beforehand
probs = join_list(
pmap(
self.transform.transform,
probs,
+ chunksize=self.chunk_size // 2,
- chunksize=500,
max_workers=self.workers,
)
)
if self.shuffle:
# we need to shuffle after the transform to help serialization
# this also mixes the problems better
random.shuffle(probs)
+ post = _C3PostProcess(self.tokenizer, self.batch_args)
for i in range(0, len(probs), self.chunk_size):
# we can only afford to tokenize the problems on-the-fly
group = probs[i : i + self.chunk_size]
for tkprob in pmap(
- self.tokenizer.tokenize_problem,
<0> group,
tqdm_args={"disable": True},
max_workers=self.workers,
):
if self.filter(tkprob):
yield tkprob
| ===========unchanged ref 0===========
at: coeditor._utils
DefaultWorkers: int = multiprocessing.cpu_count() // 2
global DefaultWorkers
pmap(f: Callable[..., T1], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], /, *iterables: Iterable[Any], desc: str | None=None, key_args: Mapping[str, Any] | None=None, max_workers: int | None=None, chunksize: int | None=None, tqdm_args: Mapping[str, Any] | None=None) -> list[T1]
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
TkC3Problem(main_input: TkArray, header: TkArray, output: TkArray, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TkArray]], project: str, commit: CommitInfo | None, truncated: bool)
at: coeditor.c3problem.C3ProblemTransform
transform(prob: C3Problem) -> Sequence[C3Problem]
at: coeditor.common
join_list(segs: Iterable[Iterable[T1]], sep: T1 | None=None) -> list[T1]
at: coeditor.model.C3DataLoader
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
filter: Callable[[TkC3Problem], bool] = _always_true
estimate_batch_stats()
at: coeditor.model.C3DataLoader.__iter__
self.epochs += 1
at: typing
Iterable = _alias(collections.abc.Iterable, 1)
===========unchanged ref 1===========
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor._utils
def pmap(
f: Callable[..., T1],
*f_args: Any,
desc: str | None = None,
key_args: Mapping[str, Any] | None = None,
max_workers: int | None = None,
chunksize: int | None = None,
tqdm_args: Mapping[str, Any] | None = None,
) -> list[T1]:
"""
Parallel map with progress display.
"""
n = len(f_args[0])
assert_eq(n, *(len(xs) for xs in f_args))
tqdm_args = dict(tqdm_args) if tqdm_args else {}
tqdm_args.setdefault("smoothing", 0.0)
if desc is None:
desc = "pmap: " + f.__name__
if key_args is None:
key_args = {}
if max_workers is None:
max_workers = DefaultWorkers
if max_workers <= 1:
outs = list[T1]()
for i in tqdm(range(n), desc=desc, **tqdm_args):
outs.append(f(*(a[i] for a in f_args), **key_args))
return outs
if chunksize is None:
+ chunksize = n // (20 * max_workers)
- chunksize = max(1, n // (20 * max_workers))
+ chunksize = max(1, chunksize)
tag_f = _TaggedFunc(f, key_args)
arg_tuples = zip(range(n), *f_args)
with (
multiprocessing.Pool(max_workers) as pool,
tqdm(total=n, desc=desc, **tqdm_args) as pbar,
):
results = dict[int, T1]()
for i, r in pool.imap_unordered(tag_f, arg_tuples, chunksize=chunksize):
results[i] = r
pbar.update()
return [results[i] for i in range</s>
===========changed ref 1===========
# module: coeditor._utils
def pmap(
f: Callable[..., T1],
*f_args: Any,
desc: str | None = None,
key_args: Mapping[str, Any] | None = None,
max_workers: int | None = None,
chunksize: int | None = None,
tqdm_args: Mapping[str, Any] | None = None,
) -> list[T1]:
# offset: 1
<s>=chunksize):
results[i] = r
pbar.update()
return [results[i] for i in range(n)]
===========changed ref 2===========
# module: coeditor.model
+ @dataclass
+ class _C3PostProcess:
+ tokenizer: C3ProblemTokenizer
+ batch_args: BatchArgs
+
===========changed ref 3===========
# module: coeditor.model
+ @dataclass
+ class _C3PostProcess:
+ def tokenize(self, prob: C3Problem) -> TkC3Problem:
+ tk_prob = self.tokenizer.tokenize_problem(prob)
+ max_output_tks = self.tokenizer.max_output_tks
+ shuffle_extra_ids = self.batch_args.shuffle_extra_ids
+ output_tks = tk_prob.output_tks
+ output_tks = wrap_bos(output_tks)
+
+ if len(output_tks) > max_output_tks:
+ output_tks = output_tks[:max_output_tks]
+
+ main_input = tk_prob.main_input.tolist()
+
+ if shuffle_extra_ids and random.random() < 0.5:
+ id_map = random_extra_id_map()
+ main_input = [id_map.get(tk, tk) for tk in main_input]
+ output_tks = [id_map.get(tk, tk) for tk in output_tks]
+
+ return dataclasses.replace(
+ tk_prob, main_input=TkArray.new(main_input), output=TkArray.new(output_tks)
+ )
+
===========changed ref 4===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
- def encode_token_seqs(
- self, references: Sequence[TokenSeq] | Sequence[str], pad_id=None
- ) -> LongTensor:
- references = [
- encode_lines_join(ref) if isinstance(ref, str) else ref
- for ref in references
- ]
- out = pad_token_seqs(references, pad_id=pad_id)
- out = out.to(self.device)
- return cast(LongTensor, out)
-
===========changed ref 5===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
"""
## Change log
- 2.6: increase max_ref_tks_sum from 512 * 12 to 512 * 16.
- 2.5: Sort used references by path.
- 2.4: Encode each changed reference individually. Encode signatures for unchanged.
"""
VERSION = "2.6"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 16
ref_chunk_overlap: int = 32
+ disable_unchanged_refs: bool = False
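`_to_tokenized` above tokenizes lazily in `chunk_size` groups, so only one
chunk of tokenized problems is materialized at a time while the workers stay
busy. The same pattern as a generic sketch (`lazy_pmap` is a hypothetical
name):

from coeditor._utils import pmap

def lazy_pmap(f, items: list, chunk_size: int = 1000, workers: int = 4):
    for i in range(0, len(items), chunk_size):
        # map one chunk in parallel, then stream its results out
        yield from pmap(
            f,
            items[i : i + chunk_size],
            tqdm_args={"disable": True},
            max_workers=workers,
        )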
|
coeditor.model/BatchArgs.eval_default | Modified | temp-1 | fe8ce35a47c9734c369591c95097bade645c064b | Use static batch size instead. - This simplifies implementation and improves batch size estimation. | <0>:<add> batch_size=2,
| # module: coeditor.model
@dataclass
class BatchArgs:
@classmethod
def eval_default(cls) -> Self:
return BatchArgs(
- max_queries=32,
<0> shuffle_extra_ids=False,
)
| ===========unchanged ref 0===========
at: coeditor.model
BatchArgs(batch_size: int=1, shuffle_extra_ids: bool=True)
at: coeditor.model.BatchArgs
batch_size: int = 1
shuffle_extra_ids: bool = True
===========changed ref 0===========
# module: coeditor.model
@dataclass
class BatchArgs:
- min_queries: int = 1
- max_queries: int = 8
+ batch_size: int = 1
shuffle_extra_ids: bool = True
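With `min_queries`/`max_queries` and the cost-model packing gone, a batch is
now simply the next `batch_size` problems. A minimal sketch of that grouping
(hypothetical helper; the real batching also pads and packs references):

def fixed_size_batches(probs: list, batch_size: int):
    batch = []
    for p in probs:
        batch.append(p)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:  # emit the final partial batch
        yield batch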
|
scripts.train_model/train_model | Modified | temp-1 | fe8ce35a47c9734c369591c95097bade645c064b | Use static batch size instead. - This simplifies implementation and improves batch size estimation. | <0>:<add> model.train_on_data(model_name, s_loader, valid_loader, train_args)
| <s>_model
def train_model(
model_name: str,
dataset_name: str,
+ description: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
<s>_ref_tks_sum})"
s_loader = C3DataLoader(
s_probs,
encoder.problem_tranform,
s_tkn,
batch_args,
- filter=_not_truncated,
shuffle=True,
+ desc=desc,
- desc=f"stage {stage} training",
)
- s_targs = copy.deepcopy(train_args)
- s_targs.learning_rate *= scale
- s_targs.max_train_epochs = 1
- with timed_action(f"stage {stage} training"):
+
+ with timed_action(desc):
- model.train_on_data(model_name, s_loader, valid_loader, s_targs)
<0>
model.to("cuda")
test_loader = C3DataLoader(
datasets["test"], None, eval_tkn, eval_batch_args, shuffle=False, desc="test"
)
print(f"{len(test_loader)}")
print(f"{len(test_loader.all_probs)}")
with timed_action("Loss Evaluation"):
eval_result = model.eval_loss_on_loader(test_loader)
eval_dict = {f"test/{k}": v.average() for k, v in eval_result.items()}
wandb.log(eval_dict)
with timed_action("Accuracy Evaluation"):
out_dir = get_model_dir() / model_name / "exact_match_samples"
</s> | ===========above chunk 0===========
<s>model(
model_name: str,
dataset_name: str,
+ description: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: -1
<s>_tkn.max_ref_tks_sum *= 2
valid_loader = C3DataLoader(
datasets["valid"], None, eval_tkn, eval_batch_args, shuffle=False, desc="eval"
)
if not eval_only:
+ # gradually increase the ctx size during training
- # follow a 3-stage training pipeline
scales = [4, 2, 1]
+ for scale in scales:
- for stage, scale in enumerate(scales):
- s_bargs = copy.deepcopy(batch_args)
- s_bargs.min_queries *= scale
s_tkn = copy.copy(train_tkn)
s_tkn.max_ref_tks_sum //= scale
s_probs = [
x
for x in datasets["train"]
if sum(c.change_size() for c in x.relevant_changes)
+ < s_tkn.max_ref_tks_sum
- <= s_tkn.max_ref_tks_sum
]
+ n_probs = max(1, len(s_probs) // max(scales) // 2) * scale
+ s_probs = random_subset(s_probs, n_probs)
- s_probs = random_subset(s_probs, len(s_probs) // 4 * scale)
+ desc = f"training (ctx={s_tkn.max_ref_tks_sum})"
s_loader = C3DataLoader(
s_probs,
encoder.problem</s>
===========above chunk 1===========
<s>model(
model_name: str,
dataset_name: str,
+ description: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: -2
<s>.init(dir="..", project=project, name=model_name, config=config_dict)
if quicktest:
print("Using fewer data for quick test.")
n_quick_exs = 20
datasets = C3ProblemDataset(
train=datasets["train"][:n_quick_exs],
valid=datasets["valid"][:n_quick_exs],
test=datasets["test"][:n_quick_exs],
)
if resumed_from is None:
model = RetrievalEditorModel.from_code_t5("base", reuse_embed=True)
else:
model = RetrievalEditorModel.load(resumed_from)
if os.getenv("CUDA_VISIBLE_DEVICES") is None:
warnings.warn(
"CUDA_VISIBLE_DEVICES not set, using 0. Note that "
"the Huggingface Trainer will use all visible GPUs for training."
)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
train_tkn = encoder.edit_tokenizer
eval_tkn = copy.deepcopy(train_tkn)
eval_tkn.max_query_tks *= 2
eval_tkn.max_output_tks *= 2
eval_tkn.max_ref_tks_sum *= 2
valid_loader = C3DataLoader(
datasets["</s>
===========above chunk 2===========
<s>model(
model_name: str,
dataset_name: str,
+ description: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: -3
<s> and test but not train.
datasets = make_or_load_dataset(
dataset_name,
encoder.change_processor,
remake_problems=recreate_data,
workers=multiprocessing.cpu_count(),
)
with timed_action("Making or loading transformed C3 problems for eval"):
# it's important to cache these due to randomness in the transformations
eval_probs = make_or_load_transformed_dataset(
dataset_name,
datasets,
encoder,
remake_problems=recreate_data,
workers=multiprocessing.cpu_count(),
)
# limit the number of examples for faster testing
datasets["valid"] = random_subset(eval_probs["valid"], 10000, rng=42)
datasets["test"] = random_subset(eval_probs["test"], 10000, rng=42)
config_dict = {
k: get_modified_args(v)
for k, v in {
+ "description": description,
"edit_tokenizer": encoder.edit_tokenizer.get_args(),
"batch_args": batch_args,
"train_args": train_args,
"dec_args": dec_args,
}.items()
}
project = "Coeditor" if not quicktest else "Coeditor-quicktest"
if eval_only:
project = "eval-" + project
w</s>
===========above chunk 3===========
<s>model(
model_name: str,
dataset_name: str,
+ description: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: -4
dec_args = DecodingArgs()
if quicktest:
model_name = "quicktest-" + model_name
if not eval_only:
check_save_dir(model_name)
# problems will be transformed and saved</s>
===========below chunk 0===========
<s>_model(
model_name: str,
dataset_name: str,
+ description: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: 1
<s>Accuracy Evaluation"):
out_dir = get_model_dir() / model_name / "exact_match_samples"
exact_acc = model.eval_on_data(
datasets["test"],
test_loader,
dec_args,
out_dir,
probs_to_save=300,
)
print("Exact-match accuracy:", exact_acc)
wandb.log({"test/exact-acc": exact_acc.average()})
cprint("blue", "Exact-match samples saved to:", out_dir)
return model
|
scripts.train_model/train_new_model | Modified | temp-1 | fe8ce35a47c9734c369591c95097bade645c064b | Use static batch size instead. - This simplifies implementation and improves batch size estimation. | <0>:<add> # edit_tokenizer=C3ProblemTokenizer(disable_unchanged_refs=True),
| # module: scripts.train_model
def train_new_model():
train_model(
+ model_name="coeditor-perm2k-c3-multi-v1.7.2",
- model_name="coeditor-perm2k-c3-multi-v1.7.1",
dataset_name="perm2k",
+ description="Use fixed batch size.",
train_args=TrainingArgs(
max_train_epochs=1,
),
encoder=C3CombinedEncoder(
problem_tranform=C3ProblemChangeInlining(),
<0> ),
recreate_data=False,
quicktest=False,
)
| ===========unchanged ref 0===========
at: coeditor.c3problem
C3ProblemChangeInlining(max_lines_to_edit: int=30, max_split_factor: int=4, max_inline_ratio: float=1.0, _test_prob: float=0.01, allow_empty_problems: bool=True)
at: coeditor.dataset
C3CombinedEncoder(change_processor: ProjectChangeProcessor[C3Problem]=field(
default_factory=C3ProblemGenerator
), problem_tranform: C3ProblemTransform=field(default_factory=C3ProblemSimpleSplit), edit_tokenizer: C3ProblemTokenizer=field(default_factory=C3ProblemTokenizer))
at: coeditor.model
TrainingArgs(learning_rate: float=2e-5, weight_decay: float=0.01, max_train_epochs: int=1, lr_scheduler_type: SchedulerType=SchedulerType.LINEAR)
at: coeditor.model.TrainingArgs
learning_rate: float = 2e-5
weight_decay: float = 0.01
max_train_epochs: int = 1
lr_scheduler_type: SchedulerType = SchedulerType.LINEAR
===========changed ref 0===========
# module: scripts.train_model
- def _not_truncated(p: TkC3Problem) -> bool:
- return not p.truncated
-
===========changed ref 1===========
# module: scripts.train_model
def eval_code_completion():
train_model(
model_name="coeditor-xl-c3-completion-v1.6-resumed",
dataset_name="tiny",
+ description="",
encoder=C3CombinedEncoder(
problem_tranform=C3ToCodeCompletion(),
),
resumed_from=(get_model_dir(True) / "coeditor-xl-c3-dropout-v1.6-resumed"),
eval_only=True,
)
===========changed ref 2===========
# module: coeditor.model
@dataclass
class C3DataLoader:
- def get_batch_stats(self):
- return self._batch_stast
-
===========changed ref 3===========
# module: coeditor.model
@dataclass
class BatchArgs:
@classmethod
def eval_default(cls) -> Self:
return BatchArgs(
+ batch_size=2,
- max_queries=32,
shuffle_extra_ids=False,
)
===========changed ref 4===========
# module: coeditor.model
@dataclass
class BatchArgs:
- min_queries: int = 1
- max_queries: int = 8
+ batch_size: int = 1
shuffle_extra_ids: bool = True
===========changed ref 5===========
# module: coeditor.model
@dataclass
class C3DataLoader:
def __post_init__(self):
+ n_batches = self.estimate_n_batches()
- n_batches, batch_stats = self.estimate_batch_stats()
self._len_est = n_batches
- self._batch_stast = batch_stats
self.epochs = 0
===========changed ref 6===========
# module: coeditor.model
@dataclass
class C3DataLoader:
- def _cost_limit(self) -> float:
- min_queries = self.batch_args.min_queries
- tkn = self.tokenizer
- return min_queries * retrieval_cost_model(
- tkn.max_ref_tks_sum, tkn.max_query_tks, tkn.max_output_tks
- )
-
===========changed ref 7===========
# module: coeditor.model
@dataclass
class C3DataLoader:
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
- # a problem filter can be used to filter out problems that are not suitable for training
- filter: Callable[[TkC3Problem], bool] = _always_true
tqdm_args: dict | None = None
chunk_size: int = 1000
workers: int = DefaultWorkers
===========changed ref 8===========
# module: coeditor.model
@dataclass
class C3DataLoader:
+ def estimate_n_batches(self) -> int:
+ factor = 10
+ n = max(1, len(self.all_probs) // factor)
+ subset = random_subset(self.all_probs, n, rng=42)
+ probs = list(subset)
+ if self.transform is not None:
+ # we can afford to store all transformed problems beforehand
+ probs = join_list(
+ pmap(
+ self.transform.transform,
+ probs,
+ chunksize=self.chunk_size // 2,
+ max_workers=self.workers,
+ )
+ )
+ # better to have a smaller estimate to avoid triggering data regeneration
+ n_batches = max(1, len(probs) // self.batch_args.batch_size)
+ est = max(1, int(len(self.all_probs) / n * n_batches * 0.99))
+ return est
+
===========changed ref 9===========
# module: coeditor.model
@dataclass
class C3DataLoader:
- def estimate_batch_stats(self):
- factor = 10
- n = max(1, len(self.all_probs) // factor)
- subset = random_subset(self.all_probs, n, rng=42)
- batches = self._problems_to_batches(self._to_tokenized(subset))
- bsizes = list[int]()
- for b in tqdm(batches, desc="estimate_batch_stats", smoothing=0.0):
- bsizes.append(len(b["input_ids"]))
- batch_stats = {k: f"{v:.1f}" for k, v in scalar_stats(bsizes).items()}
- # better to have a smaller estimate to avoid triggering data regeneration
- size_est = max(1, int(len(self.all_probs) / n * len(bsizes) * 0.99))
- return size_est, batch_stats
-
===========changed ref 10===========
# module: coeditor.model
@dataclass
class C3DataLoader:
def _to_tokenized(self, probs: Sequence[C3Problem]) -> Iterable[TkC3Problem]:
probs = list(probs)
if self.transform is not None:
# we can afford to store all transformed problems beforehand
probs = join_list(
pmap(
self.transform.transform,
probs,
chunksize=self.chunk_size // 2,
max_workers=self.workers,
)
)
if self.shuffle:
# we need to shuffle after the transform to help serialization
# this also mixes the problems better
random.shuffle(probs)
post = _C3PostProcess(self.tokenizer, self.batch_args)
for i in range(0, len(probs), self.chunk_size):
# we can only afford to tokenize the problems on-the-fly
group = probs[i : i + self.chunk_size]
+ yield from pmap(
- for tkprob in pmap(
post.tokenize,
group,
tqdm_args={"disable": True},
max_workers=self.workers,
+ )
- ):
- if self.filter(tkprob):
- yield tkprob
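The stage sizing in `train_model` above keeps the per-stage token budget
roughly constant: halving the reference context doubles the number of
sampled problems. A worked check, assuming 100_000 eligible problems at
every stage and `max_ref_tks_sum = 512 * 16` (in reality the eligible set
shrinks as the context shrinks, since problems are filtered by change size):

scales = [4, 2, 1]
for scale in scales:
    ctx = 512 * 16 // scale
    n_probs = max(1, 100_000 // max(scales) // 2) * scale
    print(f"scale={scale}: ctx={ctx}, n_probs={n_probs}")
# scale=4: ctx=2048, n_probs=50000
# scale=2: ctx=4096, n_probs=25000
# scale=1: ctx=8192, n_probs=12500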
|
coeditor.service/EditPredictionService._suggest_edit_two_steps | Modified | temp-1 | ad918b35e2b8314f30a7f8ffc1e957c9f49956df | plugin 0.4.2: support applying subchanges. | <0>:<add> changes=changes,
| # module: coeditor.service
@dataclass
class EditPredictionService:
def _suggest_edit_two_steps(
self,
file: RelPath,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
n_suggestions: int = 1,
) -> tuple[_EditRegion, Callable[[], ServiceResponse]]:
<s>_elem(
target,
problem,
pred.out_tks,
)
preview = "\n".join(
compute_line_diffs_fast(
splitlines(pred_change.before),
splitlines(pred_change.after),
)
)
+ input_status, diff_status, changes = compute_line_status(pred_change)
- input_status, change_status = compute_line_status(pred_change)
+ line_offset = target_lines[0]
input_status = [
+ (i + line_offset, tag) for i, tag in input_status.items()
- (i + target_lines[0], tag) for i, tag in input_status.items()
]
+ output_status = list(diff_status.items())
- output_status = list(change_status.items())
+ for c in changes:
+ c["start"] += line_offset
+ c["until"] += line_offset
suggestion = EditSuggestion(
score=pred.score,
change_preview=preview,
- new_code=pred_change.after,
input_status=input_status,
output_status=output_status,
<0> )
suggestions.append(suggestion)
return ServiceResponse(
target_file=str(self.project / file),
edit_start=(target_lines[0], 0),
edit_end=(target_lines[-1] + 1, 0),
target_lines=target.target_lines,
input_code=target.current_code,
suggestions=suggestions,
)
return target, next_step
| ===========above chunk 0===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def _suggest_edit_two_steps(
self,
file: RelPath,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
n_suggestions: int = 1,
) -> tuple[_EditRegion, Callable[[], ServiceResponse]]:
# offset: -1
<s>(len(predictions), 1)
predictions = predictions[0]
assert predictions
if log_dir is not None:
log_dir.mkdir(exist_ok=True)
input_tks = batch["input_ids"][0]
references = batch["references"]
output_truth = batch["labels"][0]
print(f"Writing logs to: {log_dir}")
for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt").open("w") as f:
pred_tks = pred.out_tks
score = pred.score
print(f"{problem.edit_line_ids=}", file=f)
print(f"{len(input_tks)=}", file=f)
print(f"{len(references)=}", file=f)
print(f"Solution score: {score:.3g}", file=f)
print(f"Marginalized samples:", pred.n_samples, file=f)
pred = RetrievalModelPrediction(
input_ids=input_tks,
output_ids=pred_tks,
labels=output_truth,
references=references,
)
pred_str = show_prediction(problem, pred)
print(pred_str, file=f)
target_lines = target.target_lines
suggestions = list[EditSuggestion]()
for pred in predictions[:n_suggestions]:
pred_change = self.apply_edit_to_elem(
target,
problem,
pred.out_tks,
)
preview = "\n".</s>
===========above chunk 1===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def _suggest_edit_two_steps(
self,
file: RelPath,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
n_suggestions: int = 1,
) -> tuple[_EditRegion, Callable[[], ServiceResponse]]:
# offset: -2
timed = self.tlogger.timed
with timed("get c3 problem"):
problem, span = self.detector.get_problem(file, edit_lines)
with timed("tokenize c3 problem"):
tk_prob = self.c3_tkn.tokenize_problem(problem)
target = self.get_target_code(span.code, problem, tk_prob)
def next_step():
batch = C3DataLoader.pack_batch([tk_prob])
with timed("run model"), torch.autocast("cuda"):
predictions = self.model.predict_on_batch(
batch, [problem], self.dec_args, self.show_max_solutions
)
assert_eq(len(predictions), 1)
predictions = predictions[0]
assert predictions
if log_dir is not None:</s>
===========unchanged ref 0===========
at: coeditor._utils
assert_eq(x: T1, y: T1, message: Callable[[], str]=lambda: "") -> None
compute_line_diffs_fast(before: Sequence[str], after: Sequence[str])
at: coeditor._utils.TimeLogger
times: dict[str, list[float]] = field(default_factory=dict)
timed(self, name: str)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.7"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 16
ref_chunk_overlap: int = 32
disable_builtin_defs: bool = True
disable_unchanged_refs: bool = False
current_code_only: bool = False
tokenize_problem(problem: C3Problem) -> TkC3Problem
at: coeditor.change.Modified
before: E1
after: E1
unchanged: bool = False
at: coeditor.common
RelPath = NewType("RelPath", Path)
splitlines(text: str) -> list[str]
at: coeditor.model
RetrievalModelPrediction(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
RetrievalModelPrediction(**kwargs: _VT)
RetrievalModelPrediction(map: Mapping[_KT, _VT], **kwargs: _VT)
===========unchanged ref 1===========
show_prediction(prob: C3Problem, pred: RetrievalModelPrediction) -> str
C3DataLoader(all_probs: Sequence[C3Problem], transform: C3ProblemTransform | None, tokenizer: C3ProblemTokenizer, batch_args: BatchArgs, shuffle: bool, desc: str, tqdm_args: dict | None=None, chunk_size: int=1000, workers: int=DefaultWorkers)
at: coeditor.model.C3DataLoader
all_probs: Sequence[C3Problem]
transform: C3ProblemTransform | None
tokenizer: C3ProblemTokenizer
batch_args: BatchArgs
shuffle: bool
desc: str
tqdm_args: dict | None = None
chunk_size: int = 1000
workers: int = DefaultWorkers
pack_batch(probs: Sequence[TkC3Problem])
at: coeditor.model.PredictedChange
change: Modified[str]
out_tks: TokenSeq
score: float
n_samples: int
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
decorate_autocast(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1)
predict_on_batch(batch: dict, problems: Sequence[C3Problem], dec_args: DecodingArgs, n_solutions: int=1) -> list[list[PredictedChange]]
at: coeditor.model.RetrievalEditorModel.__init__
self.tlogger = TimeLogger()
at: coeditor.service
_tlogger = TimeLogger()
EditSuggestion(map: Mapping[_KT, _VT], **kwargs: _VT)
EditSuggestion(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
EditSuggestion(**kwargs: _VT)
|
coeditor.service/compute_line_status | Modified | temp-1 | ad918b35e2b8314f30a7f8ffc1e957c9f49956df | plugin 0.4.2: support applying subchanges. | <0>:<add> return input_status, diff_status, changes
| # module: coeditor.service
def compute_line_status(change: Modified[str]):
<s> for the lines in the delta file
+ diff_status = dict[int, StatusTag]()
- change_status = dict[int, StatusTag]()
+ # the line change operations
+ changes = list[LineChange]()
for tag, (i1, i2), (j1, j2) in diff_ops:
+ if tag != " ":
+ old_str = _join_lines(in_lines[i1:i2])
+ new_str = _join_lines(out_lines[j1:j2])
+ changes.append(
+ LineChange(start=i1, until=i2, old_str=old_str, new_str=new_str)
+ )
if tag == "A":
+ input_status[i1] = "A"
- before_status[i1] = "A"
for j in range(j1, j2):
+ diff_status[offset + j] = "A"
- change_status[offset + j] = "A"
continue
if tag == "D":
for _ in range(i2 - i1):
+ diff_status[offset + j1] = "D"
- change_status[offset + j1] = "D"
offset += 1
if tag == "R":
for _ in range(i2 - i1):
+ diff_status[offset + j1] = "RD"
- change_status[offset + j1] = "RD"
offset += 1
for j in range(j1, j2):
+ diff_status[offset + j] = "RA"
- change_status[offset + j] = "RA"
for i in range(i1, i2):
+ if i not in input_status:
- if i not in before_status:
+ input_status[i] = tag
- before_status[i] = tag
- return before_status, change_status
<0>
| ===========above chunk 0===========
# module: coeditor.service
def compute_line_status(change: Modified[str]):
# offset: -1
+ """Given a str change, compute the line status for the input ('A', 'D', or 'R')
+ and the line status for the change diff ('A', 'D', 'RA', or 'RD')."""
+ in_lines = splitlines(change.before)
+ out_lines = splitlines(change.after)
+ diff_ops = get_diff_ops(in_lines, out_lines)
- diff_ops = get_diff_ops(splitlines(change.before), splitlines(change.after))
offset = 0
# the line status for the lines before the edit
+ input_status = dict[int, StatusTag]()
- before_status = dict[int, StatusTag]()
# the line status for the lines in the delta file
+ diff_status = dict[int, StatusTag]()
- change_status = dict</s>
===========unchanged ref 0===========
at: coeditor._utils
CodePosition = NewType("CodePosition", tuple[int, int])
at: coeditor.change
Modified(before: E1, after: E1, unchanged: bool=False)
at: coeditor.change.Modified
before: E1
after: E1
at: coeditor.common
splitlines(text: str) -> list[str]
at: coeditor.service
StatusTag = Literal["A", "D", "R", " ", "RA", "RD"]
LineChange(**kwargs: _VT)
LineChange(map: Mapping[_KT, _VT], **kwargs: _VT)
LineChange(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
_tag_map: dict[str, StatusTag] = {
"insert": "A",
"delete": "D",
"replace": "R",
"equal": " ",
}
get_diff_ops(before: Sequence[str], after: Sequence[str]) -> list[tuple[StatusTag, tuple[int, int], tuple[int, int]]]
at: difflib
SequenceMatcher(isjunk: Optional[Callable[[_T], bool]]=..., a: Sequence[_T]=..., b: Sequence[_T]=..., autojunk: bool=...)
at: difflib.SequenceMatcher
get_opcodes() -> List[Tuple[str, int, int, int, int]]
__class_getitem__ = classmethod(GenericAlias)
at: typing
Sequence = _alias(collections.abc.Sequence, 1)
===========changed ref 0===========
# module: coeditor.service
+ class LineChange(TypedDict):
+ start: int
+ until: int
+ old_str: str
+ new_str: str
+
===========changed ref 1===========
# module: coeditor.service
class EditSuggestion(TypedDict):
score: float
change_preview: str
- new_code: str
input_status: list[tuple[int, StatusTag]]
output_status: list[tuple[int, StatusTag]]
+ changes: list[LineChange]
===========changed ref 2===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def _suggest_edit_two_steps(
self,
file: RelPath,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
n_suggestions: int = 1,
) -> tuple[_EditRegion, Callable[[], ServiceResponse]]:
timed = self.tlogger.timed
with timed("get c3 problem"):
problem, span = self.detector.get_problem(file, edit_lines)
with timed("tokenize c3 problem"):
tk_prob = self.c3_tkn.tokenize_problem(problem)
target = self.get_target_code(span.code, problem, tk_prob)
def next_step():
batch = C3DataLoader.pack_batch([tk_prob])
with timed("run model"), torch.autocast("cuda"):
predictions = self.model.predict_on_batch(
batch, [problem], self.dec_args, self.show_max_solutions
)
assert_eq(len(predictions), 1)
predictions = predictions[0]
assert predictions
if log_dir is not None:
log_dir.mkdir(exist_ok=True)
input_tks = batch["input_ids"][0]
references = batch["references"]
output_truth = batch["labels"][0]
print(f"Writing logs to: {log_dir}")
for i, pred in enumerate(predictions):
with (log_dir / f"solution-{i}.txt").open("w") as f:
pred_tks = pred.out_tks
score = pred.score
print(f"{problem.edit_line_ids=}", file=f)
print(f"{len(input_tks)=}", file=f)
print(f"{len(references)=}", file=f)
print(f"Solution score: {score:.3g</s>
===========changed ref 3===========
# module: coeditor.service
@dataclass
class EditPredictionService:
def _suggest_edit_two_steps(
self,
file: RelPath,
edit_lines: Sequence[int] | int,
log_dir: Path | None = Path(".coeditor_logs"),
n_suggestions: int = 1,
) -> tuple[_EditRegion, Callable[[], ServiceResponse]]:
# offset: 1
<s> print(f"{len(references)=}", file=f)
print(f"Solution score: {score:.3g}", file=f)
print(f"Marginalized samples:", pred.n_samples, file=f)
pred = RetrievalModelPrediction(
input_ids=input_tks,
output_ids=pred_tks,
labels=output_truth,
references=references,
)
pred_str = show_prediction(problem, pred)
print(pred_str, file=f)
target_lines = target.target_lines
suggestions = list[EditSuggestion]()
for pred in predictions[:n_suggestions]:
pred_change = self.apply_edit_to_elem(
target,
problem,
pred.out_tks,
)
preview = "\n".join(
compute_line_diffs_fast(
splitlines(pred_change.before),
splitlines(pred_change.after),
)
)
+ input_status, diff_status, changes = compute_line_status(pred_change)
- input_status, change_status = compute_line_status(pred_change)
+ line_offset = target_lines[0]
input_status = [
+ (i + line_offset, tag) for i, tag in input_status.items()
- (i + target_lines[0], tag) for i, tag in input_status.items()
]
+ output_status = list</s> |
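A usage sketch of the new three-part return value (line numbers are 0-based
within the edited region; the caller adds `line_offset` afterwards). The
asserted values follow directly from the opcode handling above:

from coeditor.change import Modified
from coeditor.service import compute_line_status

change = Modified(before="a\nb\nc", after="a\nB\nc\nd")
input_status, diff_status, changes = compute_line_status(change)
assert input_status[1] == "R"  # "b" -> "B" is a replacement
assert input_status[3] == "A"  # "d" is inserted after the old last line
assert (changes[-1]["start"], changes[-1]["until"]) == (3, 3)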
coeditor.encoding/TokenizedEdit.show_predictions | Modified | temp-1 | a1e2b73ab836924d0b1f9ed88e4fd90e7a6f61e6 | Implement ablation: current_code_only. | <0>:<add> seg = seg + origin_line + [Newline_id]
| # module: coeditor.encoding
class TokenizedEdit(ABC):
@classmethod
def show_predictions(
cls, pred: TokenSeq, main_tk_lines: dict[Token, TokenSeq]
) -> str:
id_map = {k: i for i, k in enumerate(main_tk_lines)}
segs = output_ids_as_seqs(pred)
lines = []
for k, seg in segs.items():
if not seg:
continue # skip empty lines
if seg[-1] == Del_id:
# show the deleted line
section_lines = tk_splitlines(main_tk_lines.get(k, TokenSeq()))
if section_lines:
origin_line = section_lines[0]
else:
origin_line = cls.BAD_DELETE
- origin_line.append(Newline_id)
- seg = seg + origin_line
<0> label = cls.show_label(id_map.get(k, -1))
lines.append(f"{label}:{indent(decode_tokens(seg), ' ' * 4).lstrip()}")
return "".join(lines)
| ===========unchanged ref 0===========
at: coeditor.common
Token = int
TokenSeq = list[Token]
at: coeditor.encoding
Del_id = get_tk_id(Del)
Newline_id = get_tk_id("\n")
tk_splitlines(tks: TokenSeq)
decode_tokens(tokens: TokenSeq, prettify: bool=False) -> str
output_ids_as_seqs(output_ids: Iterable[Token]) -> dict[Token, TokenSeq]
at: coeditor.encoding.TokenizedEdit
input_tks: TokenSeq
output_tks: TokenSeq
main_tks: TokenSeq
path: ProjectPath
change_type: Change[None]
BAD_DELETE = encode_single_line("((bad delete))")
show_label(i: int)
at: textwrap
indent(text: str, prefix: str, predicate: Optional[Callable[[str], bool]]=...) -> str
at: typing.Mapping
get(key: _KT) -> Optional[_VT_co]
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
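`output_ids_as_seqs` is what turns the flat prediction into the
`{extra_id: edit_tokens}` map consumed above. An illustrative simplified
re-implementation (the real helper lives in `coeditor.encoding`):

def group_by_extra_ids(pred: list[int], is_extra_id) -> dict[int, list[int]]:
    segs: dict[int, list[int]] = {}
    cur = None
    for tk in pred:
        if is_extra_id(tk):
            cur = tk
            segs[cur] = []  # start a new segment at each <extra_id_k>
        elif cur is not None:
            segs[cur].append(tk)
    return segs

assert group_by_extra_ids([900, 1, 2, 901, 3], lambda tk: tk >= 900) == {
    900: [1, 2],
    901: [3],
}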
|
coeditor.c3problem/C3ProblemTokenizer.tokenize_problem | Modified | temp-1 | a1e2b73ab836924d0b1f9ed88e4fd90e7a6f61e6 | Implement ablation: current_code_only. | <0>:<add> all_refs.append((f"unchanged ref {i}", chunk))
| # module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
<s> ref_size_sum = sum(len(ref) for _, ref in all_refs)
truncated = False
if ref_size_sum < self.max_ref_tks_sum:
+ unchanged = problem.relevant_unchanged
+ if self.disable_unchanged_refs:
- if not self.disable_unchanged_refs:
+ unchanged = {}
+ if self.disable_builtin_defs:
+ unchanged = {
+ k: v for k, v in unchanged.items() if not k.startswith("builtins.")
- unchanged = self._group_encode_unchanged_refs(
- problem.relevant_unchanged
+ }
- )
+ for i, chunk in enumerate(self._group_encode_unchanged_refs(unchanged)):
- for i, chunk in enumerate(unchanged):
- all_refs.append((f"unchanged ref {i}", chunk))
<0> else:
truncated = True
if ref_size_sum < self.max_ref_tks_sum:
changed = self._group_encode_changed_refs(problem.relevant_changes)
for i, chunk in enumerate(changed):
all_refs.append((f"changed ref {i}", chunk))
ref_size_sum += sum(len(x) for x in changed)
else:
truncated = True
# take until we hit the limit
ref_size_sum = 0
kept_refs = list[tuple[str, TkArray]]()
for name, ref in all_refs:
if ref_size_sum + len(ref) > self.max_ref_tks_sum:
truncated = True
break
ref_size_sum += len(ref)
kept_refs.append((name, ref))
return TkC3Problem(
TkArray.new(chunk_input</s> | ===========above chunk 0===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: -1
<s>)
+ else:
+ above_tks = above_delta.apply_to_change(above_tks)
below_tks = join_list(origin_lines[edit_stop:] + [TokenSeq()], Newline_id)
chunk_input, above_tks, below_tks = self._inline_some_context(
chunk_input, above_tks, below_tks, input_limit
)
chunk_output = truncate_section(
chunk_output,
TruncateAt.Right,
self.max_output_tks,
add_bos=False,
inplace=True,
)
above_chunks = break_into_chunks(
above_tks,
lambda i: self._encode_headers(span.headers, -1 - i),
chunk_size=self.max_ref_tks,
overlap=self.ref_chunk_overlap,
right_to_left=True,
)
if not below_tks:
below_chunks = []
else:
below_chunks = break_into_chunks(
below_tks,
lambda i: self._encode_headers(span.headers, i + 1),
chunk_size=self.max_ref_tks,
overlap=self.ref_chunk_overlap,
)
above_chunks = [
(f"above chunk {i}", TkArray.new(chunk))
for i, chunk in enumerate(above_chunks)
]
below_chunks = [
(f"below chunk {i}", TkArray.new(chunk))
for i, chunk in enumerate(below_chunks)
]
all_refs = above_chunks + below_chunks
ref_size_sum = sum(len(ref) for _, ref in all_refs)
truncated = False
</s>
===========above chunk 1===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: -2
<s>line)
chunk_input.append(Newline_id)
chunk_input.append(get_extra_id(i))
if l < len(origin_lines):
chunk_input.extend(origin_lines[l])
chunk_input.append(Newline_id)
last_line = l
line_change = join_list(tk_delta.get_line_change(l), Newline_id)
chunk_output.append(get_extra_id(i))
chunk_output.extend(line_change)
if line_change and line_change[-1] != Del_id:
chunk_output.append(Newline_id)
if len(chunk_input) > input_limit:
break
edit_stop = last_line + 1
# limit the input size if it's too long
chunk_input = truncate_section(
chunk_input, TruncateAt.Right, input_limit, inplace=True
)
chunk_output = truncate_output_tks(chunk_input, chunk_output)
# try to move some prev_change_tks into the input
above_tks = join_list(origin_lines[:edit_start] + [TokenSeq()], Newline_id)
+ above_delta = tk_delta.for_input_range((0, edit_start))
- above_tks = tk_delta.for_input_range((0, edit_start)).apply_to_change(above_tks)
+ if self.current_code_only:
+ above_tks = above_delta.apply_to_input(above_tks)
+ else:
+ above_tks = above_delta.apply_to_change(above_tks)
</s>
===========above chunk 2===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: -3
+ if self.current_code_only:
+ problem = _problem_to_current(problem)
span = problem.span
-
original: TokenSeq = span.original.tolist()
tk_delta: TkDelta = span.delta
origin_lines = tk_splitlines(original)
+ edit_lines = list(sorted(problem.edit_line_ids))
+ edit_start = edit_lines[0]
- edit_start = problem.edit_line_ids[0]
scope_tks = self._encode_headers(span.headers, 0)
input_limit = self.max_query_tks - len(scope_tks)
chunk_input = TokenSeq()
chunk_output = TokenSeq()
last_line = edit_start
+ for i, l in enumerate(edit_lines):
- for i, l in enumerate(problem.edit_line_ids):
for line in origin_lines[last_line + 1 : l]:
chunk_input.</s>
===========below chunk 0===========
# module: coeditor.c3problem
@dataclass
class C3ProblemTokenizer:
def tokenize_problem(
self,
problem: C3Problem,
) -> TkC3Problem:
# offset: 1
<s>_refs.append((name, ref))
return TkC3Problem(
TkArray.new(chunk_input),
TkArray.new(scope_tks),
TkArray.new(chunk_output),
path=span.headers[-1].path,
change_type=problem.change_type,
named_references=kept_refs,
project=problem.src_info["project"],
commit=problem.src_info["commit"],
truncated=truncated,
)
===========unchanged ref 0===========
at: coeditor.c3problem
C3Problem(span: ChangedCodeSpan, edit_line_ids: Sequence[int], relevant_changes: Sequence[ChangedCodeSpan], relevant_unchanged: Mapping["PyFullName", "PyDefinition"], change_type: Change[None], src_info: SrcInfo, transformations: tuple[str, ...]=())
TkC3Problem(main_input: TkArray, header: TkArray, output: TkArray, path: ProjectPath, change_type: Change[None], named_references: Sequence[tuple[str, TkArray]], project: str, commit: CommitInfo | None, truncated: bool)
_problem_to_current(prob: C3Problem)
at: coeditor.c3problem.C3Problem
span: ChangedCodeSpan
edit_line_ids: Sequence[int]
relevant_changes: Sequence[ChangedCodeSpan]
relevant_unchanged: Mapping["PyFullName", "PyDefinition"]
change_type: Change[None]
src_info: SrcInfo
transformations: tuple[str, ...] = ()
at: coeditor.c3problem.C3ProblemTokenizer
VERSION = "2.7"
max_ref_tks: int = 512
max_query_tks: int = 512
max_output_tks: int = 256
max_scope_tks: int = 128
max_ref_tks_sum: int = 512 * 16
ref_chunk_overlap: int = 32
disable_builtin_defs: bool = True
disable_unchanged_refs: bool = False
current_code_only: bool = False
_encode_headers(scope_changes: Sequence[ChangedHeader], offset: int) -> TokenSeq
_inline_some_context(input: TokenSeq, above_ctx: TokenSeq, below_ctx: TokenSeq, size_limit: int) -> tuple[TokenSeq, TokenSeq, TokenSeq]
_group_encode_unchanged_refs(elems: Mapping[PyFullName, PyDefinition]) -> Sequence[TkArray]
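The `disable_builtin_defs` ablation above drops builtin symbol definitions
by their fully qualified name prefix. The filter in isolation (toy data for
illustration only):

unchanged = {
    "builtins.print": "def print(...): ...",
    "mypkg.helper": "def helper(): ...",
}
kept = {k: v for k, v in unchanged.items() if not k.startswith("builtins.")}
assert list(kept) == ["mypkg.helper"]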
|
coeditor.model/RetrievalEditorModel.train_on_data | Modified | temp-1 | a1e2b73ab836924d0b1f9ed88e4fd90e7a6f61e6 | Implement ablation: current_code_only. | <0>:<add> save_steps=max(500, min(10000, epoch_steps // 5)),
| # module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
def train_on_data(
self,
training_name: str,
train_loader: "C3DataLoader",
eval_loader: "C3DataLoader",
train_args: "TrainingArgs",
) -> None:
<s>dataloader))
n_samples = metrics["loss_per_ex"].weight
metrics = {
f"{metric_key_prefix}_{k}": v.mean() for k, v in metrics.items()
}
return EvalLoopOutput(
predictions=tuple(),
label_ids=tuple(),
metrics=metrics,
num_samples=n_samples,
)
epoch_steps = len(train_loader)
cprint("blue", "Number of training batches (estimate):", epoch_steps)
trainer_args = Seq2SeqTrainingArguments(
output_dir=str(train_dir),
overwrite_output_dir=True,
evaluation_strategy="epoch",
save_strategy="steps",
- save_steps=max(500, min(5000, epoch_steps // 5)),
<0> logging_steps=max(1, min(1000, epoch_steps // 10)),
num_train_epochs=train_args.max_train_epochs,
save_total_limit=3,
lr_scheduler_type=train_args.lr_scheduler_type,
learning_rate=train_args.learning_rate,
weight_decay=train_args.weight_decay,
metric_for_best_model="loss_per_tk",
greater_is_better=False,
fp16=True,
# load_best_model_at_end=True,
push_to_hub=False,
report_to=["wandb"],
disable_tqdm=True,
# torchdynamo="inductor", # use compiled model
)
trainer = DynamicTrainer(
self,
trainer_args,
# callbacks=[EarlyStoppingCallback(</s> | ===========above chunk 0===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
def train_on_data(
self,
training_name: str,
train_loader: "C3DataLoader",
eval_loader: "C3DataLoader",
train_args: "TrainingArgs",
) -> None:
# offset: -1
train_dir = get_model_dir(trained=False) / training_name
+ # eval_loader.tqdm_args = {"disable": True}
- eval_loader.tqdm_args = {"disable": True}
model = self
# model = torch.compile(self.to("cuda")) # pytorch doesn't support python 3.11 yet.
class DynamicTrainer(Seq2SeqTrainer):
def get_train_dataloader(self):
return train_loader
def get_eval_dataloader(self, eval_dataset):
return eval_loader
def evaluation_loop(
self,
dataloader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
metrics = model.eval_loss_on_loader(as_any(dataloader))
n_samples = metrics["loss_per_ex"].weight
metrics = {
f"{</s>
===========below chunk 0===========
# module: coeditor.model
class RetrievalEditorModel(T5PreTrainedModel):
def train_on_data(
self,
training_name: str,
train_loader: "C3DataLoader",
eval_loader: "C3DataLoader",
train_args: "TrainingArgs",
) -> None:
# offset: 1
<s> )
trainer = DynamicTrainer(
self,
trainer_args,
# callbacks=[EarlyStoppingCallback(early_stopping_patience=1)],
)
trainer.train()
save_dir = get_model_dir(trained=True) / training_name
self.save(save_dir)
print("Model saved to:", save_dir)
===========unchanged ref 0===========
at: coeditor._utils
as_any(x) -> Any
cprint(color: str, *elems, sep: Optional[str]=..., end: Optional[str]=..., file: Optional[SupportsWrite[str]]=..., flush: bool=...)
at: coeditor.common
get_model_dir(trained=True) -> Path
at: coeditor.common.WeightedSum
sum: V
weight: W
mean() -> float
at: coeditor.model.RetrievalEditorModel
is_parallelizable = False
supports_gradient_checkpointing = False
eval_loss_on_loader(dataloader: "C3DataLoader")
decorate_autocast(dataloader: "C3DataLoader")
	save(save_dir: Path, *, is_main_process: bool=True, state_dict: Optional[dict]=None, save_function: Callable=torch.save, push_to_hub: bool=False, max_shard_size: Union[int, str]="10GB", safe_serialization: bool=False, variant: Optional[str]=None, private: Optional[bool]=None, use_auth_token: Optional[Union[bool, str]]=None, repo_url: Optional[str]=None, organization: Optional[str]=None)
at: coeditor.model.TrainingArgs
learning_rate: float = 2e-5
weight_decay: float = 0.01
max_train_epochs: int = 3
lr_scheduler_type: SchedulerType = SchedulerType.LINEAR
at: transformers.trainer.Trainer
get_train_dataloader(self) -> DataLoader
===========unchanged ref 1===========
get_eval_dataloader(self, eval_dataset: Optional[Dataset]=None) -> DataLoader
train(resume_from_checkpoint: Optional[Union[str, bool]]=None, trial: Union["optuna.Trial", Dict[str, Any]]=None, ignore_keys_for_eval: Optional[List[str]]=None, **kwargs)
evaluation_loop(self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str="eval") -> EvalLoopOutput
at: transformers.trainer_seq2seq
Seq2SeqTrainer(model: Union["PreTrainedModel", nn.Module]=None, args: "TrainingArguments"=None, data_collator: Optional["DataCollator"]=None, train_dataset: Optional[Dataset]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, tokenizer: Optional["PreTrainedTokenizerBase"]=None, model_init: Optional[Callable[[], "PreTrainedModel"]]=None, compute_metrics: Optional[Callable[["EvalPrediction"], Dict]]=None, callbacks: Optional[List["TrainerCallback"]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None)
at: transformers.trainer_utils
EvalLoopOutput(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
at: transformers.training_args.TrainingArguments
framework = "pt"
output_dir: str = field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."},
)
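
===========editor note: dataloader-override pattern===========
The DynamicTrainer in this record subclasses the Hugging Face Seq2SeqTrainer only to substitute pre-built dataloaders and a custom evaluation loop. A stripped-down sketch of that override pattern follows; LoaderTrainer and its constructor arguments are illustrative names, while get_train_dataloader and get_eval_dataloader are the real Trainer hooks listed in the unchanged refs above.

from transformers import Seq2SeqTrainer

class LoaderTrainer(Seq2SeqTrainer):
    """Serve externally constructed dataloaders instead of building them
    from train_dataset/eval_dataset, as DynamicTrainer does above."""

    def __init__(self, *args, train_loader=None, eval_loader=None, **kwargs):
        super().__init__(*args, **kwargs)
        self._train_loader = train_loader
        self._eval_loader = eval_loader

    def get_train_dataloader(self):
        return self._train_loader

    def get_eval_dataloader(self, eval_dataset=None):
        # The dataset argument is ignored on purpose: batching is owned
        # by the injected loader, not by the Trainer.
        return self._eval_loader

This keeps all batching logic (here, the token-count-based batching of C3DataLoader) outside the Trainer, which only drives the optimization loop.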
|
scripts.train_model/train_model | Modified | temp-1 | a1e2b73ab836924d0b1f9ed88e4fd90e7a6f61e6 | Implement ablation: current_code_only. | <0>:<add> eval_tkn.max_query_tks = 1024
| <s>train_model
def train_model(
model_name: str,
dataset_name: str,
description: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
<s>_from is None:
model = RetrievalEditorModel.from_code_t5("base", reuse_embed=True)
else:
model = RetrievalEditorModel.load(resumed_from)
if os.getenv("CUDA_VISIBLE_DEVICES") is None:
warnings.warn(
"CUDA_VISIBLE_DEVICES not set, using 0. Note that "
"the Huggingface Trainer will use all visible GPUs for training."
)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
train_tkn = encoder.edit_tokenizer
eval_tkn = copy.deepcopy(train_tkn)
- eval_tkn.max_query_tks *= 2
<0> eval_tkn.max_output_tks *= 2
eval_tkn.max_ref_tks_sum *= 2
valid_loader = C3DataLoader(
datasets["valid"], None, eval_tkn, eval_batch_args, shuffle=False, desc="eval"
)
if not eval_only:
# gradually increase the ctx size during training
scales = [4, 2, 1]
for scale in scales:
s_tkn = copy.copy(train_tkn)
s_tkn.max_ref_tks_sum //= scale
s_probs = [
x
for x in datasets["train"]
if sum(c.change_size() for c in x.relevant_changes)
< s_tkn.max_ref_</s> | ===========above chunk 0===========
<s>_model(
model_name: str,
dataset_name: str,
description: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: -1
<s> datasets,
encoder,
remake_problems=recreate_data,
workers=multiprocessing.cpu_count(),
)
# limit the number of examples for faster testing
datasets["valid"] = random_subset(eval_probs["valid"], 10000, rng=42)
datasets["test"] = random_subset(eval_probs["test"], 10000, rng=42)
config_dict = {
k: get_modified_args(v)
for k, v in {
"description": description,
"edit_tokenizer": encoder.edit_tokenizer.get_args(),
"batch_args": batch_args,
"train_args": train_args,
"dec_args": dec_args,
}.items()
}
project = "Coeditor" if not quicktest else "Coeditor-quicktest"
if eval_only:
project = "eval-" + project
wandb.init(dir="..", project=project, name=model_name, config=config_dict)
if quicktest:
print("Using fewer data for quick test.")
n_quick_exs = 20
datasets = C3ProblemDataset(
train=datasets["train"][:n_quick_exs],
valid=datasets["valid"][:n_quick_exs],
test=datasets["test"][:n_quick_exs],
)
if resumed_from is None:
model = RetrievalEditorModel.from_code_t5("base", reuse_embed=True)
</s>
===========above chunk 1===========
<s>_model(
model_name: str,
dataset_name: str,
description: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: -2
dec_args = DecodingArgs()
if quicktest:
model_name = "quicktest-" + model_name
if not eval_only:
check_save_dir(model_name)
# problems will be transformed and saved for valid and test but not train.
datasets = make_or_load_dataset(
dataset_name,
encoder.change_processor,
remake_problems=recreate_data,
workers=multiprocessing.cpu_count(),
)
with timed_action("Making or loading transformed C3 problems for eval"):
# it's important to cache these due to randomness in the transformations
eval_probs = make_or_load_transformed_dataset(
dataset_name,
datasets,
encoder,
remake_problems=recreate_data,
workers=multiprocessing.cpu_count(),</s>
===========below chunk 0===========
<s> train_model(
model_name: str,
dataset_name: str,
description: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: 1
<s> if sum(c.change_size() for c in x.relevant_changes)
< s_tkn.max_ref_tks_sum
]
n_probs = max(1, len(s_probs) // max(scales) // 2) * scale
s_probs = random_subset(s_probs, n_probs)
desc = f"training (ctx={s_tkn.max_ref_tks_sum})"
s_loader = C3DataLoader(
s_probs,
encoder.problem_tranform,
s_tkn,
batch_args,
shuffle=True,
desc=desc,
)
with timed_action(desc):
model.train_on_data(model_name, s_loader, valid_loader, train_args)
model.to("cuda")
test_loader = C3DataLoader(
datasets["test"], None, eval_tkn, eval_batch_args, shuffle=False, desc="test"
)
print(f"{len(test_loader)}")
print(f"{len(test_loader.all_probs)}")
with timed_action("Loss Evaluation"):
eval_result = model.eval_loss_on_loader(test_loader)
eval_dict = {f"test/{k}": v.average() for k, v in eval_result.items()}
wandb.log(eval_dict)
with timed_action("Accuracy Evaluation"):
out_dir = get_model_dir() / model_name / "exact</s>
===========below chunk 1===========
<s> train_model(
model_name: str,
dataset_name: str,
description: str,
encoder: C3CombinedEncoder = C3CombinedEncoder(),
batch_args=BatchArgs.train_default(),
eval_batch_args=BatchArgs.eval_default(),
train_args=TrainingArgs(),
recreate_data: bool = False,
resumed_from: Path | None = None,
eval_only: bool = False,
quicktest: bool = False,
):
# offset: 2
<s> with timed_action("Accuracy Evaluation"):
out_dir = get_model_dir() / model_name / "exact_match_samples"
exact_acc = model.eval_on_data(
datasets["test"],
test_loader,
dec_args,
out_dir,
probs_to_save=300,
)
print("Exact-match accuracy:", exact_acc)
wandb.log({"test/exact-acc": exact_acc.average()})
cprint("blue", "Exact-match samples saved to:", out_dir)
return model
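
===========editor note: context-size curriculum===========
The scales = [4, 2, 1] loop in this record trains in stages with a growing reference-context budget, at each stage keeping only the problems whose relevant changes fit the reduced budget. A schematic version of that schedule is sketched below; the ref_size attribute is a hypothetical stand-in for sum(c.change_size() for c in x.relevant_changes), and the subset size mirrors the n_probs formula above.

import random

def curriculum_schedule(problems, max_ref_tks_sum, scales=(4, 2, 1), seed=42):
    """Yield (budget, subset) pairs for a gradually growing context budget."""
    rng = random.Random(seed)
    for scale in scales:
        budget = max_ref_tks_sum // scale
        fitting = [p for p in problems if p.ref_size < budget]  # hypothetical attr
        n = max(1, len(fitting) // max(scales) // 2) * scale
        yield budget, rng.sample(fitting, min(n, len(fitting)))

Earlier stages are cheaper per step (shorter references) and act as a warm-up; only the final stage trains with the full max_ref_tks_sum budget that evaluation uses.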
|
coeditor.model/RetrivalEncoder.forward | Modified | temp-1 | 84cfd5206348ecc3f54d202b830f803d8a03f26f | Add ablation: dense attention. | <0>:<add> hidden_state_mask=tks_mask,
| # module: coeditor.model
@dataclass
class RetrivalEncoder:
def forward(
self,
input_ids: LongTensor,
references: Sequence[TokenSeq] | None = None,
query_ref_list: Sequence[Sequence[int]] | None = None,
# not used arguments below:
output_attentions=None,
output_hidden_states=None,
return_dict=None,
tqdm=None,
) -> RetrivalEncoderOutputs:
<s> use dense implementation
+ rows = list[Tensor]()
+ for i, l in enumerate(q_lens):
+ query = input_ids[i, :l]
+ ref_list = query_ref_list[i]
+ ref_tensors = [to_long_tensor(references[rid]) for rid in ref_list]
+ rows.append(torch.cat(ref_tensors + [query]))
+ tks_tensor, tks_mask = stack_pad_tensors(rows)
+ enc_out = self.encoder.forward(
+ tks_tensor, attention_mask=tks_mask, return_dict=True
+ )
+ return RetrivalEncoderOutputs(
+ last_hidden_state=enc_out.last_hidden_state,
<0> )
def split_outputs(
lens: Sequence[int], out: BaseModelOutputWithPastAndCrossAttentions
) -> Iterable[BaseModelOutputWithPastAndCrossAttentions]:
for i, l in enumerate(lens):
hidden_states = tuple(
s[i : i + 1, :l] for s in not_none(out.hidden_states)
)
yield BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states[-1], # type: ignore
hidden_states=hidden_states, # type: ignore
)
ref_outputs = batched_map(
references,
group_key=lambda ref: _round_length_group(len(ref)),
f=lambda refs: split_outputs(
</s> | ===========above chunk 0===========
# module: coeditor.model
@dataclass
class RetrivalEncoder:
def forward(
self,
input_ids: LongTensor,
references: Sequence[TokenSeq] | None = None,
query_ref_list: Sequence[Sequence[int]] | None = None,
# not used arguments below:
output_attentions=None,
output_hidden_states=None,
return_dict=None,
tqdm=None,
) -> RetrivalEncoderOutputs:
# offset: -1
<s>torch.long).to(device),
)
if references is None:
references = []
assert_eq(input_ids.dim(), 2)
assert_eq(input_ids.dtype, torch.long)
device = self.encoder.device
n_queries = input_ids.size(0)
q_lens = input_ids.ne(PAD_id).sum(dim=1).tolist()
n_refs = len(references)
if query_ref_list is None:
query_ref_list = [list(range(n_refs)) for _ in range(n_queries)]
if self.attention_mode.value == AttentionMode.bidirectional.value:
# use bidirectional implementation
queries = [cast(LongTensor, input_ids[i, :l]) for i, l in enumerate(q_lens)]
refs = [
[to_long_tensor(references[rid]) for rid in rids]
for rids in query_ref_list
]
hidden_rows = self.bidirectional_forward(queries, refs)
last_hidden_state, hidden_state_mask = stack_pad_tensors(hidden_rows)
return RetrivalEncoderOutputs(
last_hidden_state=last_hidden_state, hidden_state_mask=hidden_state_mask
+ )
+ elif self.attention_mode.value == AttentionMode.dense.value:
+ # use dense implementation
+ rows = list[Tensor]()
+ for i, l in enumerate(q_lens):
+ </s>
===========above chunk 1===========
# module: coeditor.model
@dataclass
class RetrivalEncoder:
def forward(
self,
input_ids: LongTensor,
references: Sequence[TokenSeq] | None = None,
query_ref_list: Sequence[Sequence[int]] | None = None,
# not used arguments below:
output_attentions=None,
output_hidden_states=None,
return_dict=None,
tqdm=None,
) -> RetrivalEncoderOutputs:
# offset: -2
"""
Shapes
- input_ids: (n_queries, seq_len)
- references: (num_refs, ref_len)
        - query_ref_list: for each query, a list of reference indices. If None,
          assume all references are accessible to all queries.
"""
def to_long_tensor(data):
return cast(
LongTensor,
torch.tensor(data, dtype=torch.long).to(device),
)
if references is None:
references = []
assert_</s>
===========below chunk 0===========
# module: coeditor.model
@dataclass
class RetrivalEncoder:
def forward(
self,
input_ids: LongTensor,
references: Sequence[TokenSeq] | None = None,
query_ref_list: Sequence[Sequence[int]] | None = None,
# not used arguments below:
output_attentions=None,
output_hidden_states=None,
return_dict=None,
tqdm=None,
) -> RetrivalEncoderOutputs:
# offset: 1
<s>_key=lambda ref: _round_length_group(len(ref)),
f=lambda refs: split_outputs(
[len(x) for x in refs],
self.encoder.forward(
pad_token_seqs(refs).to(device),
output_hidden_states=True,
return_dict=True,
),
),
)
def encode_queries(query_ids: Sequence[int]) -> Iterable[Tensor]:
queries = [cast(LongTensor, input_ids[q, : q_lens[q]]) for q in query_ids]
assert query_ref_list is not None
query_refs = [query_ref_list[q] for q in query_ids]
q_tensor, q_mask = stack_pad_tensors(queries)
assert_eq(q_tensor.dim(), 2)
if self.attention_mode.value == AttentionMode.query2ref.value:
enc = self.encode_query_uni_directional(
query_ids=cast(LongTensor, q_tensor),
query_attention_mask=q_mask,
ref_outputs=ref_outputs,
query_ref_list=query_refs,
)
else:
assert_eq(self.attention_mode.value, AttentionMode.basic.value)
enc = self.encode_query_basic(
query_ids=cast(LongTensor, q_tensor),
query_attention_mask=q_mask,
ref_outputs=ref_outputs,
query_ref_list=query</s>
===========below chunk 1===========
# module: coeditor.model
@dataclass
class RetrivalEncoder:
def forward(
self,
input_ids: LongTensor,
references: Sequence[TokenSeq] | None = None,
query_ref_list: Sequence[Sequence[int]] | None = None,
# not used arguments below:
output_attentions=None,
output_hidden_states=None,
return_dict=None,
tqdm=None,
) -> RetrivalEncoderOutputs:
# offset: 2
<s> query_attention_mask=q_mask,
ref_outputs=ref_outputs,
query_ref_list=query_refs,
)
last_hidden_state, hidden_state_mask = enc
for i, _ in enumerate(queries):
yield last_hidden_state[i, hidden_state_mask[i]]
def query_group_key(q: int) -> tuple[int, int]:
q_len = q_lens[q]
ref_len = sum(
len(not_none(references)[r]) for r in not_none(query_ref_list)[q]
)
return _round_length_group(q_len), _round_length_group(ref_len)
last_hidden_states = batched_map(
range(n_queries),
group_key=query_group_key,
f=encode_queries,
)
last_hidden_state, hidden_state_mask = stack_pad_tensors(last_hidden_states)
return RetrivalEncoderOutputs(
last_hidden_state=last_hidden_state, hidden_state_mask=hidden_state_mask
)
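
===========editor note: stack-and-pad helper===========
Both the bidirectional and dense branches in this record funnel variable-length rows through stack_pad_tensors, whose definition is not part of this excerpt. The sketch below is a plausible minimal implementation inferred from how it is used here (it returns a right-padded stack plus a boolean validity mask); it is an assumption, not the project's verbatim code.

import torch
from torch import Tensor
from typing import Sequence

def stack_pad_tensors(rows: Sequence[Tensor]) -> tuple[Tensor, Tensor]:
    """Right-pad rows (with matching trailing dims) to a common length;
    return the stacked tensor and a bool mask marking valid positions."""
    max_len = max(r.size(0) for r in rows)
    trailing = rows[0].shape[1:]
    out = rows[0].new_zeros((len(rows), max_len, *trailing))
    mask = torch.zeros((len(rows), max_len), dtype=torch.bool, device=rows[0].device)
    for i, r in enumerate(rows):
        out[i, : r.size(0)] = r
        mask[i, : r.size(0)] = True
    return out, mask

Under this reading, hidden_state_mask lets downstream cross-attention ignore pad positions, and the record's ground-truth fix (hidden_state_mask=tks_mask) simply forwards the token-level mask produced when the dense branch stacks its concatenated reference+query rows.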
|