Skip to content

Commit

Permalink
Minor renaming
Browse files — browse the repository at this point in the history
  • Loading branch information
john-b-yang committed Jul 15, 2024
1 parent a9833c1 commit c6ded6e
Show file tree
Hide file tree
Showing 4 changed files with 6 additions and 13 deletions.
8 changes: 1 addition & 7 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -167,12 +167,6 @@ cython_debug/
*.jsonl.*
*.patch
*.DS_Store
analysis/**/*.json
analysis/**/scratch*
analysis/benchmark/plots/
analysis/evaluation/*.csv
analysis/evaluation/*.pdf
data/repos/copies
notebooks/
image_build_logs/
run_instance_logs/
*logs/
1 change: 0 additions & 1 deletion swebench/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,6 @@
compute_pass_to_pass,
get_logs_eval,
get_eval_report,
get_pred_report,
get_resolution_status,
ResolvedStatus,
TestStatus,
Expand Down
6 changes: 3 additions & 3 deletions swebench/harness/grading.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ def get_logs_eval(log_fp: str) -> tuple[dict[str, str], bool]:
return log_parser(content), True


def get_eval_report(
def get_eval_tests_report(
eval_sm: dict[str, str],
gold_results: dict[str, str],
calculate_to_fail: bool = False,
Expand Down Expand Up @@ -216,7 +216,7 @@ def get_resolution_status(report: dict[str, dict[str, Any]]) -> str:
return ResolvedStatus.NO.value


def get_pred_report(
def get_eval_report(
test_spec: TestSpec,
prediction: dict[str, str],
log_path: str,
Expand Down Expand Up @@ -264,7 +264,7 @@ def get_pred_report(
"PASS_TO_PASS": test_spec.PASS_TO_PASS,
}

report = get_eval_report(eval_sm, eval_ref)
report = get_eval_tests_report(eval_sm, eval_ref)
if get_resolution_status(report) == "RESOLVED_FULL":
report_map[instance_id]["resolved"] = True

Expand Down
4 changes: 2 additions & 2 deletions swebench/harness/run_evaluation.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@
close_logger,
setup_logger,
)
from swebench.harness.grading import get_pred_report
from swebench.harness.grading import get_eval_report
from swebench.harness.test_spec import make_test_spec, TestSpec
from swebench.harness.utils import load_swebench_dataset, str2bool

Expand Down Expand Up @@ -173,7 +173,7 @@ def run_instance(

# Get report from test output
logger.info(f"Grading answer for {instance_id}...")
report = get_pred_report(
report = get_eval_report(
test_spec=test_spec,
prediction=pred,
log_path=test_output_path,
Expand Down

0 comments on commit c6ded6e

Please sign in to comment.