From 34713232e391fd18c5d47e0fa4950d8e22731a11 Mon Sep 17 00:00:00 2001
From: Connor Olding
Date: Sun, 7 May 2023 06:44:01 -0700
Subject: [PATCH] move experimental scoring method to its own file

---
 thursday/utilities/__init__.py |  1 +
 thursday/utilities/scoring.py  | 47 ++++++++++++++++++++++++++++++++
 thursday/utilities/utils.py    | 49 ----------------------------------------------
 3 files changed, 48 insertions(+), 49 deletions(-)
 create mode 100644 thursday/utilities/scoring.py

diff --git a/thursday/utilities/__init__.py b/thursday/utilities/__init__.py
index 79b9211..e85f74d 100644
--- a/thursday/utilities/__init__.py
+++ b/thursday/utilities/__init__.py
@@ -1,5 +1,6 @@
 from .colors import *
 from .prog80 import prog
+from .scoring import *
 from .utils import *
 
 try:
diff --git a/thursday/utilities/scoring.py b/thursday/utilities/scoring.py
new file mode 100644
index 0000000..02dd04e
--- /dev/null
+++ b/thursday/utilities/scoring.py
@@ -0,0 +1,47 @@
+def perform_another_experimental_scoring_method(results):
+    if len(results) and len(something := next(iter(results.values()))[0]) == 3:
+        history_length = len(something[2])
+        each = {}
+        # for i in (history_length - 1,):
+        for i in range(history_length):
+            # for k, v in results.items(): for vi in v: assert len(vi) == 3, vi
+            l = {k: [(res[2][i], res[1]) for res in v] for k, v in results.items()}
+            for k, v in perform_another_experimental_scoring_method(l).items():
+                each.setdefault(k, []).append(v)
+        return {k: sum(v) / len(v) for k, v in each.items()}
+
+    new_results = {}
+    all_opt_names = set()
+    for obj_name, obj_res in results.items():
+        all_res = {}
+        for fopt, opt_name in obj_res:
+            all_res.setdefault(fopt, []).append(opt_name)
+            all_opt_names.add(opt_name)
+        new_results[obj_name] = dict(sorted(all_res.items()))
+
+    limited_by_floating_point_precision = 53
+
+    best_ranks_and_counts = {}
+    for outer_rank in range(1, limited_by_floating_point_precision + 1):
+        for obj_name, all_res in new_results.items():
+            for fopt, opt_names in all_res.items():
+                dirty = False
+                for opt_name in set(opt_names):
+                    if opt_name in best_ranks_and_counts:
+                        rank, count = best_ranks_and_counts[opt_name]
+                        if rank == outer_rank:
+                            best_ranks_and_counts[opt_name] = (rank, count + 1)
+                            dirty = True
+                    else:
+                        best_ranks_and_counts[opt_name] = (outer_rank, 1)
+                        dirty = True
+                if dirty:
+                    break
+
+    scores = {k: 0.0 for k in all_opt_names}
+    for opt_name, (rank, count) in best_ranks_and_counts.items():
+        points = 2 ** (1 - rank)
+        count = min(count, limited_by_floating_point_precision)
+        scores[opt_name] = score = sum(points / 2**i for i in range(count))
+
+    return scores
diff --git a/thursday/utilities/utils.py b/thursday/utilities/utils.py
index f46fdc1..d9f7e2e 100644
--- a/thursday/utilities/utils.py
+++ b/thursday/utilities/utils.py
@@ -153,55 +153,6 @@ class AcquireForWriting:
         # move(self._altpath, self.filepath)  # assumes os.rename overwrites files
 
 
-def perform_another_experimental_scoring_method(results):
-    if len(results) and len(something := next(iter(results.values()))[0]) == 3:
-        history_length = len(something[2])
-        each = {}
-        # for i in (history_length - 1,):
-        for i in range(history_length):
-            # for k, v in results.items(): for vi in v: assert len(vi) == 3, vi
-            l = {k: [(res[2][i], res[1]) for res in v] for k, v in results.items()}
-            for k, v in perform_another_experimental_scoring_method(l).items():
-                each.setdefault(k, []).append(v)
-        return {k: sum(v) / len(v) for k, v in each.items()}
-
-    new_results = {}
-    all_opt_names = set()
-    for obj_name, obj_res in results.items():
-        all_res = {}
-        for fopt, opt_name in obj_res:
-            all_res.setdefault(fopt, []).append(opt_name)
-            all_opt_names.add(opt_name)
-        new_results[obj_name] = dict(sorted(all_res.items()))
-
-    limited_by_floating_point_precision = 53
-
-    best_ranks_and_counts = {}
-    for outer_rank in range(1, limited_by_floating_point_precision + 1):
-        for obj_name, all_res in new_results.items():
-            for fopt, opt_names in all_res.items():
-                dirty = False
-                for opt_name in set(opt_names):
-                    if opt_name in best_ranks_and_counts:
-                        rank, count = best_ranks_and_counts[opt_name]
-                        if rank == outer_rank:
-                            best_ranks_and_counts[opt_name] = (rank, count + 1)
-                            dirty = True
-                    else:
-                        best_ranks_and_counts[opt_name] = (outer_rank, 1)
-                        dirty = True
-                if dirty:
-                    break
-
-    scores = {k: 0.0 for k in all_opt_names}
-    for opt_name, (rank, count) in best_ranks_and_counts.items():
-        points = 2 ** (1 - rank)
-        count = min(count, limited_by_floating_point_precision)
-        scores[opt_name] = score = sum(points / 2**i for i in range(count))
-
-    return scores
-
-
 def needs_rerun(key, value):
     if value["duration"] < 0.0 or "history" not in value:
         return True
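
A quick usage sketch, outside the patch itself (the objective and optimizer names below are hypothetical): the relocated function expects results to map each objective name to a list of (fopt, opt_name) pairs, where a smaller fopt is a better final value; entries may also carry a third element, a history sequence, in which case the score is averaged over every history step. Because of the new "from .scoring import *" line, it stays importable from the package root:

    from thursday.utilities import perform_another_experimental_scoring_method

    results = {
        "sphere": [(0.001, "cma"), (0.010, "nelder-mead"), (0.010, "random")],
        "rosenbrock": [(0.500, "nelder-mead"), (0.020, "cma")],
    }
    scores = perform_another_experimental_scoring_method(results)
    print(scores)  # "cma" scores highest here: it ranks first on both objectives

Ranks award exponentially decaying points (2 ** (1 - rank) for the first win at a rank, halved for each repeat, capped at 53 terms), so an optimizer that consistently places first outscores one with scattered wins.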