move experimental scoring method to its own file
parent db5473da43
commit 34713232e3

3 changed files with 48 additions and 49 deletions
@@ -1,5 +1,6 @@
 from .colors import *
 from .prog80 import prog
+from .scoring import *
 from .utils import *
 
 try:
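The hunk above adds "from .scoring import *" to what appears to be the package-level import list (the file header for this hunk is not preserved here, but the relative imports suggest the thursday/utilities package __init__). Since the new scoring.py defines no __all__, the moved function should stay reachable from the package itself; a minimal sketch, assuming the package is on the import path:

    # either import should work after this commit
    from thursday.utilities import perform_another_experimental_scoring_method
    from thursday.utilities.scoring import perform_another_experimental_scoring_method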
47  thursday/utilities/scoring.py  Normal file
@@ -0,0 +1,47 @@
+def perform_another_experimental_scoring_method(results):
+    if len(results) and len(something := next(iter(results.values()))[0]) == 3:
+        history_length = len(something[2])
+        each = {}
+        # for i in (history_length - 1,):
+        for i in range(history_length):
+            # for k, v in results.items(): for vi in v: assert len(vi) == 3, vi
+            l = {k: [(res[2][i], res[1]) for res in v] for k, v in results.items()}
+            for k, v in perform_another_experimental_scoring_method(l).items():
+                each.setdefault(k, []).append(v)
+        return {k: sum(v) / len(v) for k, v in each.items()}
+
+    new_results = {}
+    all_opt_names = set()
+    for obj_name, obj_res in results.items():
+        all_res = {}
+        for fopt, opt_name in obj_res:
+            all_res.setdefault(fopt, []).append(opt_name)
+            all_opt_names.add(opt_name)
+        new_results[obj_name] = dict(sorted(all_res.items()))
+
+    limited_by_floating_point_precision = 53
+
+    best_ranks_and_counts = {}
+    for outer_rank in range(1, limited_by_floating_point_precision + 1):
+        for obj_name, all_res in new_results.items():
+            for fopt, opt_names in all_res.items():
+                dirty = False
+                for opt_name in set(opt_names):
+                    if opt_name in best_ranks_and_counts:
+                        rank, count = best_ranks_and_counts[opt_name]
+                        if rank == outer_rank:
+                            best_ranks_and_counts[opt_name] = (rank, count + 1)
+                            dirty = True
+                    else:
+                        best_ranks_and_counts[opt_name] = (outer_rank, 1)
+                        dirty = True
+                if dirty:
+                    break
+
+    scores = {k: 0.0 for k in all_opt_names}
+    for opt_name, (rank, count) in best_ranks_and_counts.items():
+        points = 2 ** (1 - rank)
+        count = min(count, limited_by_floating_point_precision)
+        scores[opt_name] = score = sum(points / 2**i for i in range(count))
+
+    return scores
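A hypothetical usage sketch of the function added above; the data below is invented for illustration and is not taken from the repository. In the non-recursive case, results maps an objective name to a list of (fopt, optimizer_name) pairs; fopts are sorted ascending, so lower values earn better ranks, and roughly speaking each optimizer's best rank earns 2**(1 - rank) points, with repeats of that same best rank adding a halving series. When the entries are 3-tuples carrying a per-iteration history in the third slot, the function recurses over the history indices and averages the resulting scores.

    from thursday.utilities.scoring import perform_another_experimental_scoring_method

    # Invented benchmark results: objective name -> [(fopt, optimizer name), ...],
    # where fopt is the best objective value that optimizer reached (lower is better).
    results = {
        "sphere":     [(0.001, "opt_a"), (0.010, "opt_b")],
        "rosenbrock": [(0.500, "opt_b"), (0.500, "opt_a")],  # a tie for first place
    }

    scores = perform_another_experimental_scoring_method(results)
    # opt_a takes rank 1 twice (outright on sphere, tied on rosenbrock): 1 + 1/2 = 1.5
    # opt_b takes rank 1 once (the rosenbrock tie):                      1.0
    assert scores == {"opt_a": 1.5, "opt_b": 1.0}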
@@ -153,55 +153,6 @@ class AcquireForWriting:
         # move(self._altpath, self.filepath)  # assumes os.rename overwrites files
 
 
-def perform_another_experimental_scoring_method(results):
-    if len(results) and len(something := next(iter(results.values()))[0]) == 3:
-        history_length = len(something[2])
-        each = {}
-        # for i in (history_length - 1,):
-        for i in range(history_length):
-            # for k, v in results.items(): for vi in v: assert len(vi) == 3, vi
-            l = {k: [(res[2][i], res[1]) for res in v] for k, v in results.items()}
-            for k, v in perform_another_experimental_scoring_method(l).items():
-                each.setdefault(k, []).append(v)
-        return {k: sum(v) / len(v) for k, v in each.items()}
-
-    new_results = {}
-    all_opt_names = set()
-    for obj_name, obj_res in results.items():
-        all_res = {}
-        for fopt, opt_name in obj_res:
-            all_res.setdefault(fopt, []).append(opt_name)
-            all_opt_names.add(opt_name)
-        new_results[obj_name] = dict(sorted(all_res.items()))
-
-    limited_by_floating_point_precision = 53
-
-    best_ranks_and_counts = {}
-    for outer_rank in range(1, limited_by_floating_point_precision + 1):
-        for obj_name, all_res in new_results.items():
-            for fopt, opt_names in all_res.items():
-                dirty = False
-                for opt_name in set(opt_names):
-                    if opt_name in best_ranks_and_counts:
-                        rank, count = best_ranks_and_counts[opt_name]
-                        if rank == outer_rank:
-                            best_ranks_and_counts[opt_name] = (rank, count + 1)
-                            dirty = True
-                    else:
-                        best_ranks_and_counts[opt_name] = (outer_rank, 1)
-                        dirty = True
-                if dirty:
-                    break
-
-    scores = {k: 0.0 for k in all_opt_names}
-    for opt_name, (rank, count) in best_ranks_and_counts.items():
-        points = 2 ** (1 - rank)
-        count = min(count, limited_by_floating_point_precision)
-        scores[opt_name] = score = sum(points / 2**i for i in range(count))
-
-    return scores
-
-
 def needs_rerun(key, value):
     if value["duration"] < 0.0 or "history" not in value:
         return True
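For reference, the per-optimizer score computed in the function above (removed here and re-added in scoring.py) is a truncated geometric series; the cap of 53 terms is presumably a nod to the 53 significand bits of an IEEE 754 double, which seems to be what the name limited_by_floating_point_precision alludes to. A small sketch of the closed form, not part of the commit; closed_form_score is a hypothetical helper introduced only for illustration:

    # score(rank, count) = sum_{i=0}^{count-1} 2**(1 - rank) / 2**i
    #                    = 2**(1 - rank) * (2 - 2**(1 - count))
    def closed_form_score(rank, count):
        return 2 ** (1 - rank) * (2 - 2 ** (1 - count))

    assert closed_form_score(1, 3) == sum(2 ** (1 - 1) / 2 ** i for i in range(3))  # 1.75
    assert closed_form_score(2, 1) == 0.5  # one second-place finish is worth half a first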