From c02fba01e2a6f3810e0cec195df33ba0556056c1 Mon Sep 17 00:00:00 2001
From: Connor Olding
Date: Mon, 26 Jun 2017 00:16:51 +0000
Subject: [PATCH] various

use updated filenames.
don't use emnist by default.
tweak expando integer handling.
add some comments.
---
 onn.py       | 4 ++--
 onn_core.py  | 6 ++++--
 onn_mnist.py | 6 +++---
 3 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/onn.py b/onn.py
index 66fd8fa..c05608a 100755
--- a/onn.py
+++ b/onn.py
@@ -6,8 +6,8 @@
 # BIG TODO: ensure numpy isn't upcasting to float64 *anywhere*.
 # this is gonna take some work.
 
-from optim_nn_core import *
-from optim_nn_core import _check, _f, _0, _1
+from onn_core import *
+from onn_core import _check, _f, _0, _1
 
 import sys
 
diff --git a/onn_core.py b/onn_core.py
index d7e2dc5..881ef4e 100644
--- a/onn_core.py
+++ b/onn_core.py
@@ -825,6 +825,7 @@ class Model:
 
         for k, v in used.items():
             if not v:
+                # FIXME: lament undeclared without optim_nn.py!
                 lament("WARNING: unused weight", k)
 
     def save_weights(self, fn, overwrite=False):
@@ -844,6 +845,7 @@ class Model:
             data[:] = target.f
             counts[key] += 1
             if counts[key] > 1:
+                # FIXME: lament undeclared without optim_nn.py!
                 lament("WARNING: rewrote weight", key)
 
         f.close()
@@ -1084,7 +1086,7 @@ def cosmod(x):
 class SGDR(Learner):
     # Stochastic Gradient Descent with Restarts
     # paper: https://arxiv.org/abs/1608.03983
-    # NOTE: this is missing a couple features.
+    # NOTE: this is missing a couple of the proposed features.
 
     per_batch = True
 
@@ -1099,7 +1101,7 @@ class SGDR(Learner):
         self.expando = expando if expando is not None else lambda i: i
         if type(self.expando) == int:
             inc = self.expando
-            self.expando = self.expando = lambda i: inc
+            self.expando = lambda i: i * inc
 
         self.splits = []
         epochs = 0
diff --git a/onn_mnist.py b/onn_mnist.py
index f6c0dfa..46e2f91 100755
--- a/onn_mnist.py
+++ b/onn_mnist.py
@@ -1,11 +1,11 @@
 #!/usr/bin/env python3
 
-from optim_nn import *
-from optim_nn_core import _f
+from onn import *
+from onn_core import _f
 
 #np.random.seed(42069)
 
-use_emnist = True
+use_emnist = False
 
 measure_every_epoch = True
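
Note on the expando hunk in onn_core.py: when an integer was passed, the old
code's accidental double assignment left self.expando as a lambda that ignored
the restart index i and always returned the constant inc, so every SGDR
restart grew by the same fixed amount; the patched lambda scales with the
restart index instead. A minimal standalone sketch of the before/after
behavior follows -- the helper function names are illustrative only, not part
of onn_core.py:

    # sketch of the expando fix; `inc` and `i` mirror the names in
    # onn_core.py, but these helpers are not the actual SGDR class.

    def make_expando_old(expando):
        # pre-patch: the double assignment left a lambda that ignores
        # the restart index i and always returns the constant inc.
        if type(expando) == int:
            inc = expando
            return lambda i: inc
        return expando

    def make_expando_new(expando):
        # post-patch: an integer now scales with the restart index, so
        # each successive restart grows the epoch count linearly.
        if type(expando) == int:
            inc = expando
            return lambda i: i * inc
        return expando

    old = make_expando_old(4)
    new = make_expando_new(4)
    print([old(i) for i in range(4)])  # [4, 4, 4, 4]  -- constant
    print([new(i) for i in range(4)])  # [0, 4, 8, 12] -- per-restart scaling

Callables passed as expando are untouched by either version; only the
integer shorthand changes meaning.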