From a4081606f766fe5e24a706903ee0b8396bb47f2a Mon Sep 17 00:00:00 2001
From: Connor Olding
Date: Sat, 17 Jun 2017 17:12:59 +0000
Subject: [PATCH] shuffle by default

---
 optim_nn.py       | 7 ++-----
 optim_nn_core.py  | 8 +++++++-
 optim_nn_mnist.py | 7 ++-----
 3 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/optim_nn.py b/optim_nn.py
index fcc695b..736a6b5 100755
--- a/optim_nn.py
+++ b/optim_nn.py
@@ -910,11 +910,6 @@ def run(program, args=None):
         measure_error()
 
     while training and learner.next():
-        indices = np.arange(inputs.shape[0])
-        np.random.shuffle(indices)
-        shuffled_inputs = inputs[indices]
-        shuffled_outputs = outputs[indices]
-
         avg_loss, losses = ritual.train_batched(
-            shuffled_inputs, shuffled_outputs,
+            inputs, outputs,
             config.batch_size,
diff --git a/optim_nn_core.py b/optim_nn_core.py
index 24965be..d54056e 100644
--- a/optim_nn_core.py
+++ b/optim_nn_core.py
@@ -946,12 +946,18 @@ class Ritual: # i'm just making up names at this point
         return avg_mloss
 
     def train_batched(self, inputs, outputs, batch_size,
-                      return_losses=False, test_only=False):
+                      return_losses=False, test_only=False, shuffle=True):
         assert isinstance(return_losses, bool) or return_losses == 'both'
 
         if not test_only:
             self.en += 1
 
+        if shuffle:
+            indices = np.arange(inputs.shape[0])
+            np.random.shuffle(indices)
+            inputs = inputs[indices]
+            outputs = outputs[indices]
+
         cumsum_loss, cumsum_mloss = _0, _0
         batch_count = inputs.shape[0] // batch_size
         losses, mlosses = [], []
diff --git a/optim_nn_mnist.py b/optim_nn_mnist.py
index 8089810..fcc28e3 100755
--- a/optim_nn_mnist.py
+++ b/optim_nn_mnist.py
@@ -196,11 +196,6 @@ while learner.next():
             if isinstance(node, ActivityRegularizer):
                 node.reg.lamb = act_t * node.reg.lamb_orig # HACK
 
-    indices = np.arange(inputs.shape[0])
-    np.random.shuffle(indices)
-    shuffled_inputs = inputs[indices]
-    shuffled_outputs = outputs[indices]
-
     avg_loss, avg_mloss, losses, mlosses = ritual.train_batched(
-        shuffled_inputs, shuffled_outputs,
+        inputs, outputs,
         batch_size=bs,