import sys

import numpy as np

_f = np.float32

# just for speed, not strictly essential:
from scipy.special import expit as sigmoid

# used for numbering layers like Keras, and for keeping initialization consistent:
from collections import defaultdict, OrderedDict

_layer_counters = defaultdict(lambda: 0)


def lament(*args, **kwargs):
    # a fallback logger so warnings don't raise NameError;
    # optim_nn.py may shadow this with something fancier.
    print(*args, file=sys.stderr, **kwargs)


def _check(a):
    assert isinstance(a, np.ndarray) or type(a) == _f, type(a)
    assert a.dtype == _f, a.dtype
    return a


_0 = _f(0)
_1 = _f(1)
_2 = _f(2)
_inv2 = _f(1/2)
_sqrt2 = _f(np.sqrt(2))
_invsqrt2 = _f(1/np.sqrt(2))
_pi = _f(np.pi)


class LayerIncompatibility(Exception):
    pass

# Node Traversal {{{1


class DummyNode:
    name = "Dummy"

    def __init__(self, children=None, parents=None):
        self.children = children if children is not None else []
        self.parents = parents if parents is not None else []


def traverse(node_in, node_out, nodes=None, dummy_mode=False):
    # i have no idea if this is any algorithm in particular.
    # note: the `nodes` parameter is currently ignored; it gets overwritten below.
    nodes = nodes if nodes is not None else []

    # first, walk upwards from the output, marking every node that can reach it.
    seen_up = {}
    q = [node_out]
    while len(q) > 0:
        node = q.pop(0)
        seen_up[node] = True
        for parent in node.parents:
            q.append(parent)

    if dummy_mode:
        seen_up[node_in] = True

    # then, walk downwards from the input, keeping only the marked nodes,
    # and only adding a node once all of its parents have been added.
    nodes = []
    q = [node_in]
    while len(q) > 0:
        node = q.pop(0)
        if not seen_up.get(node, False):
            # this node cannot reach the output, so it's irrelevant; skip it.
            continue
        parents_added = (parent in nodes for parent in node.parents)
        if node not in nodes and all(parents_added):
            nodes.append(node)
        for child in node.children:
            q.append(child)

    if dummy_mode:
        nodes.remove(node_in)

    return nodes


def traverse_all(nodes_in, nodes_out, nodes=None):
    all_in = DummyNode(children=nodes_in)
    all_out = DummyNode(parents=nodes_out)
    return traverse(all_in, all_out, nodes, dummy_mode=True)

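# a minimal sketch (not part of the library) of what traverse_all computes,
# using DummyNode as a stand-in for real layers. the diamond graph below
# (a feeding b and c, both feeding d) comes out parent-before-child:
def _traversal_example():
    a, b, c, d = (DummyNode() for _ in range(4))
    for node, name in zip((a, b, c, d), "abcd"):
        node.name = name
    for parent, child in ((a, b), (a, c), (b, d), (c, d)):
        parent.children.append(child)
        child.parents.append(parent)
    order = traverse_all([a], [d])
    assert order.index(a) < order.index(b) < order.index(d)
    assert order.index(a) < order.index(c) < order.index(d)
    return order
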
# Initializations {{{1

# note: these are currently only implemented for 2D shapes.


def init_zeros(size, ins=None, outs=None):
    return np.zeros(size)


def init_ones(size, ins=None, outs=None):
    return np.ones(size)


def init_he_normal(size, ins, outs):
    s = np.sqrt(2 / ins)
    return np.random.normal(0, s, size=size)


def init_he_uniform(size, ins, outs):
    s = np.sqrt(6 / ins)
    return np.random.uniform(-s, s, size=size)


def init_glorot_normal(size, ins, outs):
    s = np.sqrt(2 / (ins + outs))
    return np.random.normal(0, s, size=size)


def init_glorot_uniform(size, ins, outs):
    s = np.sqrt(6 / (ins + outs))
    return np.random.uniform(-s, s, size=size)

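# a quick sanity check (illustrative, not part of the library): the He
# initializers target a variance of 2/ins and the Glorot initializers
# 2/(ins+outs), whether drawn from a normal or a uniform distribution.
# a uniform on [-s, s] has variance s**2/3, hence the 6 under the sqrt.
def _init_variance_check(ins=256, outs=128, n=100000):
    w = init_he_uniform(n, ins, outs)
    assert np.isclose(np.var(w), 2 / ins, rtol=0.1)
    w = init_glorot_normal(n, ins, outs)
    assert np.isclose(np.var(w), 2 / (ins + outs), rtol=0.1)
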
# Weight container {{{1


class Weights:
    # we may or may not contain weights -- or any information, for that matter.

    def __init__(self, **kwargs):
        self.f = None  # forward weights
        self.g = None  # backward weights (gradients)
        self.shape = None
        self.init = None
        self.allocator = None
        self.regularizer = None

        self.configure(**kwargs)

    def configure(self, **kwargs):
        for k, v in kwargs.items():
            getattr(self, k)  # ensures the key already exists
            setattr(self, k, v)

    @property
    def size(self):
        assert self.shape is not None
        return np.prod(self.shape)

    def allocate(self, *args, **kwargs):
        self.configure(**kwargs)

        # intentionally not using isinstance
        assert type(self.shape) == tuple, self.shape

        f, g = self.allocator(self.size)
        assert len(f) == self.size, "{} != {}".format(f.shape, self.size)
        assert len(g) == self.size, "{} != {}".format(g.shape, self.size)
        f[:] = self.init(self.size, *args)
        g[:] = 0  # gradients get overwritten by backprop; zero is a sane default.
        self.f = f.reshape(self.shape)
        self.g = g.reshape(self.shape)

    def forward(self):
        if self.regularizer is None:
            return 0.0
        return self.regularizer.forward(self.f)

    def backward(self):
        if self.regularizer is None:
            return 0.0
        return self.regularizer.backward(self.f)

    def update(self):
        if self.regularizer is None:
            return
        self.g += self.regularizer.backward(self.f)

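# a minimal sketch (illustrative) of how a layer uses Weights together with
# an allocator. the allocator hands back flat views into shared parameter
# and gradient arrays, the way Model.make_weights does further down:
def _weights_example():
    W = np.zeros(6, dtype=_f)
    dW = np.zeros(6, dtype=_f)
    w = Weights(init=init_he_uniform, shape=(2, 3))
    w.allocate(2, 3, allocator=lambda size: (W[:size], dW[:size]))
    # w.f and w.g are reshaped views: writing through them updates W and dW.
    w.g[:] = 1
    assert np.all(dW == 1)
    return w
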
# Loss functions {{{1


class Loss:
    pass


class CategoricalCrossentropy(Loss):
    # lifted from theano

    def __init__(self, eps=1e-6):
        self.eps = _f(eps)

    def forward(self, p, y):
        p = np.clip(p, self.eps, 1 - self.eps)
        f = np.sum(-y * np.log(p) - (1 - y) * np.log(1 - p), axis=-1)
        return np.mean(f)

    def backward(self, p, y):
        p = np.clip(p, self.eps, 1 - self.eps)
        df = (p - y) / (p * (1 - p))
        return df / len(y)


class Accuracy(Loss):
    # returns the fraction of categories correctly predicted.
    # utilizes argmax(), so it cannot be used for gradient descent.
    # use CategoricalCrossentropy for that instead.

    def forward(self, p, y):
        correct = np.argmax(p, axis=-1) == np.argmax(y, axis=-1)
        return np.mean(correct)

    def backward(self, p, y):
        raise NotImplementedError("cannot take the gradient of Accuracy")


class ResidualLoss(Loss):
    def forward(self, p, y):
        return np.mean(self.f(p - y))

    def backward(self, p, y):
        ret = self.df(p - y) / len(y)
        return ret


class Squared(ResidualLoss):
    def f(self, r):
        return np.square(r)

    def df(self, r):
        return 2 * r


class Absolute(ResidualLoss):
    def f(self, r):
        return np.abs(r)

    def df(self, r):
        return np.sign(r)

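# a quick finite-difference check (illustrative, not part of the library):
# CategoricalCrossentropy.backward should match the numerical gradient of
# its forward. note the convention: forward sums over the last axis and
# averages over the batch, so backward divides by the batch size to match.
def _loss_gradient_check(eps=1e-4):
    loss = CategoricalCrossentropy()
    p = np.random.uniform(0.1, 0.9, size=(4, 3)).astype(_f)
    y = np.random.uniform(0.0, 1.0, size=(4, 3)).astype(_f)
    num = np.zeros_like(p)
    for i in np.ndindex(*p.shape):
        pp = p.copy(); pp[i] += eps
        pm = p.copy(); pm[i] -= eps
        num[i] = (loss.forward(pp, y) - loss.forward(pm, y)) / (2 * eps)
    assert np.allclose(loss.backward(p, y), num, atol=1e-2)
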
# Regularizers {{{1


class Regularizer:
    pass


class L1L2(Regularizer):
    def __init__(self, l1=0.0, l2=0.0):
        self.l1 = _f(l1)
        self.l2 = _f(l2)

    def forward(self, X):
        f = _0
        if self.l1:
            f += np.sum(self.l1 * np.abs(X))
        if self.l2:
            f += np.sum(self.l2 * np.square(X))
        return f

    def backward(self, X):
        df = np.zeros_like(X)
        if self.l1:
            df += self.l1 * np.sign(X)
        if self.l2:
            df += self.l2 * 2 * X
        return df

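# for reference (illustrative): with l2 alone, forward is l2 * sum(X**2)
# and backward is its exact derivative, 2 * l2 * X.
def _l1l2_example():
    X = np.array([-2.0, 0.5, 1.0], dtype=_f)
    reg = L1L2(l2=0.01)
    assert np.isclose(reg.forward(X), 0.01 * (4.0 + 0.25 + 1.0))
    assert np.allclose(reg.backward(X), 0.02 * X)
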
# Optimizers {{{1


class Optimizer:
    def __init__(self, alpha=0.1):
        self.alpha = _f(alpha)  # learning rate
        self.reset()

    def reset(self):
        pass

    def compute(self, dW, W):
        return -self.alpha * dW

    def update(self, dW, W):
        W += self.compute(dW, W)


# the following optimizers are blatantly lifted from tiny-dnn:
# https://github.com/tiny-dnn/tiny-dnn/blob/master/tiny_dnn/optimizers/optimizer.h


class Momentum(Optimizer):
    def __init__(self, alpha=0.01, mu=0.9, nesterov=False):
        self.mu = _f(mu)  # momentum
        self.nesterov = bool(nesterov)

        super().__init__(alpha)

    def reset(self):
        self.Vprev = None

    def compute(self, dW, W):
        if self.Vprev is None:
            # the velocity starts at zero. (initializing it to a copy of dW
            # would make the first step move *along* the gradient, which is
            # backwards.)
            self.Vprev = np.zeros_like(dW)

        V = self.mu * self.Vprev - self.alpha * dW
        self.Vprev[:] = V
        if self.nesterov:
            return self.mu * V - self.alpha * dW

        return V


class RMSprop(Optimizer):
    # RMSprop generalizes* Adagrad, etc.

    # TODO: verify this is correct:
    # * RMSprop == Adagrad when
    #   RMSprop.mu == 1

    def __init__(self, alpha=0.0001, mu=0.99, eps=1e-8):
        self.mu = _f(mu)  # decay term
        self.eps = _f(eps)

        # one might consider the following equation when specifying mu:
        # mu = e**(-1/t)
        # default: t = -1/ln(0.99) = ~99.5
        # therefore the default of mu=0.99 means
        # an input decays to 1/e of its original amplitude over 99.5 epochs.
        # (this is from DSP, so how relevant it is in SGD is debatable)

        super().__init__(alpha)

    def reset(self):
        self.g = None

    def compute(self, dW, W):
        if self.g is None:
            self.g = np.zeros_like(dW)

        # basically apply a first-order low-pass filter to delta squared
        self.g[:] = self.mu * self.g + (1 - self.mu) * dW * dW
        # equivalent (though numerically different?):
        #self.g += (dW * dW - self.g) * (1 - self.mu)

        # finally sqrt it to complete the running root-mean-square approximation
        return -self.alpha * dW / (np.sqrt(self.g) + self.eps)


class Adam(Optimizer):
    # paper: https://arxiv.org/abs/1412.6980
    # Adam generalizes* RMSprop, and
    # adds a decay term to the regular (non-squared) delta, and
    # does some decay-gain voodoo. (i guess it's compensating
    # for the filtered deltas starting from zero)

    # * Adam == RMSprop when
    #   Adam.b1 == 0
    #   Adam.b2 == RMSprop.mu

    def __init__(self, alpha=0.002, b1=0.9, b2=0.999, eps=1e-8):
        self.b1 = _f(b1)  # decay term
        self.b2 = _f(b2)  # decay term
        self.b1_t_default = _f(b1)  # decay term power t
        self.b2_t_default = _f(b2)  # decay term power t
        self.eps = _f(eps)

        super().__init__(alpha)

    def reset(self):
        self.mt = None
        self.vt = None
        self.b1_t = self.b1_t_default
        self.b2_t = self.b2_t_default

    def compute(self, dW, W):
        if self.mt is None:
            self.mt = np.zeros_like(dW)
        if self.vt is None:
            self.vt = np.zeros_like(dW)

        # filter
        self.mt[:] = self.b1 * self.mt + (1 - self.b1) * dW
        self.vt[:] = self.b2 * self.vt + (1 - self.b2) * dW * dW

        ret = -self.alpha * (self.mt / (1 - self.b1_t)) \
                  / (np.sqrt(self.vt / (1 - self.b2_t)) + self.eps)

        # decay gain: advance the bias-correction powers *after* using them,
        # as tiny-dnn does, so the first step corrects by (1 - b1) rather
        # than (1 - b1**2).
        self.b1_t *= self.b1
        self.b2_t *= self.b2

        return ret


class Nadam(Optimizer):
    # paper: https://arxiv.org/abs/1412.6980
    # paper: http://cs229.stanford.edu/proj2015/054_report.pdf
    # TODO: double-check this implementation. also read the damn paper.
    # lifted from https://github.com/fchollet/keras/blob/5d38b04/keras/optimizers.py#L530
    # lifted from https://github.com/jpilaul/IFT6266_project/blob/master/Models/Algo_Momentum.py

    def __init__(self, alpha=0.002, b1=0.9, b2=0.999, eps=1e-8):
        self.b1 = _f(b1)  # decay term
        self.b2 = _f(b2)  # decay term
        self.eps = _f(eps)

        super().__init__(alpha)

    def reset(self):
        self.mt = None
        self.vt = None
        self.t = 0
        self.sched = 1

    def compute(self, dW, W):
        self.t += 1

        if self.mt is None:
            self.mt = np.zeros_like(dW)
        if self.vt is None:
            self.vt = np.zeros_like(dW)

        ut0 = self.b1 * (1 - 0.5 * 0.96**(self.t + 0))
        ut1 = self.b1 * (1 - 0.5 * 0.96**(self.t + 1))

        sched0 = self.sched * ut0
        sched1 = self.sched * ut0 * ut1
        self.sched = sched0

        gp = dW / (1 - sched0)

        self.mt[:] = self.b1 * self.mt + (1 - self.b1) * dW
        self.vt[:] = self.b2 * self.vt + (1 - self.b2) * np.square(dW)

        mtp = self.mt / (1 - sched1)
        vtp = self.vt / (1 - self.b2**self.t)

        mt_bar = (1 - ut0) * gp + ut1 * mtp

        return -self.alpha * mt_bar / (np.sqrt(vtp) + self.eps)

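# a small illustrative run (not part of the library): minimize the quadratic
# bowl f(W) = 0.5 * sum(W**2), whose gradient is simply W.
def _optimizer_example(steps=100):
    W = np.ones(4, dtype=_f)
    optim = Optimizer(alpha=0.1)  # plain gradient descent
    for _ in range(steps):
        dW = W.copy()  # gradient of 0.5 * sum(W**2) is just W
        optim.update(dW, W)
    # each step multiplies W by (1 - alpha), so it decays geometrically.
    assert np.all(np.abs(W) < 1e-3)
    return W
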
# Abstract Layers {{{1


class Layer:
    def __init__(self):
        self.parents = []
        self.children = []
        self.weights = OrderedDict()
        self.loss = None  # for activity regularizers
        self.input_shape = None
        self.output_shape = None
        kind = self.__class__.__name__
        global _layer_counters
        _layer_counters[kind] += 1
        self.name = "{}_{}".format(kind, _layer_counters[kind])
        self.unsafe = False  # disables assertions for better performance

    def __str__(self):
        return self.name

    # methods we might want to override:

    def forward(self, X):
        raise NotImplementedError("unimplemented", self)

    def forward_deterministic(self, X):
        return self.forward(X)

    def backward(self, dY):
        raise NotImplementedError("unimplemented", self)

    def make_shape(self, parent):
        if self.input_shape is None:
            self.input_shape = parent.output_shape
        if self.output_shape is None:
            self.output_shape = self.input_shape

    def do_feed(self, child):
        self.children.append(child)

    def be_fed(self, parent):
        self.parents.append(parent)

    # TODO: better names for these (still)
    def _propagate(self, edges, deterministic):
        if not self.unsafe:
            assert len(edges) == 1, self
        if deterministic:
            return self.forward_deterministic(edges[0])
        else:
            return self.forward(edges[0])

    def _backpropagate(self, edges):
        if len(edges) == 1:
            return self.backward(edges[0])
        return sum((self.backward(dY) for dY in edges))

    # general utility methods:

    def is_compatible(self, parent):
        return np.all(self.input_shape == parent.output_shape)

    def feed(self, child):
        assert self.output_shape is not None, self
        child.make_shape(self)
        if not child.is_compatible(self):
            fmt = "{} is incompatible with {}: shape mismatch: {} vs. {}"
            raise LayerIncompatibility(fmt.format(self, child, self.output_shape, child.input_shape))
        self.do_feed(child)
        child.be_fed(self)
        return child

    def validate_input(self, X):
        assert X.shape[1:] == self.input_shape, (str(self), X.shape[1:], self.input_shape)

    def validate_output(self, Y):
        assert Y.shape[1:] == self.output_shape, (str(self), Y.shape[1:], self.output_shape)

    def _new_weights(self, name, **kwargs):
        w = Weights(**kwargs)
        assert name not in self.weights, name
        self.weights[name] = w
        return w

    @property
    def size(self):
        return sum((w.size for w in self.weights.values()))

    def init(self, allocator):
        ins, outs = self.input_shape[0], self.output_shape[0]
        for k, w in self.weights.items():
            w.allocate(ins, outs, allocator=allocator)

    def propagate(self, values, deterministic):
        if not self.unsafe:
            assert self.parents, self
        edges = []
        for parent in self.parents:
            # TODO: skip over irrelevant nodes (if any)
            X = values[parent]
            if not self.unsafe:
                self.validate_input(X)
            edges.append(X)
        Y = self._propagate(edges, deterministic)
        if not self.unsafe:
            self.validate_output(Y)
        return Y

    def backpropagate(self, values):
        if not self.unsafe:
            assert self.children, self
        edges = []
        for child in self.children:
            # TODO: skip over irrelevant nodes (if any)
            dY = values[child]
            if not self.unsafe:
                self.validate_output(dY)
            edges.append(dY)
        dX = self._backpropagate(edges)
        if not self.unsafe:
            self.validate_input(dX)
        return dX

# Nonparametric Layers {{{1


class Input(Layer):
    def __init__(self, shape):
        assert shape is not None
        super().__init__()
        self.shape = tuple(shape)
        self.input_shape = self.shape
        self.output_shape = self.shape

    def forward(self, X):
        return X

    def backward(self, dY):
        #self.dY = dY
        return np.zeros_like(dY)


class Reshape(Layer):
    def __init__(self, new_shape):
        super().__init__()
        self.shape = tuple(new_shape)
        self.output_shape = self.shape

    def forward(self, X):
        self.batch_size = X.shape[0]
        return X.reshape(self.batch_size, *self.output_shape)

    def backward(self, dY):
        assert dY.shape[0] == self.batch_size
        return dY.reshape(self.batch_size, *self.input_shape)


class Flatten(Layer):
    def make_shape(self, parent):
        shape = parent.output_shape
        self.input_shape = shape
        self.output_shape = (np.prod(shape),)

    def forward(self, X):
        self.batch_size = X.shape[0]
        return X.reshape(self.batch_size, *self.output_shape)

    def backward(self, dY):
        assert dY.shape[0] == self.batch_size
        return dY.reshape(self.batch_size, *self.input_shape)


class ConstAffine(Layer):
    def __init__(self, a=1, b=0):
        super().__init__()
        self.a = _f(a)
        self.b = _f(b)

    def forward(self, X):
        return self.a * X + self.b

    def backward(self, dY):
        return dY * self.a


class Sum(Layer):
    def _propagate(self, edges, deterministic):
        return np.sum(edges, axis=0)

    def _backpropagate(self, edges):
        # the gradient of a sum distributes unchanged to every parent,
        # and gradients arriving from multiple children simply accumulate.
        return np.sum(edges, axis=0)


class ActivityRegularizer(Layer):
    def __init__(self, reg):
        super().__init__()
        assert isinstance(reg, Regularizer), reg
        self.reg = reg

    def forward(self, X):
        self.X = X
        self.loss = np.sum(self.reg.forward(X))
        return X

    def backward(self, dY):
        return dY + self.reg.backward(self.X)


class Dropout(Layer):
    def __init__(self, dropout=0.0):
        super().__init__()
        self.p = _f(1 - dropout)
        assert 0 <= self.p <= 1

    def forward(self, X):
        self.mask = (np.random.rand(*X.shape) < self.p) / self.p
        return X * self.mask

    def forward_deterministic(self, X):
        #self.mask = _1
        return X

    def backward(self, dY):
        return dY * self.mask

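# a quick note on the Dropout scaling above (illustrative): dividing the
# mask by p keeps the *expected* activation equal to X, so nothing needs
# rescaling at test time (forward_deterministic is just the identity).
def _dropout_expectation_check(n=100000):
    drop = Dropout(0.5)
    X = np.ones((1, n), dtype=_f)
    Y = drop.forward(X)
    assert np.isclose(np.mean(Y), 1.0, rtol=0.05)
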
# Activation Layers {{{2


class Sigmoid(Layer):  # aka Logistic, Expit (inverse of Logit)
    def forward(self, X):
        self.sig = sigmoid(X)
        return self.sig

    def backward(self, dY):
        return dY * self.sig * (1 - self.sig)


class Softplus(Layer):
    # integral of Sigmoid.

    def forward(self, X):
        self.X = X
        return np.log(1 + np.exp(X))

    def backward(self, dY):
        return dY * sigmoid(self.X)


class Tanh(Layer):
    def forward(self, X):
        self.sig = np.tanh(X)
        return self.sig

    def backward(self, dY):
        return dY * (1 - self.sig * self.sig)


class LeCunTanh(Layer):
    # paper: http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
    # paper: http://yann.lecun.com/exdb/publis/pdf/lecun-89.pdf
    # scaled such that f([-1, 1]) = [-1, 1].
    # helps preserve an input variance of 1.
    # second derivative peaks around an input of ±1.

    def forward(self, X):
        self.sig = np.tanh(2 / 3 * X)
        return 1.7159 * self.sig

    def backward(self, dY):
        return dY * (2 / 3 * 1.7159) * (1 - self.sig * self.sig)


class Relu(Layer):
    def forward(self, X):
        self.cond = X >= 0
        return np.where(self.cond, X, 0)

    def backward(self, dY):
        return np.where(self.cond, dY, 0)


class Elu(Layer):
    # paper: https://arxiv.org/abs/1511.07289

    def __init__(self, alpha=1):
        super().__init__()
        self.alpha = _f(alpha)

    def forward(self, X):
        self.cond = X >= 0
        self.neg = self.alpha * (np.exp(X) - 1)
        return np.where(self.cond, X, self.neg)

    def backward(self, dY):
        # the derivative of alpha * (exp(x) - 1) is itself plus alpha.
        return dY * np.where(self.cond, 1, self.neg + self.alpha)


class GeluApprox(Layer):
    # paper: https://arxiv.org/abs/1606.08415
    # plot: https://www.desmos.com/calculator/ydzgtccsld

    def forward(self, X):
        self.a = 1.704 * X
        self.sig = sigmoid(self.a)
        return X * self.sig

    def backward(self, dY):
        return dY * self.sig * (1 + self.a * (1 - self.sig))


class Softmax(Layer):
    def __init__(self, axis=-1):
        super().__init__()
        self.axis = int(axis)

    def forward(self, X):
        alpha = np.max(X, axis=self.axis, keepdims=True)
        num = np.exp(X - alpha)
        den = np.sum(num, axis=self.axis, keepdims=True)
        self.sm = num / den
        return self.sm

    def backward(self, dY):
        return (dY - np.sum(dY * self.sm, axis=self.axis, keepdims=True)) * self.sm


class LogSoftmax(Softmax):
    def __init__(self, axis=-1, eps=1e-6):
        super().__init__(axis)
        self.eps = _f(eps)

    def forward(self, X):
        return np.log(super().forward(X) + self.eps)

    def backward(self, dY):
        return dY - np.sum(dY, axis=self.axis, keepdims=True) * self.sm


class Cos(Layer):
    # performs well on MNIST for some strange reason.

    def forward(self, X):
        self.X = X
        return np.cos(X)

    def backward(self, dY):
        return dY * -np.sin(self.X)

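# an illustrative finite-difference check (not part of the library): for a
# smooth stateless activation layer, backward(ones) should match the
# numerical derivative of forward().
def _activation_gradient_check(layer=None, eps=1e-4):
    layer = layer if layer is not None else Tanh()
    X = np.random.uniform(-2, 2, size=(1, 8)).astype(_f)
    Y0 = layer.forward(X - eps)
    Y1 = layer.forward(X + eps)
    num = (Y1 - Y0) / (2 * eps)
    ana = layer.backward(np.ones_like(X))
    assert np.allclose(ana, num, atol=1e-2), (ana, num)
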
# Parametric Layers {{{1


class Dense(Layer):
    serialized = {
        'W': 'coeffs',
        'b': 'biases',
    }

    def __init__(self, dim, init=init_he_uniform, reg_w=None, reg_b=None):
        super().__init__()
        self.dim = int(dim)
        self.output_shape = (dim,)
        self.coeffs = self._new_weights('coeffs', init=init, regularizer=reg_w)
        self.biases = self._new_weights('biases', init=init_zeros, regularizer=reg_b)

    def make_shape(self, parent):
        shape = parent.output_shape
        self.input_shape = shape
        assert len(shape) == 1, shape
        self.coeffs.shape = (shape[0], self.dim)
        self.biases.shape = (1, self.dim)

    def forward(self, X):
        self.X = X
        return X.dot(self.coeffs.f) + self.biases.f

    def backward(self, dY):
        self.coeffs.g[:] = self.X.T.dot(dY)
        self.biases.g[:] = dY.sum(0, keepdims=True)
        return dY.dot(self.coeffs.f.T)

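# for reference (illustrative): with Y = X.W + b, where X is (batch, ins)
# and W is (ins, outs), the backward pass above is just the chain rule:
#   dL/dW = X^T . dY              -- shape (ins, outs), same as W
#   dL/db = sum of dY over batch  -- shape (1, outs),   same as b
#   dL/dX = dY . W^T              -- shape (batch, ins), same as X
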
# Models {{{1


class Model:
    def __init__(self, nodes_in, nodes_out, unsafe=False):
        nodes_in = [nodes_in] if isinstance(nodes_in, Layer) else nodes_in
        nodes_out = [nodes_out] if isinstance(nodes_out, Layer) else nodes_out
        assert type(nodes_in) == list, type(nodes_in)
        assert type(nodes_out) == list, type(nodes_out)
        self.nodes_in = nodes_in
        self.nodes_out = nodes_out
        self.nodes = traverse_all(self.nodes_in, self.nodes_out)
        self.make_weights()
        for node in self.nodes:
            node.unsafe = unsafe

    @property
    def ordered_nodes(self):
        # deprecated? we don't guarantee an order like we did before.
        return self.nodes

    def make_weights(self):
        self.param_count = sum((node.size for node in self.nodes))
        self.W = np.zeros(self.param_count, dtype=_f)
        self.dW = np.zeros(self.param_count, dtype=_f)

        offset = 0
        for node in self.nodes:
            if node.size > 0:
                inner_offset = 0

                def allocate(size):
                    nonlocal inner_offset
                    o = offset + inner_offset
                    ret = self.W[o:o+size], self.dW[o:o+size]
                    inner_offset += size
                    assert len(ret[0]) == len(ret[1])
                    assert size == len(ret[0]), (size, len(ret[0]))
                    return ret

                node.init(allocate)
                assert inner_offset <= node.size, "Layer {} allocated more weights than it said it would".format(node)
                # i don't care if "less" is grammatically incorrect.
                # you're mom is grammatically incorrect.
                assert inner_offset >= node.size, "Layer {} allocated less weights than it said it would".format(node)
                offset += node.size

    def forward(self, X, deterministic=False):
        values = dict()
        input_node = self.nodes[0]
        output_node = self.nodes[-1]
        values[input_node] = input_node._propagate(np.expand_dims(X, 0), deterministic)
        for node in self.nodes[1:]:
            values[node] = node.propagate(values, deterministic)
        return values[output_node]

    def backward(self, error):
        values = dict()
        output_node = self.nodes[-1]
        values[output_node] = output_node._backpropagate(np.expand_dims(error, 0))
        for node in reversed(self.nodes[:-1]):
            values[node] = node.backpropagate(values)
        return self.dW

    def regulate_forward(self):
        loss = _0
        for node in self.nodes:
            if node.loss is not None:
                loss += node.loss
            for k, w in node.weights.items():
                loss += w.forward()
        return loss

    def regulate(self):
        for node in self.nodes:
            for k, w in node.weights.items():
                w.update()

    def load_weights(self, fn):
        # seemingly compatible with keras' Dense layers.
        import h5py
        open(fn).close()  # just ensure the file exists (python's error is better)
        f = h5py.File(fn, 'r')
        weights = {}
        def visitor(name, obj):
            if isinstance(obj, h5py.Dataset):
                weights[name.split('/')[-1]] = np.array(obj[:], dtype=_f)
        f.visititems(visitor)
        f.close()

        used = {}
        for k in weights.keys():
            used[k] = False

        nodes = [node for node in self.nodes if node.size > 0]
        for node in nodes:
            full_name = str(node).lower()
            for s_name, o_name in node.serialized.items():
                key = full_name + '_' + s_name
                data = weights[key]
                target = getattr(node, o_name)
                target.f[:] = data
                used[key] = True

        for k, v in used.items():
            if not v:
                lament("WARNING: unused weight", k)

    def save_weights(self, fn, overwrite=False):
        import h5py
        # 'w-' refuses to clobber an existing file, which is what we want
        # unless overwrite is explicitly requested.
        f = h5py.File(fn, 'w' if overwrite else 'w-')

        counts = defaultdict(lambda: 0)

        nodes = [node for node in self.nodes if node.size > 0]
        for node in nodes:
            full_name = str(node).lower()
            grp = f.create_group(full_name)
            for s_name, o_name in node.serialized.items():
                key = full_name + '_' + s_name
                target = getattr(node, o_name)
                data = grp.create_dataset(key, target.shape, dtype=_f)
                data[:] = target.f
                counts[key] += 1
                if counts[key] > 1:
                    lament("WARNING: rewrote weight", key)

        f.close()

    def print_graph(self, file=sys.stdout):
        print('digraph G {', file=file)
        for node in self.nodes:
            children = [str(n) for n in node.children]
            if children:
                sep = '->'
                print('\t' + str(node) + sep + (';\n\t' + str(node) + sep).join(children) + ';', file=file)
        print('}', file=file)

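# a minimal sketch (illustrative) of assembling and running a model:
def _model_example():
    x = Input((2,))
    y = x.feed(Dense(4)).feed(Tanh()).feed(Dense(1))
    model = Model(x, y)
    data = np.random.randn(8, 2).astype(_f)
    out = model.forward(data, deterministic=True)
    assert out.shape == (8, 1)
    model.print_graph()  # dumps the wiring as graphviz dot text
    return model
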
# Rituals {{{1


class Ritual:  # i'm just making up names at this point
    def __init__(self, learner=None, loss=None, mloss=None):
        self.learner = learner if learner is not None else Learner(Optimizer())
        self.loss = loss if loss is not None else Squared()
        self.mloss = mloss if mloss is not None else self.loss
        self.model = None

    def reset(self):
        self.learner.reset(optim=True)
        self.en = 0
        self.bn = 0

    def measure(self, p, y):
        return self.mloss.forward(p, y)

    def forward(self, p, y):
        return self.loss.forward(p, y) + self.model.regulate_forward()

    def backward(self, p, y):
        return self.loss.backward(p, y)

    def learn(self, inputs, outputs):
        predicted = self.model.forward(inputs)
        self.model.backward(self.backward(predicted, outputs))
        self.model.regulate()
        return predicted

    def update(self):
        self.learner.optim.update(self.model.dW, self.model.W)

    def prepare(self, model):
        self.en = 0
        self.bn = 0
        self.model = model

    def train_batched_gen(self, generator, batch_count,
                          return_losses=False, test_only=False):
        assert isinstance(return_losses, bool) or return_losses == 'both'

        if not test_only:
            self.en += 1

        cumsum_loss, cumsum_mloss = _0, _0
        losses, mlosses = [], []

        prev_batch_size = None

        for b in range(batch_count):
            if not test_only:
                self.bn += 1

            # TODO: pass a GeneratorData object containing en, bn, ritual/model fields.
            # ...is there a pythonic way of doing that?
            batch_inputs, batch_outputs = next(generator)

            batch_size = batch_inputs.shape[0]
            assert batch_size == prev_batch_size or prev_batch_size is None, \
                   "non-constant batch size (got {}, expected {})".format(
                       batch_size, prev_batch_size)  # TODO: lift this restriction
            prev_batch_size = batch_size

            # same from hereon

            if not test_only and self.learner.per_batch:
                self.learner.batch(b / batch_count)

            if test_only:
                predicted = self.model.forward(batch_inputs, deterministic=True)
            else:
                predicted = self.learn(batch_inputs, batch_outputs)
                self.update()

            if return_losses == 'both':
                batch_loss = self.forward(predicted, batch_outputs)
                if np.isnan(batch_loss):
                    raise Exception("nan")
                losses.append(batch_loss)
                cumsum_loss += batch_loss

            # NOTE: this can use the non-deterministic predictions. fixme?
            batch_mloss = self.measure(predicted, batch_outputs)
            if np.isnan(batch_mloss):
                raise Exception("nan")
            if return_losses:
                mlosses.append(batch_mloss)
            cumsum_mloss += batch_mloss

        avg_mloss = cumsum_mloss / _f(batch_count)
        if return_losses == 'both':
            avg_loss = cumsum_loss / _f(batch_count)
            return avg_loss, avg_mloss, losses, mlosses
        elif return_losses:
            return avg_mloss, mlosses
        return avg_mloss

    def train_batched(self, inputs, outputs, batch_size,
                      return_losses=False, test_only=False, shuffle=True):
        assert isinstance(return_losses, bool) or return_losses == 'both'

        if not test_only:
            self.en += 1

        if shuffle:
            indices = np.arange(inputs.shape[0])
            np.random.shuffle(indices)
            inputs = inputs[indices]
            outputs = outputs[indices]

        cumsum_loss, cumsum_mloss = _0, _0
        batch_count = inputs.shape[0] // batch_size
        losses, mlosses = [], []

        assert inputs.shape[0] % batch_size == 0, \
               "inputs is not evenly divisible by batch_size"  # TODO: lift this restriction
        for b in range(batch_count):
            if not test_only:
                self.bn += 1

            bi = b * batch_size
            batch_inputs  = inputs[ bi:bi+batch_size]
            batch_outputs = outputs[bi:bi+batch_size]

            # same from hereon

            if not test_only and self.learner.per_batch:
                self.learner.batch(b / batch_count)

            if test_only:
                predicted = self.model.forward(batch_inputs, deterministic=True)
            else:
                predicted = self.learn(batch_inputs, batch_outputs)
                self.update()

            if return_losses == 'both':
                batch_loss = self.forward(predicted, batch_outputs)
                if np.isnan(batch_loss):
                    raise Exception("nan")
                losses.append(batch_loss)
                cumsum_loss += batch_loss

            # NOTE: this can use the non-deterministic predictions. fixme?
            batch_mloss = self.measure(predicted, batch_outputs)
            if np.isnan(batch_mloss):
                raise Exception("nan")
            if return_losses:
                mlosses.append(batch_mloss)
            cumsum_mloss += batch_mloss

        avg_mloss = cumsum_mloss / _f(batch_count)
        if return_losses == 'both':
            avg_loss = cumsum_loss / _f(batch_count)
            return avg_loss, avg_mloss, losses, mlosses
        elif return_losses:
            return avg_mloss, mlosses
        return avg_mloss

    def test_batched(self, *args, **kwargs):
        return self.train_batched(*args, test_only=True, **kwargs)

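# a minimal end-to-end training sketch (illustrative, not part of the
# library): fit XOR with a tiny Dense network. the exact loss reached
# depends on the random initialization.
def _training_example():
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=_f)
    Y = np.array([[0], [1], [1], [0]], dtype=_f)

    x = Input((2,))
    y = x.feed(Dense(8)).feed(Tanh()).feed(Dense(1))
    model = Model(x, y)

    ritual = Ritual(learner=Learner(Adam(alpha=0.01), epochs=1000))
    ritual.prepare(model)
    while ritual.learner.next():
        mloss = ritual.train_batched(X, Y, batch_size=4)
    return mloss
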
# Learners {{{1


class Learner:
    per_batch = False

    def __init__(self, optim, epochs=100, rate=None):
        assert isinstance(optim, Optimizer)
        self.optim = optim
        self.start_rate = optim.alpha if rate is None else _f(rate)
        self.epochs = int(epochs)
        self.reset()

    def reset(self, optim=False):
        self.started = False
        self.epoch = 0
        if optim:
            self.optim.reset()

    @property
    def epoch(self):
        return self._epoch

    @epoch.setter
    def epoch(self, new_epoch):
        self._epoch = int(new_epoch)
        if 0 <= self.epoch <= self.epochs:
            self.rate = self.rate_at(self._epoch)

    @property
    def rate(self):
        return self.optim.alpha

    @rate.setter
    def rate(self, new_rate):
        self.optim.alpha = new_rate

    def rate_at(self, epoch):
        return self.start_rate

    def next(self):
        # prepares the next epoch. returns whether or not to continue training.
        if not self.started:
            self.started = True
        self.epoch += 1
        if self.epoch > self.epochs:
            return False
        return True

    def batch(self, progress):  # TODO: rename
        # interpolates rates between epochs.
        # unlike epochs, we do not store batch number as a state.
        # i.e. calling next() will not respect progress.
        assert 0 <= progress <= 1
        self.rate = self.rate_at(self._epoch - 1 + progress)

    @property
    def final_rate(self):
        return self.rate_at(self.epochs - 1)


class AnnealingLearner(Learner):
    def __init__(self, optim, epochs=100, rate=None, halve_every=10):
        self.halve_every = _f(halve_every)
        self.anneal = _f(0.5**(1/self.halve_every))
        super().__init__(optim, epochs, rate)

    def rate_at(self, epoch):
        return self.start_rate * self.anneal**epoch

def cosmod(x):
    # plot: https://www.desmos.com/calculator/hlgqmyswy2
    return (_1 + np.cos((x % _1) * _pi)) * _inv2


class SGDR(Learner):
    # Stochastic Gradient Descent with Restarts
    # paper: https://arxiv.org/abs/1608.03983
    # NOTE: this is missing a couple of the proposed features.

    per_batch = True

    def __init__(self, optim, epochs=100, rate=None,
                 restarts=0, restart_decay=0.5, callback=None,
                 expando=0):
        self.restart_epochs = int(epochs)
        self.decay = _f(restart_decay)
        self.restarts = int(restarts)
        self.restart_callback = callback
        # TODO: rename expando to something not insane
        self.expando = expando if expando is not None else lambda i: i
        if type(self.expando) == int:
            inc = self.expando
            self.expando = lambda i: i * inc

        self.splits = []
        epochs = 0
        for i in range(0, self.restarts + 1):
            split = epochs + self.restart_epochs + int(self.expando(i))
            self.splits.append(split)
            epochs = split
        super().__init__(optim, epochs, rate)

    def split_num(self, epoch):
        shit = [0] + self.splits  # hack
        for i in range(0, len(self.splits)):
            if epoch < self.splits[i]:
                sub_epoch = epoch - shit[i]
                next_restart = self.splits[i] - shit[i]
                return i, sub_epoch, next_restart
        if epoch == self.splits[-1]:
            return len(self.splits) - 1, epoch, self.splits[-1]
        raise Exception('this should never happen.')

    def rate_at(self, epoch):
        restart, sub_epoch, next_restart = self.split_num(epoch)
        x = _f(sub_epoch) / _f(next_restart)
        return self.start_rate * self.decay**_f(restart) * cosmod(x)

    def next(self):
        if not super().next():
            return False
        restart, sub_epoch, next_restart = self.split_num(self.epoch)
        if restart > 0 and sub_epoch == 0:
            if self.restart_callback is not None:
                self.restart_callback(restart)
        return True


class TriangularCLR(Learner):
    # note: i haven't actually read (nor seen) the paper(s) on CLR,
    # but this case (triangular) should be pretty difficult to get wrong.

    per_batch = True

    def __init__(self, optim, epochs=400, upper_rate=None, lower_rate=0,
                 frequency=100, callback=None):
        # NOTE: start_rate is treated as upper_rate
        self.frequency = int(frequency)
        assert self.frequency > 0
        self.callback = callback
        self.lower_rate = _f(lower_rate)
        super().__init__(optim, epochs, upper_rate)

    def _t(self, epoch):
        # NOTE: this could probably be simplified
        offset = self.frequency / 2
        return np.abs(((epoch + offset) % self.frequency) - offset) / offset

    def rate_at(self, epoch):
        # NOTE: start_rate is treated as upper_rate
        return self._t(epoch) * (self.start_rate - self.lower_rate) + self.lower_rate

    def next(self):
        if not super().next():
            return False
        if self.epoch > 1 and self.epoch % self.frequency == 0:
            if self.callback is not None:
                self.callback(self.epoch // self.frequency)
        return True


class SineCLR(TriangularCLR):
    def _t(self, epoch):
        return np.sin(_pi * _inv2 * super()._t(epoch))


class WaveCLR(TriangularCLR):
    def _t(self, epoch):
        return _inv2 * (_1 - np.cos(_pi * super()._t(epoch)))
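
# a small illustrative dump of a restart schedule (not part of the library):
# SGDR with 3 epochs per cycle and 2 restarts sweeps the rate from its peak
# down towards zero via cosmod, then jumps back up, decayed by 0.5 each time.
def _schedule_example():
    learner = SGDR(Optimizer(alpha=1.0), epochs=3, restarts=2)
    rates = [learner.rate_at(e) for e in range(learner.epochs)]
    # rates[0] is the full 1.0; rates[3] restarts at 0.5; rates[6] at 0.25.
    assert np.isclose(rates[0], 1.0) and np.isclose(rates[3], 0.5)
    return rates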