#!/usr/bin/env python3

import downhill
import numpy as np
import theano
import theano.tensor as TT

# Match the numpy dtype character to Theano's configured precision:
# 'f' (float32) when floatX is float32, otherwise 'd' (float64).
FLOAT = 'df'[theano.config.floatX == 'float32']

def rand(a, b):
    """Return an (a, b) matrix of standard-normal noise in the right dtype."""
    return np.random.randn(a, b).astype(FLOAT)

A, B, K = 20, 5, 3

# Set up a rank-K matrix factorization problem to optimize: find factors
# u and v whose product approximates an (A, B) data matrix.
u = theano.shared(rand(A, K), name='u')
v = theano.shared(rand(K, B), name='v')
z = TT.matrix()
# Squared reconstruction error, plus an L1 penalty on u (encouraging
# sparsity) and an L2 penalty on v (encouraging small entries).
err = TT.sqr(z - TT.dot(u, v))
loss = err.mean() + abs(u).mean() + (v * v).mean()

# Create a synthetic data matrix to fit: a rank-K product plus noise.
y = np.dot(rand(A, K), rand(K, B)) + rand(A, B)

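# As a quick illustration (an addition, not part of the original example),
# the symbolic loss can be compiled with Theano and evaluated on the data
# matrix to see its value before optimization:
loss_fn = theano.function([z], loss)
print('initial loss =', loss_fn(y))
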
# Quantities to monitor during optimization: the raw reconstruction error
# and the fraction of near-zero entries in u and v.
monitors = (('err', err.mean()),
            ('|u|<0.1', (abs(u) < 0.1).mean()),
            ('|v|<0.1', (abs(v) < 0.1).mean()))

# For reference, the signature of downhill.minimize:
#
#   minimize(loss, train, batch_size=32, monitor_gradients=False,
#            monitors=(), valid=None, params=None, inputs=None,
#            algo='rmsprop', updates=(), train_batches=None,
#            valid_batches=None, **kwargs)

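# For finer-grained control, downhill also exposes a two-step interface
# (a sketch assuming downhill's build/iterate API; not part of the
# original example, so treat the exact keyword arguments as assumptions):
#
#   opt = downhill.build('rmsprop', loss=loss, monitors=monitors)
#   for train_metrics, valid_metrics in opt.iterate([y], patience=0):
#       print(train_metrics['err'])
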
downhill.minimize(
    loss=loss,
    train=[y],
    patience=0,
    batch_size=A,                 # Process y as a single batch.
    max_gradient_norm=1,          # Prevent gradient explosion!
    learning_rate=0.1,
    monitors=monitors,
    monitor_gradients=True)

# Print out the optimized coefficients u and basis v.
print('u =', u.get_value())
print('v =', v.get_value())
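
# A final sanity check (an addition, not part of the original example):
# measure how well the learned factors reconstruct the data matrix.
recon = np.dot(u.get_value(), v.get_value())
print('reconstruction MSE =', ((y - recon) ** 2).mean())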