Mercurial > lbo > hg > autodiff
changeset 7:9ffcd727ea5d
Move examples into subdirectory
author | Lewin Bormann <lbo@spheniscida.de> |
---|---|
date | Mon, 27 Dec 2021 15:58:27 +0100 |
parents | 828857591bb6 |
children | f71ade2784c4 |
files | example/lv_autograd.py example/lv_tf.py lv.py |
diffstat | 3 files changed, 43 insertions(+), 21 deletions(-) [+] |
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/example/lv_autograd.py	Mon Dec 27 15:58:27 2021 +0100
@@ -0,0 +1,22 @@
+import autograd.numpy as np
+from autograd import jacobian
+
+Nsteps = 1000
+
+def LV(arg):
+    N1, N2 = arg[0:2]
+    for i in range(1, Nsteps):
+        _N1 = N1
+        N1 += N1 * (arg[2]-arg[4]*N2)
+        N2 += -N2 * (arg[3]-arg[5]*_N1)
+
+    return np.array([N1, N2])
+
+(N1, N2, eps1, eps2, gam1, gam2) = (120., 60., 7e-3, 4e-2, 5e-4, 5e-4)
+
+print(LV([N1, N2, eps1, eps2, gam1, gam2]))
+print(jacobian(LV)(np.array([N1, N2, eps1, eps2, gam1, gam2])))
+
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/example/lv_tf.py	Mon Dec 27 15:58:27 2021 +0100
@@ -0,0 +1,21 @@
+"""Example: Get derivative of iterative Lotka-Volterra differential equation,
+using Tensorflow for AD.
+
+"""
+import numpy as np
+import tensorflow as tf
+
+
+def LV(N1, N2, eps1, eps2, gam1, gam2):
+    dt = tf.constant(1.)
+    states = [(N1, N2)]
+    for i in range(1, 13):
+        states.append((states[i-1][0] + (states[i-1][0] * (eps1-gam1*states[i-1][1])) * dt, states[i-1][1] - states[i-1][1] * (eps2-gam2*states[i-1][0])) * dt)
+    return states[-1]
+
+with tf.GradientTape(persistent=True) as tape:
+    (N1, N2, eps1, eps2, gam1, gam2) = arg = [tf.Variable(x) for x in [120., 60., 7e-3, 4e-2, 5e-4, 5e-4]]
+    (fN1, fN2) = LV(N1, N2, eps1, eps2, gam1, gam2)
+
+print(tape.jacobian(fN1, arg))
+print(tape.jacobian(fN2, arg))
--- a/lv.py	Thu Dec 23 14:28:48 2021 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,21 +0,0 @@
-"""Example: Get derivative of iterative Lotka-Volterra differential equation,
-using Tensorflow for AD.
-
-"""
-import numpy as np
-import tensorflow as tf
-
-
-def LV(N1, N2, eps1, eps2, gam1, gam2):
-    dt = tf.constant(1.)
-    states = [(N1, N2)]
-    for i in range(1, 13):
-        states.append((states[i-1][0] + (states[i-1][0] * (eps1-gam1*states[i-1][1])) * dt, states[i-1][1] - states[i-1][1] * (eps2-gam2*states[i-1][0])) * dt)
-    return states[-1]
-
-with tf.GradientTape(persistent=True) as tape:
-    (N1, N2, eps1, eps2, gam1, gam2) = arg = [tf.Variable(x) for x in [120., 60., 7e-3, 4e-2, 5e-4, 5e-4]]
-    (fN1, fN2) = LV(N1, N2, eps1, eps2, gam1, gam2)
-
-print(tape.jacobian(fN1, arg))
-print(tape.jacobian(fN2, arg))