changeset 6:828857591bb6

add lv example
author Lewin Bormann <lbo@spheniscida.de>
date Thu, 23 Dec 2021 14:28:48 +0100
parents f508e566dc78
children 9ffcd727ea5d
files gad.py lv.py
diffstat 2 files changed, 30 insertions(+), 5 deletions(-)
--- a/gad.py	Thu Dec 23 14:28:30 2021 +0100
+++ b/gad.py	Thu Dec 23 14:28:48 2021 +0100
@@ -39,8 +39,8 @@
         return OpPlus(self, self._autoconv(other))
     def __sub__(self, other):
         return OpMinus(self, self._autoconv(other))
-    def __neg__(self, other):
-        return OpMinus(self.ade.const(0), self._autoconv(other))
+    def __neg__(self):
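+        # Unary negation implemented as (0 - self).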
+        return OpMinus(self.ade.const(0), self._autoconv(self))
     def __mul__(self, other):
         return OpMult(self, self._autoconv(other))
     def __truediv__(self, other):
@@ -223,13 +223,17 @@
 @gradify
 def complex_calculation(x,y,z):
     a = x + y
-    b = x - z
+    b = z - x
     c = a * b
     for i in range(4):
         c = c + a*b
     return c, a, b, a*b
 
 @gradify
+def pres_calculation(x1, x2, x3):
+    return x1*x2 + exp(x1*x3)*cos(x2)
+
+@gradify
 def complex_calculation2(*x):
     y = np.array([x[i]+x[i+1]**2 for i in range(len(x)-1)])
     z = np.array([sqrt(log(e)) for e in y])
@@ -238,11 +242,11 @@
 # ...or automatically using @gradify
 # Equivalent to (without @gradify): print(ade.grad([complex_calculation(x,y,z)], [1,4,5]))
 before = time.time_ns()
-print(complex_calculation(1,4,5))
+print(pres_calculation(1,4,5))
 after = time.time_ns()
 print((after-before)/1e9)
 
 before = time.time_ns()
-print(complex_calculation2(*list(range(1, 10, 2)))[1].shape)
+print(complex_calculation2(*list(range(1, 100, 2)))[1].shape)
 after = time.time_ns()
 print((after-before)/1e9)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/lv.py	Thu Dec 23 14:28:48 2021 +0100
@@ -0,0 +1,21 @@
+"""Example: Get derivative of iterative Lotka-Volterra differential equation,
+using Tensorflow for AD.
+
+"""
+import numpy as np
+import tensorflow as tf
+
+
+def LV(N1, N2, eps1, eps2, gam1, gam2):
+    dt = tf.constant(1.)
+    states = [(N1, N2)]
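+    # Explicit-Euler update of the Lotka-Volterra system:
+    #   dN1/dt =  N1 * (eps1 - gam1 * N2)
+    #   dN2/dt = -N2 * (eps2 - gam2 * N1)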
+    for i in range(1, 13):
+        states.append((states[i-1][0] + states[i-1][0] * (eps1 - gam1*states[i-1][1]) * dt,
+                       states[i-1][1] - states[i-1][1] * (eps2 - gam2*states[i-1][0]) * dt))
+    return states[-1]
+
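+# persistent=True keeps the tape alive so tape.jacobian can be called more than once below.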
+with tf.GradientTape(persistent=True) as tape:
+    (N1, N2, eps1, eps2, gam1, gam2) = arg = [tf.Variable(x) for x in [120., 60., 7e-3, 4e-2, 5e-4, 5e-4]]
+    (fN1, fN2) = LV(N1, N2, eps1, eps2, gam1, gam2)
+
+print(tape.jacobian(fN1, arg))
+print(tape.jacobian(fN2, arg))
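
Since fN1 and fN2 are scalar tensors, each tape.jacobian call above returns a list of
six scalar tensors, one per tf.Variable in arg. A minimal sketch of stacking them into
a single 2x6 sensitivity matrix (`sens` is a hypothetical name, not used elsewhere):

    # Rows: d fN1 / d arg and d fN2 / d arg; columns follow the order of arg.
    sens = tf.stack([tf.stack(tape.jacobian(fN1, arg)),
                     tf.stack(tape.jacobian(fN2, arg))])
    print(sens.numpy())  # shape (2, 6)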