Reputation: 1
I've written code in Python which implements the Newton-Raphson method to solve a system of nonlinear equations.
The specific problem I've taken is from Mark Newman's Computational Physics, Exercise 6.17 (Nonlinear circuits).
import numpy as np
from numpy.linalg import solve, norm
from math import e
#DATA
vp= 5. #V_plus in volts
r1, r2, r3, r4 = 1., 4., 3., 2. #k-ohm resistances
i = 3. #A constant originally in nA
vt = 0.05 #V_t in volts
def f(x):
    '''
    Evaluates f(x), where x is a 2-dim vector of the voltages v1 and v2
    '''
    v1, v2 = x[0], x[1]
    y = np.array([(v1-vp)/r1 + v1/r2 + i*(e**((v1-v2)/vt)-1), (vp-v2)/r3 - v2/r4 + i*(e**((v1-v2)/vt)-1)])
    print y
    return y
def gradf(x):
    '''
    Jacobian ('grad') of f(x), where x is an n-dimensional vector
    '''
    v1, v2 = x[0], x[1]
    m = np.array([[1./r1 + 1./r2 + (i/vt)*e**((v1-v2)/vt), (i/vt)*e**((v1-v2)/vt)],\
                  [(-i/vt)*e**((v1-v2)/vt), -1*(1./r3 +1./r4 +(i/vt)*e**((v1-v2)/vt))]], dtype = np.float64)  # the matrix for the 'grad' f function
    print m
    return m
def cls_newton(x):
    '''
    Classroom implementation of the Newton-Raphson method
    '''
    v1, v2 = x[0], x[1]
    f_v1 = 1./r1 + 1./r2 + (i/vt)*e**((v1-v2)/vt)
    f_v2 = (-i/vt)*e**((v1-v2)/vt)
    g_v1 = (i/vt)*e**((v1-v2)/vt)
    g_v2 = -1*(1./r3 +1./r4 +(i/vt)*e**((v1-v2)/vt))
    f = (v1-vp)/r1 + v1/r2 + i*(e**((v1-v2)/vt)-1)
    g = (vp-v2)/r3 - v2/r4 + i*(e**((v1-v2)/vt)-1)
    print f
    print g
    print f_v2, g_v1, g_v1, f_v1
    v1n = v1 - (f*g_v2 - g*f_v2)/(f_v1*g_v2 - g_v1*f_v2)
    v2n = v2 - (g*f_v1 - f*g_v1)/(f_v1*g_v2 - g_v1*f_v2)
    print v1n
    print v2n
    return np.array([v1n,v2n])
x1 = np.array([4., 5.]) #initial guess of roots are 4. and 5. volts
error = 1e-6 # permissible error
i = 0 # iteration counter
while norm(x1)>error and i < 50:
    delta = solve(gradf(x1), f(x1))
    x2 = x1 - delta
    print x2
    print 'x1 = {0}, x2 = {1}'.format(x1, x2)  # test line
    x1 = x2
    print x1
    i+=1
rt = x1 # estimated root of the equation
print 'The root of the equation is ' + str(rt) + '\n' + 'f(root) = ' + str(f(rt))
print 'No. of iterations: ' + str(i)
In this code I've written functions for two different implementations of the method for multiple roots.
The one I actually use in this program solves a linear system involving gradf(x) (which produces the Jacobian matrix) and f(x) (which gives me a vector of the equations I found using Kirchhoff's laws).
It works like gradf(x) . delta = f(x), so we find delta using the solve() function and subtract delta from x1 (our initial v1 and v2) to get x2.
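In other words, the main loop is meant to do something like the following sketch (just to illustrate the idea; the names newton, func and jac are illustrative, and here the convergence test is on the size of the step rather than on norm(x1) as in my script):
import numpy as np
from numpy.linalg import solve, norm

def newton(func, jac, x0, tol=1e-6, maxiter=50):
    x = np.asarray(x0, dtype=float)
    for _ in range(maxiter):
        delta = solve(jac(x), func(x))  # solve jac(x) . delta = func(x)
        x = x - delta
        if norm(delta) < tol:  # stop once the correction is tiny
            break
    return x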
I'm having a problem with the matrix, though. When I call gradf([4., 5.]) in IPython, it gives me a matrix like
array([[ 1.25000004e+00, 4.12230724e-08],
[ -4.12230724e-08, -8.33333375e-01]])
but the same matrix when printed during the normal operation of the program is something like
[[ 1.25 0. ]
[ 0. -0.83333333]]
I get this matrix in my first iteration regardless of the initial guess for v1 and v2 (i.e. x1). The next iteration then fails with an error like
LinAlgError: Singular matrix
I don't think this is due to round-off in Python either, because when I individually print the value of (say) the first array element in the matrix while running the script, it gives me a zero where it should give something like 4.12230724e-08.
The classroom implementation, cls_newton(x), which works the equations out beforehand and returns x2 directly, seems to do the same thing, but I can't tell why: it gives me one answer in IPython and a different one during execution.
Also, when I write, say, f_v1 I'm referring to the partial derivative of f with respect to v1, g_v2 is the partial derivative of g with respect to v2, and so on.
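For what it's worth, the closed-form update in cls_newton is just Cramer's rule applied to the 2x2 Jacobian, so it should give exactly the same step as solve(); here is a quick sanity check (J and F below are arbitrary illustrative values, not output from my script):
import numpy as np
# arbitrary 2x2 Jacobian J and residual vector F, purely for illustration
J = np.array([[1.25, 4.12e-8], [-4.12e-8, -0.83333333]])
F = np.array([0.1, -0.2])
det = J[0, 0]*J[1, 1] - J[0, 1]*J[1, 0]
# Cramer's rule -- the same algebra as the v1n/v2n lines in cls_newton
delta = np.array([F[0]*J[1, 1] - F[1]*J[0, 1],
                  F[1]*J[0, 0] - F[0]*J[1, 0]]) / det
assert np.allclose(delta, np.linalg.solve(J, F))  # the two steps agree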
Thank you for your help in advance!
Upvotes: 0
Views: 1932
Reputation: 11
I know this is a really late response. If you're still interested in the answer, I think this code should do the trick. It gave me the correct answer for the last part of the question. Let me know if you have any questions.
import numpy as np
Vp = 5 #V
R1 = 1e3 # ohms
R2 = 4e3 # ohms
R3 = 3e3 # ohms
R4 = 2e3 # ohms
I0 = 3e-9 # A
VT = 0.05 # V
tol = 1e-6
def f1(V):
    v1 = V[0]; v2 = V[1]
    return( (v1-Vp)/R1 + v1/R2 + I0*(np.exp((v1-v2)/VT)-1) )
def f2(V):
    v1 = V[0]; v2 = V[1]
    return( -(v2-Vp)/R3 - v2/R4 + I0*(np.exp((v1-v2)/VT)-1) )
def j11(V):
    v1 = V[0]; v2 = V[1]
    return 1/R1 + 1/R2 + I0/VT * np.exp((v1-v2)/VT)
def j12(V):
    v1 = V[0]; v2 = V[1]
    return -I0/VT * np.exp((v1-v2)/VT)
def j21(V):
    v1 = V[0]; v2 = V[1]
    return I0/VT * np.exp((v1-v2)/VT)
def j22(V):
    v1 = V[0]; v2 = V[1]
    return -1/R3 - 1/R4 - I0/VT * np.exp((v1-v2)/VT)
# initial guesses
v1 = 0.5
v2 = 0.5
V = np.array( [v1,v2] )
F = np.array( [ f1( V ) , f2( V ) ] )
J = np.array( [ [ j11( V ) , j12( V ) ] , [ j21( V ) , j22( V )] ] )
DV = np.dot( np.linalg.inv(J) , F )
estimate = V - DV
err = np.abs(estimate - V)
while ( err > tol ).any():
    F = np.array( [ f1(estimate) , f2(estimate) ] )
    J = np.array( [ [ j11(estimate) , j12(estimate) ] , [ j21(estimate) , j22(estimate) ] ] )
    DV = np.dot( np.linalg.inv(J) , F ) # f(x)/f'(x)
    new_estimate = estimate - DV
    err = np.abs(new_estimate - estimate)
    estimate = new_estimate
print("V1={:.4E}V\tV2={:.4E}V".format(*new_estimate))
print("Voltage across forward biased diode: {:.4E}V".format(new_estimate[0]-new_estimate[1]))
Upvotes: 1