Jeremy Knight — Math 639 — 3/29/11 — Algorithm 10.3 (Steepest Descent, Burden & Faires, Numerical Analysis)
<Text-field style="Heading 1" layout="Heading 1">SteepestDescent(n,x0,f,TOL,N)</Text-field> To approximate a solution p to the minimization problem LUklbXJvd0c2Iy9JK21vZHVsZW5hbWVHNiJJLFR5cGVzZXR0aW5nR0koX3N5c2xpYkdGJzYqLUkjbWlHRiQ2JVEiZ0YnLyUnaXRhbGljR1EldHJ1ZUYnLyUsbWF0aHZhcmlhbnRHUSdpdGFsaWNGJy1JKG1mZW5jZWRHRiQ2JC1GIzYkLUYsNidRInBGJy8lJWJvbGRHRjFGLy9GM1EsYm9sZC1pdGFsaWNGJy8lK2ZvbnR3ZWlnaHRHUSVib2xkRicvRjNRJ25vcm1hbEYnRkQtSSNtb0dGJDYtUSI9RidGRC8lJmZlbmNlR1EmZmFsc2VGJy8lKnNlcGFyYXRvckdGTC8lKXN0cmV0Y2h5R0ZMLyUqc3ltbWV0cmljR0ZMLyUobGFyZ2VvcEdGTC8lLm1vdmFibGVsaW1pdHNHRkwvJSdhY2NlbnRHRkwvJSdsc3BhY2VHUSwwLjI3Nzc3NzhlbUYnLyUncnNwYWNlR0Zlbi1JJ211bmRlckdGJDYlLUYsNiVRJG1pbkYnL0YwRkxGRC1GIzYoLUYsNiVRInhGJ0YvRjItRkc2LVEifkYnRkRGSkZNRk9GUUZTRlVGVy9GWlEmMC4wZW1GJy9GZ25GaG8tRkc2LVElJmluO0YnRkRGSkZNRk9GUUZTRlVGV0ZZRmZuLUklbXN1cEdGJDYlLUYsNiVRKCYjODQ3NztGJ0Zeb0ZELUYjNiUtRiw2JVEibkYnRi9GMkYvRjIvJTFzdXBlcnNjcmlwdHNoaWZ0R1EiMEYnRi9GMi8lLGFjY2VudHVuZGVyR0ZMRistRjY2JC1GIzYkLUYsNidGY29GPUZeby9GM0ZDRkFGREZERmRvRkQ= given an initial approximation x. 
SteepestDescent := proc(n, x0, f, TOL, N)
    # Algorithm 10.3 (Steepest Descent, Burden & Faires): approximate a local
    # minimizer of g(x) = sum_{i=1..n} f[i](x)^2, i.e. a solution of the
    # nonlinear system F(x) = 0, starting from an initial approximation x0.
    #
    # Parameters:
    #   n   - number of variables / equations
    #   x0  - 1..n Array holding the initial approximation
    #   f   - Array (or table) of n functions, each taking an Array argument
    #   TOL - stopping tolerance on |g(x_k) - g(x_{k-1})|
    #   N   - maximum number of iterations
    #
    # Returns: the final approximation x (an Array).  Progress and the
    # terminating condition are also printed, as in the original algorithm.
    #
    # Fixes vs. the original transcription:
    #   * h[3] is built from h[2] and h[1] (the forward divided difference);
    #     the original read the stale h[3] from the previous iteration.
    #   * the update step uses the selected scalar step size a, not the
    #     whole alpha Array.
    #   * `stop` (which terminates the Maple session inside a proc) is
    #     replaced by `return(x)`.
    #   * debug `option trace` removed.
    local a, h, i, k, g, g0, G, alpha, alpha0, x, z, z0,
          L2Norm, Jacobian, gradientG:

    ##################### Subroutine Section #####################

    # g(x) = sum of squares of the component functions f[i].
    g := proc(x::Array)
        local i, temp:
        temp := 0:
        for i from 1 to n do
            temp := temp + (f[i](x))^2:
        end do:
        return(temp):
    end proc:

    # Central-difference approximation of the Jacobian J[i,j] = dF_i/dx_j at x.
    Jacobian := proc(x::Array)
        local i, j, e, h, J:
        J := Array(1..n, 1..n):
        e := Array(1..n):
        h := .0001:   # finite-difference step
        for i from 1 to n do
            for j from 1 to n do
                e[j] := h:
                J[i, j] := (f[i](x + e) - f[i](x - e)) / (2*h):
                e[j] := 0:
            end do:
        end do:
        return(J):
    end proc:

    # Gradient of g at x:  del g(x) = 2 * J(x)^T * F(x).
    gradientG := proc(x::Array)
        local i, j, F, J, delG:
        J := Jacobian(x):
        delG := Array(1..n):          # Array() initializes entries to 0
        for i from 1 to n do
            F[i] := f[i](x):
        end do:
        for i from 1 to n do
            for j from 1 to n do
                delG[i] := delG[i] + 2*F[j]*J[j, i]:
            end do:
        end do:
        return(delG):
    end proc:

    # Euclidean (l2) norm of the Array x.
    L2Norm := proc(x::Array)
        local i, temp:
        temp := 0:
        for i from 1 to n do
            temp := temp + x[i]^2:
        end do:
        return(sqrt(temp)):
    end proc:

    ##################### End Subroutine Section #####################

    G := Array(1..3):       # g values at the three trial step sizes
    h := Array(1..3):       # divided differences for the quadratic fit
    alpha := Array(1..3):   # trial step sizes
    k := 1:
    x := Array(1..n):
    for i from 1 to n do    # copy x0 so the caller's Array is not mutated
        x[i] := x0[i]:
    end do:

    while k <= N do
        G[1] := g(x):
        z := gradientG(x):
        z0 := L2Norm(z):
        if z0 = 0 then
            print("Zero Gradient"):
            print(x, G[1]):
            print("Might have a minimum"):
            return(x):
        end if:
        z := z/z0:          # normalize the descent direction
        alpha[1] := 0:
        alpha[3] := 1:
        G[3] := g(x - alpha[3]*z):
        # Halve alpha[3] until it gives a smaller g value than the current point.
        while G[3] >= G[1] do
            alpha[3] := alpha[3]/2:
            G[3] := g(x - alpha[3]*z):
            if alpha[3] < TOL/2 then
                print("No likely improvement"):
                print(x, G[1]):
                print("Might have a minimum"):
                return(x):
            end if:
        end do:
        alpha[2] := alpha[3]/2:
        G[2] := g(x - alpha[2]*z):
        # Newton's forward divided-difference formula fits the quadratic
        #   P(alpha) = G[1] + h[1]*alpha + h[3]*alpha*(alpha - alpha[2])
        # that interpolates g(x - alpha*z) at alpha = 0, alpha[2], alpha[3].
        h[1] := (G[2] - G[1]) / alpha[2]:
        h[2] := (G[3] - G[2]) / (alpha[3] - alpha[2]):
        h[3] := (h[2] - h[1]) / alpha[3]:      # FIX: original used stale h[3] here
        # The critical point of P occurs at alpha0.
        alpha0 := 0.5*(alpha[2] - h[1]/h[3]):
        g0 := g(x - alpha0*z):
        if g0 < G[3] then
            a := alpha0:
        else
            a := alpha[3]:
        end if:
        x := x - a*z:                           # FIX: original multiplied by the alpha Array
        print("***", k, x, g(x)):
        if abs(g(x) - G[1]) < TOL then
            print("The Procedure was successful"):
            print(x, g(x)):
            return(x):
        end if:
        k := k + 1:
    end do:
    if k > N then
        print("Maximum iterations exceeded"):
    end if:
    return(x):
end proc: