Dataset columns (string lengths): rem 1 – 322k, add 0 – 2.05M, context 4 – 228k, meta 156 – 215.
def subplot(Numy,Numx,win=0,lm=0*inches,rm=0*inches,tm=0*inches,bm=0*inches,ph=11*inches,pw=8.5*inches,dpi=75,ls=0.75*inches,rs=0.75*inches,ts=0.75*inches,bs=0.75*inches):
def subplot(Numy,Numx,win=0,lm=0*inches,rm=0*inches,tm=0*inches,bm=0*inches,ph=11*inches,pw=8.5*inches,dpi=75,ls=0.75*inches,rs=0.75*inches,ts=0.75*inches,bs=0.75*inches,color='black',frame=0):
    if type(color) is types.StringType:
        color = _colornum[color]
def subplot(Numy,Numx,win=0,lm=0*inches,rm=0*inches,tm=0*inches,bm=0*inches,ph=11*inches,pw=8.5*inches,dpi=75,ls=0.75*inches,rs=0.75*inches,ts=0.75*inches,bs=0.75*inches):
    # Use gist.plsys to change coordinate systems
    systems=[]
    ind = -1
    Yspace = (ph-bm-tm)/float(Numy)
    Xspace = (pw-rm-lm)/float(Numx)
    for nY in range(Numy):
        ystart = (ph-tm) - (nY+1)*Yspace + bs
        for nX in range(Numx):
            xstart = lm + nX*Xspace + ls
            systems.append({})
            systems[-1]['viewport'] = [xstart,xstart+Xspace-(ls+rs),ystart,ystart+Yspace-(ts+bs)]
    _current_style='/tmp/subplot%s.gs' % win
    fid = open(_current_style,'w')
    fid.write(write_style.style2string(systems))
    fid.close()
    gist.winkill(win)
    gist.window(win,style=_current_style,width=int(8.5*dpi),height=int(11*dpi),dpi=dpi)
d31b1aed025e9765ed229fd77d6341f36f1473cc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d31b1aed025e9765ed229fd77d6341f36f1473cc/Mplot.py
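The record above widens subplot's signature with color and frame arguments. The viewport arithmetic in the old body can be checked in isolation; the following minimal sketch (illustrative values only, names mirror the snippet above, not library code) prints the four viewport rectangles for a 2x2 grid on a US-letter page:

import math

# Hypothetical stand-in for the gist "inches" unit used in the original.
inches = 1.0
ph, pw = 11 * inches, 8.5 * inches          # page height / width
lm = rm = tm = bm = 0.0                     # outer margins
ls = rs = ts = bs = 0.75 * inches           # per-panel spacing
Numy = Numx = 2

Yspace = (ph - bm - tm) / float(Numy)
Xspace = (pw - rm - lm) / float(Numx)
for nY in range(Numy):
    ystart = (ph - tm) - (nY + 1) * Yspace + bs
    for nX in range(Numx):
        xstart = lm + nX * Xspace + ls
        # [xmin, xmax, ymin, ymax] of each panel's viewport, in page units
        print([xstart, xstart + Xspace - (ls + rs),
               ystart, ystart + Yspace - (ts + bs)])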
change_palette()
change_palette(palette)
def surf(x,y,z,win=None,shade=0,edges=1,edge_color="black",phi=-45,theta=30,
         zscale=1.0,palette=None,gnomon=0):
    """Plot a three-dimensional wire-frame (surface): z=f(x,y)
    """
    if win is None:
        pl3d.window3()
    else:
        pl3d.window3(win)
    pl3d.set_draw3_(0)
    pl3d.orient3(phi=phi*pi/180,theta=theta*pi/180)
    pl3d.light3()
    change_palette()
    plwf.plwf(z,y,x,shade=shade,edges=edges,ecolor=edge_color,scale=zscale)
    [xmin,xmax,ymin,ymax] = pl3d.draw3(1)
    gist.limits(xmin,xmax,ymin,ymax)
    pl3d.gnomon(gnomon)
d31b1aed025e9765ed229fd77d6341f36f1473cc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d31b1aed025e9765ed229fd77d6341f36f1473cc/Mplot.py
derphi_aj = derphi(a_j)
derphi_aj = derphi(a_j)
def zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo, phi, derphi, phi0, derphi0, c1, c2): maxiter = 10 i = 0 delta1 = 0.2 # cubic interpolant check delta2 = 0.1 # quadratic interpolant check phi_rec = phi0 a_rec = 0 while 1: # interpolate to find a trial step length between a_lo and a_hi # Need to choose interpolation here. Use cubic interpolation and then if the # result is within delta * dalpha or outside of the interval bounded by a_lo or a_hi # then use quadratic interpolation, if the result is still too close, then use bisection dalpha = a_hi-a_lo; if dalpha < 0: a,b = a_hi,a_lo else: a,b = a_lo, a_hi # minimizer of cubic interpolant # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi) # if the result is too close to the end points (or out of the interval) # then use quadratic interpolation with phi_lo, derphi_lo and phi_hi # if the result is stil too close to the end points (or out of the interval) # then use bisection if (i > 0): cchk = delta1*dalpha a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi, a_rec, phi_rec) if (i==0) or (a_j is None) or (a_j > b-cchk) or (a_j < a+cchk): qchk = delta2*dalpha a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi) if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk): a_j = a_lo + 0.5*dalpha
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
return a_star, val_star
return a_star, val_star, valprime_star
def zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo, phi, derphi, phi0, derphi0, c1, c2): maxiter = 10 i = 0 delta1 = 0.2 # cubic interpolant check delta2 = 0.1 # quadratic interpolant check phi_rec = phi0 a_rec = 0 while 1: # interpolate to find a trial step length between a_lo and a_hi # Need to choose interpolation here. Use cubic interpolation and then if the # result is within delta * dalpha or outside of the interval bounded by a_lo or a_hi # then use quadratic interpolation, if the result is still too close, then use bisection dalpha = a_hi-a_lo; if dalpha < 0: a,b = a_hi,a_lo else: a,b = a_lo, a_hi # minimizer of cubic interpolant # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi) # if the result is too close to the end points (or out of the interval) # then use quadratic interpolation with phi_lo, derphi_lo and phi_hi # if the result is stil too close to the end points (or out of the interval) # then use bisection if (i > 0): cchk = delta1*dalpha a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi, a_rec, phi_rec) if (i==0) or (a_j is None) or (a_j > b-cchk) or (a_j < a+cchk): qchk = delta2*dalpha a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi) if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk): a_j = a_lo + 0.5*dalpha
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
global fc, gc
fc = 0
gc = 0
global _ls_fc, _ls_gc, _ls_ingfk
_ls_fc = 0
_ls_gc = 0
_ls_ingfk = None
def line_search(f, myfprime, xk, pk, gfk, old_fval, old_old_fval, args=(), c1=1e-4, c2=0.9, amax=50): """Find alpha that satisfies strong Wolfe conditions. Uses the line search algorithm to enforce strong Wolfe conditions Wright and Nocedal, 'Numerical Optimization', 1999, pg. 59-60 For the zoom phase it uses an algorithm by Outputs: (alpha0, gc, fc) """ global fc, gc fc = 0 gc = 0 def phi(alpha): global fc fc += 1 return f(xk+alpha*pk,*args) if isinstance(myfprime,type(())): def phiprime(alpha): global fc fc += len(xk)+1 eps = myfprime[1] fprime = myfprime[0] newargs = (f,eps) + args return Num.dot(fprime(xk+alpha*pk,*newargs),pk) else: fprime = myfprime def phiprime(alpha): global gc gc += 1 return Num.dot(fprime(xk+alpha*pk,*args),pk) alpha0 = 0 phi0 = old_fval derphi0 = Num.dot(gfk,pk) alpha1 = pymin(1.0,1.01*2*(phi0-old_old_fval)/derphi0) phi_a1 = phi(alpha1) #derphi_a1 = phiprime(alpha1) evaluated below phi_a0 = phi0 derphi_a0 = derphi0 i = 1 maxiter = 10 while 1: # bracketing phase if (phi_a1 > phi0 + c1*alpha1*derphi0) or \ ((phi_a1 >= phi_a0) and (i > 1)): alpha_star, fval_star = zoom(alpha0, alpha1, phi_a0, phi_a1, derphi_a0, phi, phiprime, phi0, derphi0, c1, c2) break derphi_a1 = phiprime(alpha1) if (abs(derphi_a1) <= -c2*derphi0): alpha_star = alpha1 fval_star = phi_a1 break if (derphi_a1 >= 0): alpha_star, fval_star = zoom(alpha1, alpha0, phi_a1, phi_a0, derphi_a1, phi, phiprime, phi0, derphi0, c1, c2) break alpha2 = 2 * alpha1 # increase by factor of two on each iteration i = i + 1 alpha0 = alpha1 alpha1 = alpha2 phi_a0 = phi_a1 phi_a1 = phi(alpha1) derphi_a0 = derphi_a1 # stopping test if lower function not found if (i > maxiter): alpha_star = alpha1 fval_star = phi_a1 break return alpha_star, fc, gc, fval_star, old_fval
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
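For reference, the line_search context above enforces the strong Wolfe conditions (Wright and Nocedal, Numerical Optimization, 1999, pp. 59-60): a step length $\alpha$ along $p_k$ is accepted when

$$f(x_k + \alpha p_k) \le f(x_k) + c_1 \alpha \nabla f_k^T p_k, \qquad \left|\nabla f(x_k + \alpha p_k)^T p_k\right| \le c_2 \left|\nabla f_k^T p_k\right|,$$

with $0 < c_1 < c_2 < 1$; the defaults in the signature above are $c_1 = 10^{-4}$ and $c_2 = 0.9$.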
global fc
fc += 1
global _ls_fc
_ls_fc += 1
def phi(alpha):
    global fc
    fc += 1
    return f(xk+alpha*pk,*args)
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
global fc
fc += len(xk)+1
global _ls_fc, _ls_ingfk
_ls_fc += len(xk)+1
def phiprime(alpha):
    global fc
    fc += len(xk)+1
    eps = myfprime[1]
    fprime = myfprime[0]
    newargs = (f,eps) + args
    return Num.dot(fprime(xk+alpha*pk,*newargs),pk)
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
return Num.dot(fprime(xk+alpha*pk,*newargs),pk)
_ls_ingfk = fprime(xk+alpha*pk,*newargs)
return Num.dot(_ls_ingfk,pk)
def phiprime(alpha):
    global fc
    fc += len(xk)+1
    eps = myfprime[1]
    fprime = myfprime[0]
    newargs = (f,eps) + args
    return Num.dot(fprime(xk+alpha*pk,*newargs),pk)
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
global gc
gc += 1
return Num.dot(fprime(xk+alpha*pk,*args),pk)
global _ls_gc, _ls_ingfk
_ls_gc += 1
_ls_ingfk = fprime(xk+alpha*pk,*args)
return Num.dot(_ls_ingfk,pk)
def phiprime(alpha):
    global gc
    gc += 1
    return Num.dot(fprime(xk+alpha*pk,*args),pk)
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
alpha_star, fval_star = zoom(alpha0, alpha1, phi_a0, phi_a1, derphi_a0, phi, phiprime, phi0, derphi0, c1, c2)
alpha_star, fval_star, fprime_star = \
        zoom(alpha0, alpha1, phi_a0, phi_a1, derphi_a0, phi, phiprime, phi0, derphi0, c1, c2)
def phiprime(alpha):
    global gc
    gc += 1
    return Num.dot(fprime(xk+alpha*pk,*args),pk)
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
alpha_star, fval_star = zoom(alpha1, alpha0, phi_a1, phi_a0, derphi_a1, phi, phiprime, phi0, derphi0, c1, c2)
alpha_star, fval_star, fprime_star = \
        zoom(alpha1, alpha0, phi_a1, phi_a0, derphi_a1, phi, phiprime, phi0, derphi0, c1, c2)
def phiprime(alpha):
    global gc
    gc += 1
    return Num.dot(fprime(xk+alpha*pk,*args),pk)
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
return alpha_star, fc, gc, fval_star, old_fval
if fprime_star is not None:
    fprime_star = _ls_ingfk
return alpha_star, _ls_fc, _ls_gc, fval_star, old_fval, fprime_star
def phiprime(alpha):
    global gc
    gc += 1
    return Num.dot(fprime(xk+alpha*pk,*args),pk)
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
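The renamed module-level counters (_ls_fc, _ls_gc) avoid clobbering the caller's local fc/gc, and _ls_ingfk caches the last full gradient so the caller can reuse it instead of re-evaluating fprime at the accepted step. A minimal, self-contained sketch of that caching idea (hypothetical names, not the library code):

import numpy as np

_ls_ingfk = None          # last full gradient computed inside the line search

def make_phiprime(f_grad, xk, pk):
    """Directional derivative phi'(alpha) that also caches the full gradient."""
    def phiprime(alpha):
        global _ls_ingfk
        _ls_ingfk = f_grad(xk + alpha * pk)   # cache the full gradient vector
        return np.dot(_ls_ingfk, pk)          # slope along the search direction
    return phiprime

# After the search accepts alpha_star, _ls_ingfk already holds
# grad f(xk + alpha_star * pk), so no extra gradient evaluation is needed.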
old_fval = f(x0,*args)
old_old_fval = old_fval + 5000
func_calls += 1
def fmin_bfgs(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=1.49e-8, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol I = MLab.eye(N) Hk = I if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args) func_calls = func_calls + fc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + gc + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + gc + 1 yk = gfkp1 - gfk k = k + 1 try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] gfk = gfkp1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
old_fval = f(x0,*args)
old_old_fval = old_fval + 5000
func_calls += 1
def fmin_bfgs(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=1.49e-8, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol I = MLab.eye(N) Hk = I if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args) func_calls = func_calls + fc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + gc + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + gc + 1 yk = gfkp1 - gfk k = k + 1 try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] gfk = gfkp1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
alpha_k, fc, gc, old_fval, old_old_fval = \
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
def fmin_bfgs(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=1.49e-8, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol I = MLab.eye(N) Hk = I if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args) func_calls = func_calls + fc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + gc + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + gc + 1 yk = gfkp1 - gfk k = k + 1 try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] gfk = gfkp1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
if app_fprime:
    gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args)
    func_calls = func_calls + gc + len(x0) + 1
else:
    gfkp1 = apply(fprime,(xkp1,)+args)
    grad_calls = grad_calls + gc + 1
if gfkp1 is None:
    if app_fprime:
        gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args)
        func_calls = func_calls + len(x0) + 1
    else:
        gfkp1 = apply(fprime,(xkp1,)+args)
        grad_calls = grad_calls + 1
def fmin_bfgs(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=1.49e-8, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol I = MLab.eye(N) Hk = I if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args) func_calls = func_calls + fc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + gc + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + gc + 1 yk = gfkp1 - gfk k = k + 1 try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] gfk = gfkp1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
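For context, the Hk update inside the fmin_bfgs code above is the standard BFGS inverse-Hessian recursion (Wright and Nocedal, 1999, p. 198); the A1/A2 lines correspond to

$$H_{k+1} = \left(I - \rho_k s_k y_k^T\right) H_k \left(I - \rho_k y_k s_k^T\right) + \rho_k s_k s_k^T, \qquad \rho_k = \frac{1}{y_k^T s_k},\quad s_k = x_{k+1} - x_k,\quad y_k = \nabla f_{k+1} - \nabla f_k.$$

The change in this record lets fmin_bfgs reuse the gradient returned by line_search (gfkp1) and only recompute it when the line search did not produce one.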
xk = x0
def fmin_cg(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=1.49e-8, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function with nonlinear conjugate gradient algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the nonlinear conjugate gradient algorithm of Polak and Ribiere See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- if retall then this vector of the iterates is returned Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if True """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [xk] sk = [2*gtol] warnflag = 0 pk = -gfk old_fval = f(xk,*args) old_old_fval = old_fval + 5000 while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): deltak = Num.dot(gfk,gfk) alpha_k, fc, gc, old_fval, old_old_fval = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args,c2=0.3) func_calls += fc grad_calls += gc xk = xk + alpha_k*pk if retall: allvecs.append(xk) if app_fprime: gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args) func_calls = func_calls + gc + len(x0) + 1 else: gfkp1 = apply(fprime,(xk,)+args) grad_calls = grad_calls + gc + 1 yk = gfkp1 - gfk beta_k = pymax(0,Num.dot(yk,gfkp1)/deltak) pk = -gfkp1 + beta_k * pk gfk = gfkp1 k = k + 1 if disp or full_output: fval = apply(f,(xk,)+args) if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
old_fval = f(xk,*args)
old_old_fval = old_fval + 5000
def fmin_cg(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=1.49e-8, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function with nonlinear conjugate gradient algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the nonlinear conjugate gradient algorithm of Polak and Ribiere See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- if retall then this vector of the iterates is returned Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if True """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [xk] sk = [2*gtol] warnflag = 0 pk = -gfk old_fval = f(xk,*args) old_old_fval = old_fval + 5000 while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): deltak = Num.dot(gfk,gfk) alpha_k, fc, gc, old_fval, old_old_fval = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args,c2=0.3) func_calls += fc grad_calls += gc xk = xk + alpha_k*pk if retall: allvecs.append(xk) if app_fprime: gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args) func_calls = func_calls + gc + len(x0) + 1 else: gfkp1 = apply(fprime,(xk,)+args) grad_calls = grad_calls + gc + 1 yk = gfkp1 - gfk beta_k = pymax(0,Num.dot(yk,gfkp1)/deltak) pk = -gfkp1 + beta_k * pk gfk = gfkp1 k = k + 1 if disp or full_output: fval = apply(f,(xk,)+args) if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
alpha_k, fc, gc, old_fval, old_old_fval = \
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
def fmin_cg(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=1.49e-8, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function with nonlinear conjugate gradient algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the nonlinear conjugate gradient algorithm of Polak and Ribiere See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- if retall then this vector of the iterates is returned Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if True """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [xk] sk = [2*gtol] warnflag = 0 pk = -gfk old_fval = f(xk,*args) old_old_fval = old_fval + 5000 while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): deltak = Num.dot(gfk,gfk) alpha_k, fc, gc, old_fval, old_old_fval = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args,c2=0.3) func_calls += fc grad_calls += gc xk = xk + alpha_k*pk if retall: allvecs.append(xk) if app_fprime: gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args) func_calls = func_calls + gc + len(x0) + 1 else: gfkp1 = apply(fprime,(xk,)+args) grad_calls = grad_calls + gc + 1 yk = gfkp1 - gfk beta_k = pymax(0,Num.dot(yk,gfkp1)/deltak) pk = -gfkp1 + beta_k * pk gfk = gfkp1 k = k + 1 if disp or full_output: fval = apply(f,(xk,)+args) if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
if app_fprime:
    gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args)
    func_calls = func_calls + gc + len(x0) + 1
else:
    gfkp1 = apply(fprime,(xk,)+args)
    grad_calls = grad_calls + gc + 1
if gfkp1 is None:
    if app_fprime:
        gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args)
        func_calls = func_calls + len(x0) + 1
    else:
        gfkp1 = apply(fprime,(xk,)+args)
        grad_calls = grad_calls + 1
def fmin_cg(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=1.49e-8, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function with nonlinear conjugate gradient algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the nonlinear conjugate gradient algorithm of Polak and Ribiere See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- if retall then this vector of the iterates is returned Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if True """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [xk] sk = [2*gtol] warnflag = 0 pk = -gfk old_fval = f(xk,*args) old_old_fval = old_fval + 5000 while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): deltak = Num.dot(gfk,gfk) alpha_k, fc, gc, old_fval, old_old_fval = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args,c2=0.3) func_calls += fc grad_calls += gc xk = xk + alpha_k*pk if retall: allvecs.append(xk) if app_fprime: gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args) func_calls = func_calls + gc + len(x0) + 1 else: gfkp1 = apply(fprime,(xk,)+args) grad_calls = grad_calls + gc + 1 yk = gfkp1 - gfk beta_k = pymax(0,Num.dot(yk,gfkp1)/deltak) pk = -gfkp1 + beta_k * pk gfk = gfkp1 k = k + 1 if disp or full_output: fval = apply(f,(xk,)+args) if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
fval = apply(f,(xk,)+args)
fval = old_fval
def fmin_cg(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=1.49e-8, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function with nonlinear conjugate gradient algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the nonlinear conjugate gradient algorithm of Polak and Ribiere See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- if retall then this vector of the iterates is returned Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if True """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [xk] sk = [2*gtol] warnflag = 0 pk = -gfk old_fval = f(xk,*args) old_old_fval = old_fval + 5000 while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): deltak = Num.dot(gfk,gfk) alpha_k, fc, gc, old_fval, old_old_fval = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args,c2=0.3) func_calls += fc grad_calls += gc xk = xk + alpha_k*pk if retall: allvecs.append(xk) if app_fprime: gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args) func_calls = func_calls + gc + len(x0) + 1 else: gfkp1 = apply(fprime,(xk,)+args) grad_calls = grad_calls + gc + 1 yk = gfkp1 - gfk beta_k = pymax(0,Num.dot(yk,gfkp1)/deltak) pk = -gfkp1 + beta_k * pk gfk = gfkp1 k = k + 1 if disp or full_output: fval = apply(f,(xk,)+args) if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/18d62e07c7b0b8dd2d3680a3b704bd36ff6ae89b/optimize.py
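The fmin_cg records above follow the Polak-Ribiere nonlinear conjugate gradient rule (Wright and Nocedal, 1999, pp. 120-122); the beta_k = pymax(0, ...) line in the context corresponds to

$$\beta_k^{PR+} = \max\!\left(0,\; \frac{(\nabla f_{k+1} - \nabla f_k)^T \nabla f_{k+1}}{\nabla f_k^T \nabla f_k}\right), \qquad p_{k+1} = -\nabla f_{k+1} + \beta_k^{PR+}\, p_k.$$

The fval = old_fval change simply reports the function value already produced by the last line search instead of spending one more evaluation on it.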
xsupi = 0
xsupi = zeros(len(x0), x0.typecode())
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Description: Minimize the function, f, whose gradient is given by fprime using the Newton-CG method. fhess_p must compute the hessian times an arbitrary vector. If it is not given, finite-differences on fprime are used to compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 140. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f: fprime(x, *args) fhess_p -- a function to compute the Hessian of f times an arbitrary vector: fhess_p (x, p, *args) fhess -- a function to compute the Hessian matrix of f. args -- extra arguments for f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon -- if fhess is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) xopt -- the minimizer of f fopt -- the value of the function at xopt: fopt = f(xopt) fcalls -- the number of function calls. gcalls -- the number of gradient calls. hcalls -- the number of hessian calls. warnflag -- algorithm warnings: 1 : 'Maximum number of iterations exceeded.' allvecs -- a list of all tried iterates Additional Inputs: avextol -- Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter -- Maximum number of iterations to allow. full_output -- If non-zero return the optional outputs. disp -- If non-zero print convergence message. retall -- return a list of results at each iteration if True Remarks: Only one of fhess_p or fhess need be given. If fhess is provided, then fhess_p will be ignored. If neither fhess nor fhess_p is provided, then the hessian product will be approximated using finite differences on fprime. """ x0 = asarray(x0) fcalls = 0 gcalls = 0 hcalls = 0 if maxiter is None: maxiter = len(x0)*200 xtol = len(x0)*avextol update = [2*xtol] xk = x0 if retall: allvecs = [xk] k = 0 old_fval = f(x0,*args) fcalls += 1 while (Num.add.reduce(abs(update)) > xtol) and (k < maxiter): # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -apply(fprime,(xk,)+args) gcalls = gcalls + 1 maggrad = Num.add.reduce(abs(b)) eta = min([0.5,Num.sqrt(maggrad)]) termcond = eta * maggrad xsupi = 0 ri = -b psupi = -ri i = 0 dri0 = Num.dot(ri,ri) if fhess is not None: # you want to compute hessian once. A = apply(fhess,(xk,)+args) hcalls = hcalls + 1 while Num.add.reduce(abs(ri)) > termcond: if fhess is None: if fhess_p is None: Ap = apply(approx_fhess_p,(xk,psupi,fprime,epsilon)+args) gcalls = gcalls + 2 else: Ap = apply(fhess_p,(xk,psupi)+args) hcalls = hcalls + 1 else: Ap = Num.dot(A,psupi) # check curvature curv = Num.dot(psupi,Ap) if (curv <= 0): if (i > 0): break else: xsupi = xsupi + dri0/curv * psupi break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = Num.dot(ri,ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update Num.dot(ri,ri) for next time. pk = xsupi # search direction is solution to system. 
gfk = -b # gradient at xk alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval,args) fcalls = fcalls + fc gcalls = gcalls + gc update = alphak * pk xk = xk + update if retall: allvecs.append(xk) k = k + 1 if disp or full_output: fval = old_fval if k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls else: warnflag = 0 if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls if full_output: retlist = xk, fval, fcalls, gcalls, hcalls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
c18af3409c18c2b6d29bcf8c8977181b040d36d2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c18af3409c18c2b6d29bcf8c8977181b040d36d2/optimize.py
if (curv <= 0):
if curv == 0.0:
    break
elif curv < 0:
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Description: Minimize the function, f, whose gradient is given by fprime using the Newton-CG method. fhess_p must compute the hessian times an arbitrary vector. If it is not given, finite-differences on fprime are used to compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 140. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f: fprime(x, *args) fhess_p -- a function to compute the Hessian of f times an arbitrary vector: fhess_p (x, p, *args) fhess -- a function to compute the Hessian matrix of f. args -- extra arguments for f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon -- if fhess is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) xopt -- the minimizer of f fopt -- the value of the function at xopt: fopt = f(xopt) fcalls -- the number of function calls. gcalls -- the number of gradient calls. hcalls -- the number of hessian calls. warnflag -- algorithm warnings: 1 : 'Maximum number of iterations exceeded.' allvecs -- a list of all tried iterates Additional Inputs: avextol -- Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter -- Maximum number of iterations to allow. full_output -- If non-zero return the optional outputs. disp -- If non-zero print convergence message. retall -- return a list of results at each iteration if True Remarks: Only one of fhess_p or fhess need be given. If fhess is provided, then fhess_p will be ignored. If neither fhess nor fhess_p is provided, then the hessian product will be approximated using finite differences on fprime. """ x0 = asarray(x0) fcalls = 0 gcalls = 0 hcalls = 0 if maxiter is None: maxiter = len(x0)*200 xtol = len(x0)*avextol update = [2*xtol] xk = x0 if retall: allvecs = [xk] k = 0 old_fval = f(x0,*args) fcalls += 1 while (Num.add.reduce(abs(update)) > xtol) and (k < maxiter): # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -apply(fprime,(xk,)+args) gcalls = gcalls + 1 maggrad = Num.add.reduce(abs(b)) eta = min([0.5,Num.sqrt(maggrad)]) termcond = eta * maggrad xsupi = 0 ri = -b psupi = -ri i = 0 dri0 = Num.dot(ri,ri) if fhess is not None: # you want to compute hessian once. A = apply(fhess,(xk,)+args) hcalls = hcalls + 1 while Num.add.reduce(abs(ri)) > termcond: if fhess is None: if fhess_p is None: Ap = apply(approx_fhess_p,(xk,psupi,fprime,epsilon)+args) gcalls = gcalls + 2 else: Ap = apply(fhess_p,(xk,psupi)+args) hcalls = hcalls + 1 else: Ap = Num.dot(A,psupi) # check curvature curv = Num.dot(psupi,Ap) if (curv <= 0): if (i > 0): break else: xsupi = xsupi + dri0/curv * psupi break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = Num.dot(ri,ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update Num.dot(ri,ri) for next time. pk = xsupi # search direction is solution to system. 
gfk = -b # gradient at xk alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval,args) fcalls = fcalls + fc gcalls = gcalls + gc update = alphak * pk xk = xk + update if retall: allvecs.append(xk) k = k + 1 if disp or full_output: fval = old_fval if k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls else: warnflag = 0 if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls if full_output: retlist = xk, fval, fcalls, gcalls, hcalls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
c18af3409c18c2b6d29bcf8c8977181b040d36d2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c18af3409c18c2b6d29bcf8c8977181b040d36d2/optimize.py
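The fmin_ncg records above adjust the inner conjugate-gradient loop, which approximately solves the Newton system for the search direction and terminates early on non-positive curvature:

$$\nabla^2 f(x_k)\, p = -\nabla f(x_k), \qquad \text{stop the inner CG when } \|r_i\|_1 \le \eta_k \|\nabla f(x_k)\|_1,\quad \eta_k = \min\!\left(0.5,\ \sqrt{\|\nabla f(x_k)\|_1}\right).$$

The companion change initializes xsupi as a zero vector with x0's typecode rather than the scalar 0, and splits the curvature test so that exactly zero curvature breaks out without dividing by curv.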
if abs(p-p0) < tol:
if abs(p-p1) < tol:
def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50):
    """Given a function of a single variable and a starting point,
    find a nearby zero using Newton-Raphson.

    fprime is the derivative of the function.  If not given, the
    Secant method is used.
    """
    if fprime is not None:
        p0 = x0
        for iter in range(maxiter):
            myargs = (p0,)+args
            fval = func(*myargs)
            fpval = fprime(*myargs)
            if fpval == 0:
                print "Warning: zero-derivative encountered."
                return p0
            p = p0 - func(*myargs)/fprime(*myargs)
            if abs(p-p0) < tol:
                return p
            p0 = p
    else: # Secant method
        p0 = x0
        p1 = x0*(1+1e-4)
        q0 = apply(func,(p0,)+args)
        q1 = apply(func,(p1,)+args)
        for iter in range(maxiter):
            try:
                p = p1 - q1*(p1-p0)/(q1-q0)
            except ZeroDivisionError:
                if p1 != p0:
                    print "Tolerance of %g reached" % (p1-p0)
                return (p1+p0)/2.0
            if abs(p-p0) < tol:
                return p
            p0 = p1
            q0 = q1
            p1 = p
            q1 = apply(func,(p1,)+args)
    raise RuntimeError, "Failed to converge after %d iterations, value is %f" % (maxiter,p)
bc1289785603ce752fdf4b390e8156d8b8555638 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/bc1289785603ce752fdf4b390e8156d8b8555638/minpack.py
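The newton fix above changes the secant stopping test to compare against the most recent iterate. The secant update and the corrected test are

$$p = p_1 - q_1\,\frac{p_1 - p_0}{q_1 - q_0}, \qquad \text{stop when } |p - p_1| < \text{tol},$$

since $p_1$, not $p_0$, is the point the new estimate $p$ is refining.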
assert_array_almost_equal(f(3,[3],[-4]),[-36])
assert_array_almost_equal(f(3,[3],[-4]),[[-36]])
def check_gemm(self):
    for p in 'sd':
        f = getattr(fblas,p+'gemm',None)
        if f is None: continue
        assert_array_almost_equal(f(3,[3],[-4]),[-36])
        assert_array_almost_equal(f(3,[3],[-4],3,[5]),[-21])
        assert_array_almost_equal(f(1,[[1,2],[1,2]],[[3],[4]]),[[11],[11]])
        assert_array_almost_equal(f(1,[[1,2]],[[3,3],[4,4]]),[[11,11]])
    for p in 'cz':
        f = getattr(fblas,p+'gemm',None)
        if f is None: continue
        assert_array_almost_equal(f(3j,[3-4j],[-4]),[-48-36j])
        assert_array_almost_equal(f(3j,[3-4j],[-4],3,[5j]),[-48-21j])
        assert_array_almost_equal(f(1,[[1,2],[1,2]],[[3],[4]]),[[11],[11]])
        assert_array_almost_equal(f(1,[[1,2]],[[3,3],[4,4]]),[[11,11]])
7e71688ec03fc1369210cbb8183a43673560809c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/7e71688ec03fc1369210cbb8183a43673560809c/test_blas.py
assert_array_almost_equal(f(3j,[3-4j],[-4]),[-48-36j])
assert_array_almost_equal(f(3j,[3-4j],[-4]),[[-48-36j]])
def check_gemm(self): for p in 'sd': f = getattr(fblas,p+'gemm',None) if f is None: continue assert_array_almost_equal(f(3,[3],[-4]),[-36]) assert_array_almost_equal(f(3,[3],[-4],3,[5]),[-21]) assert_array_almost_equal(f(1,[[1,2],[1,2]],[[3],[4]]),[[11],[11]]) assert_array_almost_equal(f(1,[[1,2]],[[3,3],[4,4]]),[[11,11]]) for p in 'cz': f = getattr(fblas,p+'gemm',None) if f is None: continue assert_array_almost_equal(f(3j,[3-4j],[-4]),[-48-36j]) assert_array_almost_equal(f(3j,[3-4j],[-4],3,[5j]),[-48-21j]) assert_array_almost_equal(f(1,[[1,2],[1,2]],[[3],[4]]),[[11],[11]]) assert_array_almost_equal(f(1,[[1,2]],[[3,3],[4,4]]),[[11,11]])
7e71688ec03fc1369210cbb8183a43673560809c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/7e71688ec03fc1369210cbb8183a43673560809c/test_blas.py
assert_array_almost_equal(f(1,[[1,2]],[[3],[4]]),[11])
assert_array_almost_equal(f(1,[[1,2]],[[3],[4]]),[[11]])
def check_gemm2(self): for p in 'sdcz': f = getattr(fblas,p+'gemm',None) if f is None: continue assert_array_almost_equal(f(1,[[1,2]],[[3],[4]]),[11]) assert_array_almost_equal(f(1,[[1,2],[1,2]],[[3],[4]]),[[11],[11]])
7e71688ec03fc1369210cbb8183a43673560809c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/7e71688ec03fc1369210cbb8183a43673560809c/test_blas.py
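The assertion updates in the three gemm records above all add one level of brackets to the expected results: a matrix-matrix product of 1x1 (or 1xN / Nx1) operands is inherently 2-D, so the expected values are now written as nested lists. An illustrative NumPy check of the shapes involved (not part of the test suite):

    import numpy as np
    a = np.array([[3.0]])            # shape (1, 1)
    b = np.array([[-4.0]])           # shape (1, 1)
    print(3 * np.dot(a, b))          # [[-36.]]  -- 2-D, matching the new [[-36]]

    c = np.array([[1.0, 2.0]])       # shape (1, 2)
    d = np.array([[3.0], [4.0]])     # shape (2, 1)
    print(np.dot(c, d))              # [[11.]]   -- matching the new [[11]]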
The entropy dual function equals the negative log likelihood.
The entropy dual function is proportional to the negative log likelihood.
def dual(self, params=None, ignorepenalty=False): """The entropy dual function is defined for conditional models as L(theta) = sum_w q(w) log Z(w; theta) - sum_{w,x} q(w,x) [theta . f(w,x)]
2b410e32da15949ab05dc2ca96d760da0485cce5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/2b410e32da15949ab05dc2ca96d760da0485cce5/maxentropy.py
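The wording change above (from "equals" to "is proportional to") reflects the usual normalisation by the sample size. As a sketch of the standard argument, assuming q is the empirical distribution over N training pairs (which is how the dual in the docstring is defined):

    \ell(\theta) = \sum_{i=1}^{N} \bigl[\theta \cdot f(w_i, x_i) - \log Z(w_i;\theta)\bigr]
                 = N \Bigl[\sum_{w,x} q(w,x)\,\theta \cdot f(w,x) - \sum_{w} q(w)\,\log Z(w;\theta)\Bigr]
                 = -N\,L(\theta),

so L(\theta) = -\ell(\theta)/N: proportional to, but not equal to, the negative log likelihood.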
var = (a*b*1.0)*(a+b+1.0)/(a+b)**2.0
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
def _stats(self, a, b): mn = a *1.0 / (a + b) var = (a*b*1.0)*(a+b+1.0)/(a+b)**2.0 g1 = 2.0*(b-a)*sqrt((1.0+a+b)/(a*b)) / (2+a+b) g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b)) g2 /= a*b*(a+b+2)*(a+b+3) return mn, var, g1, g2
37623a23e1c2341836cbdef3c7a8f5cb45ad2b10 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/37623a23e1c2341836cbdef3c7a8f5cb45ad2b10/distributions.py
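The corrected line changes a multiplication by (a+b+1) into a division, which matches the known Beta(a, b) variance a*b / ((a+b)^2 (a+b+1)). A quick numeric sanity check (illustrative only):

    a, b = 2.0, 3.0
    old_expr = (a * b) * (a + b + 1.0) / (a + b) ** 2.0    # 1.44 -- clearly too large
    new_expr = (a * b) / (a + b + 1.0) / (a + b) ** 2.0    # 0.04
    # Var[Beta(2, 3)] = 2*3 / (5**2 * 6) = 6/150 = 0.04, so only the corrected
    # expression reproduces the textbook value.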
return Date(freq, yaer=tempDate.year, quarter=monthToQuarter(tempDate.month))
return Date(freq, year=tempDate.year, quarter=monthToQuarter(tempDate.month))
def thisday(freq): freq = corelib.fmtFreq(freq) tempDate = mx.DateTime.now() # if it is Saturday or Sunday currently, freq==B, then we want to use Friday if freq == 'B' and tempDate.day_of_week >= 5: tempDate -= (tempDate.day_of_week - 4) if freq == 'B' or freq == 'D' or freq == 'S': return Date(freq, mxDate=tempDate) elif freq == 'M': return Date(freq, year=tempDate.year, month=tempDate.month) elif freq == 'Q': return Date(freq, yaer=tempDate.year, quarter=monthToQuarter(tempDate.month)) elif freq == 'A': return Date(freq, year=tempDate.year)
9297846990ea0107d5b2dd438e6c1d184fea05b8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9297846990ea0107d5b2dd438e6c1d184fea05b8/tsdate.py
def sum(self, axis=None): # Override the base class sum method for efficiency in the cases # axis=0 and axis=None. m, n = self.shape data = self.data if axis in (0, None): indptr = self.indptr out = empty(n, dtype=self.dtype) # The first element in column j has index indptr[j], the last # indptr[j+1] for j in xrange(n): out[j] = data[indptr[j] : indptr[j+1]].sum() if axis==0: # Output is a (1 x n) dense matrix return asmatrix(out) else: return out.sum() else: # Was: return spmatrix.sum(self, axis) rowind = self.rowind out = zeros(m, dtype=self.dtype) # Loop over non-zeros for k in xrange(self.nnz): out[rowind[k]] += data[k] # Output is a (m x 1) dense matrix return asmatrix(out).T
9d0e035693e0e9fbbcd4c00deced4aea257fbed5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9d0e035693e0e9fbbcd4c00deced4aea257fbed5/sparse.py
def sum(self, axis=None): # Override the base class sum method for efficiency in the cases # axis=1 and axis=None. m, n = self.shape data = self.data if axis in (1, None): out = empty(m, dtype=self.dtype) # The first element in row i has index indptr[i], the last # indptr[i+1] indptr = self.indptr for i in xrange(m): out[i] = data[indptr[i] : indptr[i+1]].sum() if axis==1: # Output is a (m x 1) dense matrix return asmatrix(out).T else: return out.sum() else: # Was: return spmatrix.sum(self, axis) colind = self.colind out = zeros(n, dtype=self.dtype) # Loop over non-zeros for k in xrange(self.nnz): out[colind[k]] += data[k] # Output is a (1 x n) dense matrix return asmatrix(out)
9d0e035693e0e9fbbcd4c00deced4aea257fbed5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9d0e035693e0e9fbbcd4c00deced4aea257fbed5/sparse.py
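Both sum records above reduce along the compressed axis by slicing the data array with consecutive indptr entries, and along the other axis by scattering with rowind/colind. A small illustrative sketch of the indptr slicing on plain NumPy arrays (the 3x3 matrix [[1,0,2],[0,0,3],[4,5,6]] in CSR-style layout; not the sparse classes themselves):

    import numpy as np
    data   = np.array([1, 2, 3, 4, 5, 6])
    indptr = np.array([0, 2, 3, 6])      # row i occupies data[indptr[i]:indptr[i+1]]
    row_sums = np.array([data[indptr[i]:indptr[i+1]].sum()
                         for i in range(len(indptr) - 1)])
    # row_sums -> array([ 3,  3, 15]), i.e. the per-row sums of the matrix above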
def __setitem__(self, index, x): try: assert len(index) == 2 except (AssertionError, TypeError): raise IndexError, "invalid index" i, j = index if isinstance(i, int): if not (i>=0 and i<self.shape[0]): raise IndexError, "lil_matrix index out of range" else: if isinstance(i, slice): seq = xrange(i.start or 0, i.stop or self.shape[1], i.step or 1) elif operator.isSequenceType(i): seq = i else: raise IndexError, "invalid index" try: if not len(x) == len(seq): raise ValueError, "number of elements in source must be" \ " same as number of elements in destimation" except TypeError: # Either x or seq is not a sequence. Note that a sparse matrix # is also not a sequence under this definition. # Currently we don't support setting to/from non-sequence types. # This could be enhanced, though, to allow a scalar source, # and/or a sparse vector. raise TypeError, "unsupported type for lil_matrix.__setitem__" else: # Sequence: call __setitem__ recursively, once for each row for i in xrange(len(seq)): self[seq[i], index[1]] = x[i] return
9d0e035693e0e9fbbcd4c00deced4aea257fbed5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9d0e035693e0e9fbbcd4c00deced4aea257fbed5/sparse.py
x = asarray(x).squeeze()
x = asarray(x)
def __setitem__(self, index, x): try: assert len(index) == 2 except (AssertionError, TypeError): raise IndexError, "invalid index" i, j = index if isinstance(i, int): if not (i>=0 and i<self.shape[0]): raise IndexError, "lil_matrix index out of range" else: if isinstance(i, slice): seq = xrange(i.start or 0, i.stop or self.shape[1], i.step or 1) elif operator.isSequenceType(i): seq = i else: raise IndexError, "invalid index" try: if not len(x) == len(seq): raise ValueError, "number of elements in source must be" \ " same as number of elements in destimation" except TypeError: # Either x or seq is not a sequence. Note that a sparse matrix # is also not a sequence under this definition. # Currently we don't support setting to/from non-sequence types. # This could be enhanced, though, to allow a scalar source, # and/or a sparse vector. raise TypeError, "unsupported type for lil_matrix.__setitem__" else: # Sequence: call __setitem__ recursively, once for each row for i in xrange(len(seq)): self[seq[i], index[1]] = x[i] return
9d0e035693e0e9fbbcd4c00deced4aea257fbed5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9d0e035693e0e9fbbcd4c00deced4aea257fbed5/sparse.py
if len(x) != len(seq): raise ValueError, "number of elements in source" \ " must be same as number of elements in" \ " destimation or 1"
else: if x.ndim == 2: if x.shape != (1, len(seq)): raise ValueError, \ "source and destination have incompatible "\ "dimensions" else: x = x.squeeze()
def __setitem__(self, index, x): try: assert len(index) == 2 except (AssertionError, TypeError): raise IndexError, "invalid index" i, j = index if isinstance(i, int): if not (i>=0 and i<self.shape[0]): raise IndexError, "lil_matrix index out of range" else: if isinstance(i, slice): seq = xrange(i.start or 0, i.stop or self.shape[1], i.step or 1) elif operator.isSequenceType(i): seq = i else: raise IndexError, "invalid index" try: if not len(x) == len(seq): raise ValueError, "number of elements in source must be" \ " same as number of elements in destimation" except TypeError: # Either x or seq is not a sequence. Note that a sparse matrix # is also not a sequence under this definition. # Currently we don't support setting to/from non-sequence types. # This could be enhanced, though, to allow a scalar source, # and/or a sparse vector. raise TypeError, "unsupported type for lil_matrix.__setitem__" else: # Sequence: call __setitem__ recursively, once for each row for i in xrange(len(seq)): self[seq[i], index[1]] = x[i] return
9d0e035693e0e9fbbcd4c00deced4aea257fbed5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9d0e035693e0e9fbbcd4c00deced4aea257fbed5/sparse.py
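Together with the earlier record that drops the unconditional .squeeze() after asarray(x), this change only squeezes a 2-D source when it is a single row of the right length, and rejects anything else. An illustrative check of that rule on standalone arrays (not the lil_matrix class itself):

    import numpy as np
    seq = [0, 2, 4]                       # destination column indices
    x = np.asarray([[10, 20, 30]])        # shape (1, 3) == (1, len(seq)): accepted
    if x.ndim == 2:
        if x.shape != (1, len(seq)):
            raise ValueError("source and destination have incompatible dimensions")
        x = x.squeeze()                   # back to shape (3,) for the element loop
    # a (3, 1) or (2, 3) source would now raise instead of being silently squeezed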
def __setitem__(self, index, x): try: assert len(index) == 2 except (AssertionError, TypeError): raise IndexError, "invalid index" i, j = index if isinstance(i, int): if not (i>=0 and i<self.shape[0]): raise IndexError, "lil_matrix index out of range" else: if isinstance(i, slice): seq = xrange(i.start or 0, i.stop or self.shape[1], i.step or 1) elif operator.isSequenceType(i): seq = i else: raise IndexError, "invalid index" try: if not len(x) == len(seq): raise ValueError, "number of elements in source must be" \ " same as number of elements in destimation" except TypeError: # Either x or seq is not a sequence. Note that a sparse matrix # is also not a sequence under this definition. # Currently we don't support setting to/from non-sequence types. # This could be enhanced, though, to allow a scalar source, # and/or a sparse vector. raise TypeError, "unsupported type for lil_matrix.__setitem__" else: # Sequence: call __setitem__ recursively, once for each row for i in xrange(len(seq)): self[seq[i], index[1]] = x[i] return
9d0e035693e0e9fbbcd4c00deced4aea257fbed5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9d0e035693e0e9fbbcd4c00deced4aea257fbed5/sparse.py
def __mul__(self, other): # self * other if isscalar(other) or (isdense(other) and rank(other)==0): # Was: new = lil_matrix(self.shape, dtype=self.dtype) new = self.copy() if other == 0: # Multiply by zero: return the zero matrix return new # Multiply this scalar by every element. new.data = [[val * other for val in rowvals] for rowvals in new.data] return new else: return self.dot(other)
9d0e035693e0e9fbbcd4c00deced4aea257fbed5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9d0e035693e0e9fbbcd4c00deced4aea257fbed5/sparse.py
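The scalar branch of __mul__ above works directly on the LIL storage, a Python list of per-row value lists, so scaling is a nested list comprehension over a copy. Illustrative sketch on plain lists (not the sparse class):

    data = [[1, 2], [], [3]]                               # per-row nonzero values
    other = 5
    scaled = [[val * other for val in rowvals] for rowvals in data]
    # scaled -> [[5, 10], [], [15]]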
fsim[0] = apply(func,(x0,)+args)
fsim[0] = func(x0)
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using the downhill simplex algorithm. Description: Uses a Nelder-Mead simplex algorithm to find the minimum of function of one or more variables. Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, iter, funcalls, warnflag}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- acceptable relative error in xopt for convergence. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return list of solutions at each iteration """ x0 = asfarray(x0) N = len(x0) rank = len(x0.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 200 if maxfun is None: maxfun = N * 200 rho = 1; chi = 2; psi = 0.5; sigma = 0.5; one2np1 = range(1,N+1) if rank == 0: sim = Num.zeros((N+1,),x0.typecode()) else: sim = Num.zeros((N+1,N),x0.typecode()) fsim = Num.zeros((N+1,),'d') sim[0] = x0 if retall: allvecs = [sim[0]] fsim[0] = apply(func,(x0,)+args) nonzdelt = 0.05 zdelt = 0.00025 for k in range(0,N): y = Num.array(x0,copy=1) if y[k] != 0: y[k] = (1+nonzdelt)*y[k] else: y[k] = zdelt sim[k+1] = y f = apply(func,(y,)+args) fsim[k+1] = f ind = Num.argsort(fsim) fsim = Num.take(fsim,ind) # sort so sim[0,:] has the lowest function value sim = Num.take(sim,ind,0) iterations = 1 funcalls = N+1 while (funcalls < maxfun and iterations < maxiter): if (max(Num.ravel(abs(sim[1:]-sim[0]))) <= xtol \ and max(abs(fsim[0]-fsim[1:])) <= ftol): break xbar = Num.add.reduce(sim[:-1],0) / N xr = (1+rho)*xbar - rho*sim[-1] fxr = apply(func,(xr,)+args) funcalls = funcalls + 1 doshrink = 0 if fxr < fsim[0]: xe = (1+rho*chi)*xbar - rho*chi*sim[-1] fxe = apply(func,(xe,)+args) funcalls = funcalls + 1 if fxe < fxr: sim[-1] = xe fsim[-1] = fxe else: sim[-1] = xr fsim[-1] = fxr else: # fsim[0] <= fxr if fxr < fsim[-2]: sim[-1] = xr fsim[-1] = fxr else: # fxr >= fsim[-2] # Perform contraction if fxr < fsim[-1]: xc = (1+psi*rho)*xbar - psi*rho*sim[-1] fxc = apply(func,(xc,)+args) funcalls = funcalls + 1 if fxc <= fxr: sim[-1] = xc fsim[-1] = fxc else: doshrink=1 else: # Perform an inside contraction xcc = (1-psi)*xbar + psi*sim[-1] fxcc = apply(func,(xcc,)+args) funcalls = funcalls + 1 if fxcc < fsim[-1]: sim[-1] = xcc fsim[-1] = fxcc else: doshrink = 1 if doshrink: for j in one2np1: sim[j] = sim[0] + sigma*(sim[j] - sim[0]) fsim[j] = apply(func,(sim[j],)+args) funcalls = funcalls + N ind = Num.argsort(fsim) sim = Num.take(sim,ind,0) fsim = Num.take(fsim,ind) iterations = iterations + 1 if retall: allvecs.append(sim[0]) x = sim[0] fval = min(fsim) warnflag = 0 if funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." 
elif iterations >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iterations print " Function evaluations: %d" % funcalls if full_output: retlist = x, fval, iterations, funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
f = apply(func,(y,)+args)
f = func(y)
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using the downhill simplex algorithm. Description: Uses a Nelder-Mead simplex algorithm to find the minimum of function of one or more variables. Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, iter, funcalls, warnflag}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- acceptable relative error in xopt for convergence. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return list of solutions at each iteration """ x0 = asfarray(x0) N = len(x0) rank = len(x0.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 200 if maxfun is None: maxfun = N * 200 rho = 1; chi = 2; psi = 0.5; sigma = 0.5; one2np1 = range(1,N+1) if rank == 0: sim = Num.zeros((N+1,),x0.typecode()) else: sim = Num.zeros((N+1,N),x0.typecode()) fsim = Num.zeros((N+1,),'d') sim[0] = x0 if retall: allvecs = [sim[0]] fsim[0] = apply(func,(x0,)+args) nonzdelt = 0.05 zdelt = 0.00025 for k in range(0,N): y = Num.array(x0,copy=1) if y[k] != 0: y[k] = (1+nonzdelt)*y[k] else: y[k] = zdelt sim[k+1] = y f = apply(func,(y,)+args) fsim[k+1] = f ind = Num.argsort(fsim) fsim = Num.take(fsim,ind) # sort so sim[0,:] has the lowest function value sim = Num.take(sim,ind,0) iterations = 1 funcalls = N+1 while (funcalls < maxfun and iterations < maxiter): if (max(Num.ravel(abs(sim[1:]-sim[0]))) <= xtol \ and max(abs(fsim[0]-fsim[1:])) <= ftol): break xbar = Num.add.reduce(sim[:-1],0) / N xr = (1+rho)*xbar - rho*sim[-1] fxr = apply(func,(xr,)+args) funcalls = funcalls + 1 doshrink = 0 if fxr < fsim[0]: xe = (1+rho*chi)*xbar - rho*chi*sim[-1] fxe = apply(func,(xe,)+args) funcalls = funcalls + 1 if fxe < fxr: sim[-1] = xe fsim[-1] = fxe else: sim[-1] = xr fsim[-1] = fxr else: # fsim[0] <= fxr if fxr < fsim[-2]: sim[-1] = xr fsim[-1] = fxr else: # fxr >= fsim[-2] # Perform contraction if fxr < fsim[-1]: xc = (1+psi*rho)*xbar - psi*rho*sim[-1] fxc = apply(func,(xc,)+args) funcalls = funcalls + 1 if fxc <= fxr: sim[-1] = xc fsim[-1] = fxc else: doshrink=1 else: # Perform an inside contraction xcc = (1-psi)*xbar + psi*sim[-1] fxcc = apply(func,(xcc,)+args) funcalls = funcalls + 1 if fxcc < fsim[-1]: sim[-1] = xcc fsim[-1] = fxcc else: doshrink = 1 if doshrink: for j in one2np1: sim[j] = sim[0] + sigma*(sim[j] - sim[0]) fsim[j] = apply(func,(sim[j],)+args) funcalls = funcalls + N ind = Num.argsort(fsim) sim = Num.take(sim,ind,0) fsim = Num.take(fsim,ind) iterations = iterations + 1 if retall: allvecs.append(sim[0]) x = sim[0] fval = min(fsim) warnflag = 0 if funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." 
elif iterations >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iterations print " Function evaluations: %d" % funcalls if full_output: retlist = x, fval, iterations, funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
funcalls = N+1 while (funcalls < maxfun and iterations < maxiter):
while (fcalls[0] < maxfun and iterations < maxiter):
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using the downhill simplex algorithm. Description: Uses a Nelder-Mead simplex algorithm to find the minimum of function of one or more variables. Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, iter, funcalls, warnflag}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- acceptable relative error in xopt for convergence. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return list of solutions at each iteration """ x0 = asfarray(x0) N = len(x0) rank = len(x0.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 200 if maxfun is None: maxfun = N * 200 rho = 1; chi = 2; psi = 0.5; sigma = 0.5; one2np1 = range(1,N+1) if rank == 0: sim = Num.zeros((N+1,),x0.typecode()) else: sim = Num.zeros((N+1,N),x0.typecode()) fsim = Num.zeros((N+1,),'d') sim[0] = x0 if retall: allvecs = [sim[0]] fsim[0] = apply(func,(x0,)+args) nonzdelt = 0.05 zdelt = 0.00025 for k in range(0,N): y = Num.array(x0,copy=1) if y[k] != 0: y[k] = (1+nonzdelt)*y[k] else: y[k] = zdelt sim[k+1] = y f = apply(func,(y,)+args) fsim[k+1] = f ind = Num.argsort(fsim) fsim = Num.take(fsim,ind) # sort so sim[0,:] has the lowest function value sim = Num.take(sim,ind,0) iterations = 1 funcalls = N+1 while (funcalls < maxfun and iterations < maxiter): if (max(Num.ravel(abs(sim[1:]-sim[0]))) <= xtol \ and max(abs(fsim[0]-fsim[1:])) <= ftol): break xbar = Num.add.reduce(sim[:-1],0) / N xr = (1+rho)*xbar - rho*sim[-1] fxr = apply(func,(xr,)+args) funcalls = funcalls + 1 doshrink = 0 if fxr < fsim[0]: xe = (1+rho*chi)*xbar - rho*chi*sim[-1] fxe = apply(func,(xe,)+args) funcalls = funcalls + 1 if fxe < fxr: sim[-1] = xe fsim[-1] = fxe else: sim[-1] = xr fsim[-1] = fxr else: # fsim[0] <= fxr if fxr < fsim[-2]: sim[-1] = xr fsim[-1] = fxr else: # fxr >= fsim[-2] # Perform contraction if fxr < fsim[-1]: xc = (1+psi*rho)*xbar - psi*rho*sim[-1] fxc = apply(func,(xc,)+args) funcalls = funcalls + 1 if fxc <= fxr: sim[-1] = xc fsim[-1] = fxc else: doshrink=1 else: # Perform an inside contraction xcc = (1-psi)*xbar + psi*sim[-1] fxcc = apply(func,(xcc,)+args) funcalls = funcalls + 1 if fxcc < fsim[-1]: sim[-1] = xcc fsim[-1] = fxcc else: doshrink = 1 if doshrink: for j in one2np1: sim[j] = sim[0] + sigma*(sim[j] - sim[0]) fsim[j] = apply(func,(sim[j],)+args) funcalls = funcalls + N ind = Num.argsort(fsim) sim = Num.take(sim,ind,0) fsim = Num.take(fsim,ind) iterations = iterations + 1 if retall: allvecs.append(sim[0]) x = sim[0] fval = min(fsim) warnflag = 0 if funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." 
elif iterations >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iterations print " Function evaluations: %d" % funcalls if full_output: retlist = x, fval, iterations, funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
fxr = apply(func,(xr,)+args) funcalls = funcalls + 1
fxr = func(xr)
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using the downhill simplex algorithm. Description: Uses a Nelder-Mead simplex algorithm to find the minimum of function of one or more variables. Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, iter, funcalls, warnflag}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- acceptable relative error in xopt for convergence. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return list of solutions at each iteration """ x0 = asfarray(x0) N = len(x0) rank = len(x0.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 200 if maxfun is None: maxfun = N * 200 rho = 1; chi = 2; psi = 0.5; sigma = 0.5; one2np1 = range(1,N+1) if rank == 0: sim = Num.zeros((N+1,),x0.typecode()) else: sim = Num.zeros((N+1,N),x0.typecode()) fsim = Num.zeros((N+1,),'d') sim[0] = x0 if retall: allvecs = [sim[0]] fsim[0] = apply(func,(x0,)+args) nonzdelt = 0.05 zdelt = 0.00025 for k in range(0,N): y = Num.array(x0,copy=1) if y[k] != 0: y[k] = (1+nonzdelt)*y[k] else: y[k] = zdelt sim[k+1] = y f = apply(func,(y,)+args) fsim[k+1] = f ind = Num.argsort(fsim) fsim = Num.take(fsim,ind) # sort so sim[0,:] has the lowest function value sim = Num.take(sim,ind,0) iterations = 1 funcalls = N+1 while (funcalls < maxfun and iterations < maxiter): if (max(Num.ravel(abs(sim[1:]-sim[0]))) <= xtol \ and max(abs(fsim[0]-fsim[1:])) <= ftol): break xbar = Num.add.reduce(sim[:-1],0) / N xr = (1+rho)*xbar - rho*sim[-1] fxr = apply(func,(xr,)+args) funcalls = funcalls + 1 doshrink = 0 if fxr < fsim[0]: xe = (1+rho*chi)*xbar - rho*chi*sim[-1] fxe = apply(func,(xe,)+args) funcalls = funcalls + 1 if fxe < fxr: sim[-1] = xe fsim[-1] = fxe else: sim[-1] = xr fsim[-1] = fxr else: # fsim[0] <= fxr if fxr < fsim[-2]: sim[-1] = xr fsim[-1] = fxr else: # fxr >= fsim[-2] # Perform contraction if fxr < fsim[-1]: xc = (1+psi*rho)*xbar - psi*rho*sim[-1] fxc = apply(func,(xc,)+args) funcalls = funcalls + 1 if fxc <= fxr: sim[-1] = xc fsim[-1] = fxc else: doshrink=1 else: # Perform an inside contraction xcc = (1-psi)*xbar + psi*sim[-1] fxcc = apply(func,(xcc,)+args) funcalls = funcalls + 1 if fxcc < fsim[-1]: sim[-1] = xcc fsim[-1] = fxcc else: doshrink = 1 if doshrink: for j in one2np1: sim[j] = sim[0] + sigma*(sim[j] - sim[0]) fsim[j] = apply(func,(sim[j],)+args) funcalls = funcalls + N ind = Num.argsort(fsim) sim = Num.take(sim,ind,0) fsim = Num.take(fsim,ind) iterations = iterations + 1 if retall: allvecs.append(sim[0]) x = sim[0] fval = min(fsim) warnflag = 0 if funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." 
elif iterations >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iterations print " Function evaluations: %d" % funcalls if full_output: retlist = x, fval, iterations, funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
fxe = apply(func,(xe,)+args) funcalls = funcalls + 1
fxe = func(xe)
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using the downhill simplex algorithm. Description: Uses a Nelder-Mead simplex algorithm to find the minimum of function of one or more variables. Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, iter, funcalls, warnflag}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- acceptable relative error in xopt for convergence. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return list of solutions at each iteration """ x0 = asfarray(x0) N = len(x0) rank = len(x0.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 200 if maxfun is None: maxfun = N * 200 rho = 1; chi = 2; psi = 0.5; sigma = 0.5; one2np1 = range(1,N+1) if rank == 0: sim = Num.zeros((N+1,),x0.typecode()) else: sim = Num.zeros((N+1,N),x0.typecode()) fsim = Num.zeros((N+1,),'d') sim[0] = x0 if retall: allvecs = [sim[0]] fsim[0] = apply(func,(x0,)+args) nonzdelt = 0.05 zdelt = 0.00025 for k in range(0,N): y = Num.array(x0,copy=1) if y[k] != 0: y[k] = (1+nonzdelt)*y[k] else: y[k] = zdelt sim[k+1] = y f = apply(func,(y,)+args) fsim[k+1] = f ind = Num.argsort(fsim) fsim = Num.take(fsim,ind) # sort so sim[0,:] has the lowest function value sim = Num.take(sim,ind,0) iterations = 1 funcalls = N+1 while (funcalls < maxfun and iterations < maxiter): if (max(Num.ravel(abs(sim[1:]-sim[0]))) <= xtol \ and max(abs(fsim[0]-fsim[1:])) <= ftol): break xbar = Num.add.reduce(sim[:-1],0) / N xr = (1+rho)*xbar - rho*sim[-1] fxr = apply(func,(xr,)+args) funcalls = funcalls + 1 doshrink = 0 if fxr < fsim[0]: xe = (1+rho*chi)*xbar - rho*chi*sim[-1] fxe = apply(func,(xe,)+args) funcalls = funcalls + 1 if fxe < fxr: sim[-1] = xe fsim[-1] = fxe else: sim[-1] = xr fsim[-1] = fxr else: # fsim[0] <= fxr if fxr < fsim[-2]: sim[-1] = xr fsim[-1] = fxr else: # fxr >= fsim[-2] # Perform contraction if fxr < fsim[-1]: xc = (1+psi*rho)*xbar - psi*rho*sim[-1] fxc = apply(func,(xc,)+args) funcalls = funcalls + 1 if fxc <= fxr: sim[-1] = xc fsim[-1] = fxc else: doshrink=1 else: # Perform an inside contraction xcc = (1-psi)*xbar + psi*sim[-1] fxcc = apply(func,(xcc,)+args) funcalls = funcalls + 1 if fxcc < fsim[-1]: sim[-1] = xcc fsim[-1] = fxcc else: doshrink = 1 if doshrink: for j in one2np1: sim[j] = sim[0] + sigma*(sim[j] - sim[0]) fsim[j] = apply(func,(sim[j],)+args) funcalls = funcalls + N ind = Num.argsort(fsim) sim = Num.take(sim,ind,0) fsim = Num.take(fsim,ind) iterations = iterations + 1 if retall: allvecs.append(sim[0]) x = sim[0] fval = min(fsim) warnflag = 0 if funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." 
elif iterations >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iterations print " Function evaluations: %d" % funcalls if full_output: retlist = x, fval, iterations, funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
fxc = apply(func,(xc,)+args) funcalls = funcalls + 1
fxc = func(xc)
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using the downhill simplex algorithm. Description: Uses a Nelder-Mead simplex algorithm to find the minimum of function of one or more variables. Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, iter, funcalls, warnflag}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- acceptable relative error in xopt for convergence. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return list of solutions at each iteration """ x0 = asfarray(x0) N = len(x0) rank = len(x0.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 200 if maxfun is None: maxfun = N * 200 rho = 1; chi = 2; psi = 0.5; sigma = 0.5; one2np1 = range(1,N+1) if rank == 0: sim = Num.zeros((N+1,),x0.typecode()) else: sim = Num.zeros((N+1,N),x0.typecode()) fsim = Num.zeros((N+1,),'d') sim[0] = x0 if retall: allvecs = [sim[0]] fsim[0] = apply(func,(x0,)+args) nonzdelt = 0.05 zdelt = 0.00025 for k in range(0,N): y = Num.array(x0,copy=1) if y[k] != 0: y[k] = (1+nonzdelt)*y[k] else: y[k] = zdelt sim[k+1] = y f = apply(func,(y,)+args) fsim[k+1] = f ind = Num.argsort(fsim) fsim = Num.take(fsim,ind) # sort so sim[0,:] has the lowest function value sim = Num.take(sim,ind,0) iterations = 1 funcalls = N+1 while (funcalls < maxfun and iterations < maxiter): if (max(Num.ravel(abs(sim[1:]-sim[0]))) <= xtol \ and max(abs(fsim[0]-fsim[1:])) <= ftol): break xbar = Num.add.reduce(sim[:-1],0) / N xr = (1+rho)*xbar - rho*sim[-1] fxr = apply(func,(xr,)+args) funcalls = funcalls + 1 doshrink = 0 if fxr < fsim[0]: xe = (1+rho*chi)*xbar - rho*chi*sim[-1] fxe = apply(func,(xe,)+args) funcalls = funcalls + 1 if fxe < fxr: sim[-1] = xe fsim[-1] = fxe else: sim[-1] = xr fsim[-1] = fxr else: # fsim[0] <= fxr if fxr < fsim[-2]: sim[-1] = xr fsim[-1] = fxr else: # fxr >= fsim[-2] # Perform contraction if fxr < fsim[-1]: xc = (1+psi*rho)*xbar - psi*rho*sim[-1] fxc = apply(func,(xc,)+args) funcalls = funcalls + 1 if fxc <= fxr: sim[-1] = xc fsim[-1] = fxc else: doshrink=1 else: # Perform an inside contraction xcc = (1-psi)*xbar + psi*sim[-1] fxcc = apply(func,(xcc,)+args) funcalls = funcalls + 1 if fxcc < fsim[-1]: sim[-1] = xcc fsim[-1] = fxcc else: doshrink = 1 if doshrink: for j in one2np1: sim[j] = sim[0] + sigma*(sim[j] - sim[0]) fsim[j] = apply(func,(sim[j],)+args) funcalls = funcalls + N ind = Num.argsort(fsim) sim = Num.take(sim,ind,0) fsim = Num.take(fsim,ind) iterations = iterations + 1 if retall: allvecs.append(sim[0]) x = sim[0] fval = min(fsim) warnflag = 0 if funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." 
elif iterations >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iterations print " Function evaluations: %d" % funcalls if full_output: retlist = x, fval, iterations, funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
fxcc = apply(func,(xcc,)+args) funcalls = funcalls + 1
fxcc = func(xcc)
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using the downhill simplex algorithm. Description: Uses a Nelder-Mead simplex algorithm to find the minimum of function of one or more variables. Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, iter, funcalls, warnflag}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- acceptable relative error in xopt for convergence. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return list of solutions at each iteration """ x0 = asfarray(x0) N = len(x0) rank = len(x0.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 200 if maxfun is None: maxfun = N * 200 rho = 1; chi = 2; psi = 0.5; sigma = 0.5; one2np1 = range(1,N+1) if rank == 0: sim = Num.zeros((N+1,),x0.typecode()) else: sim = Num.zeros((N+1,N),x0.typecode()) fsim = Num.zeros((N+1,),'d') sim[0] = x0 if retall: allvecs = [sim[0]] fsim[0] = apply(func,(x0,)+args) nonzdelt = 0.05 zdelt = 0.00025 for k in range(0,N): y = Num.array(x0,copy=1) if y[k] != 0: y[k] = (1+nonzdelt)*y[k] else: y[k] = zdelt sim[k+1] = y f = apply(func,(y,)+args) fsim[k+1] = f ind = Num.argsort(fsim) fsim = Num.take(fsim,ind) # sort so sim[0,:] has the lowest function value sim = Num.take(sim,ind,0) iterations = 1 funcalls = N+1 while (funcalls < maxfun and iterations < maxiter): if (max(Num.ravel(abs(sim[1:]-sim[0]))) <= xtol \ and max(abs(fsim[0]-fsim[1:])) <= ftol): break xbar = Num.add.reduce(sim[:-1],0) / N xr = (1+rho)*xbar - rho*sim[-1] fxr = apply(func,(xr,)+args) funcalls = funcalls + 1 doshrink = 0 if fxr < fsim[0]: xe = (1+rho*chi)*xbar - rho*chi*sim[-1] fxe = apply(func,(xe,)+args) funcalls = funcalls + 1 if fxe < fxr: sim[-1] = xe fsim[-1] = fxe else: sim[-1] = xr fsim[-1] = fxr else: # fsim[0] <= fxr if fxr < fsim[-2]: sim[-1] = xr fsim[-1] = fxr else: # fxr >= fsim[-2] # Perform contraction if fxr < fsim[-1]: xc = (1+psi*rho)*xbar - psi*rho*sim[-1] fxc = apply(func,(xc,)+args) funcalls = funcalls + 1 if fxc <= fxr: sim[-1] = xc fsim[-1] = fxc else: doshrink=1 else: # Perform an inside contraction xcc = (1-psi)*xbar + psi*sim[-1] fxcc = apply(func,(xcc,)+args) funcalls = funcalls + 1 if fxcc < fsim[-1]: sim[-1] = xcc fsim[-1] = fxcc else: doshrink = 1 if doshrink: for j in one2np1: sim[j] = sim[0] + sigma*(sim[j] - sim[0]) fsim[j] = apply(func,(sim[j],)+args) funcalls = funcalls + N ind = Num.argsort(fsim) sim = Num.take(sim,ind,0) fsim = Num.take(fsim,ind) iterations = iterations + 1 if retall: allvecs.append(sim[0]) x = sim[0] fval = min(fsim) warnflag = 0 if funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." 
elif iterations >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iterations print " Function evaluations: %d" % funcalls if full_output: retlist = x, fval, iterations, funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
fsim[j] = apply(func,(sim[j],)+args) funcalls = funcalls + N
fsim[j] = func(sim[j])
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using the downhill simplex algorithm. Description: Uses a Nelder-Mead simplex algorithm to find the minimum of function of one or more variables. Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, iter, funcalls, warnflag}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- acceptable relative error in xopt for convergence. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return list of solutions at each iteration """ x0 = asfarray(x0) N = len(x0) rank = len(x0.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 200 if maxfun is None: maxfun = N * 200 rho = 1; chi = 2; psi = 0.5; sigma = 0.5; one2np1 = range(1,N+1) if rank == 0: sim = Num.zeros((N+1,),x0.typecode()) else: sim = Num.zeros((N+1,N),x0.typecode()) fsim = Num.zeros((N+1,),'d') sim[0] = x0 if retall: allvecs = [sim[0]] fsim[0] = apply(func,(x0,)+args) nonzdelt = 0.05 zdelt = 0.00025 for k in range(0,N): y = Num.array(x0,copy=1) if y[k] != 0: y[k] = (1+nonzdelt)*y[k] else: y[k] = zdelt sim[k+1] = y f = apply(func,(y,)+args) fsim[k+1] = f ind = Num.argsort(fsim) fsim = Num.take(fsim,ind) # sort so sim[0,:] has the lowest function value sim = Num.take(sim,ind,0) iterations = 1 funcalls = N+1 while (funcalls < maxfun and iterations < maxiter): if (max(Num.ravel(abs(sim[1:]-sim[0]))) <= xtol \ and max(abs(fsim[0]-fsim[1:])) <= ftol): break xbar = Num.add.reduce(sim[:-1],0) / N xr = (1+rho)*xbar - rho*sim[-1] fxr = apply(func,(xr,)+args) funcalls = funcalls + 1 doshrink = 0 if fxr < fsim[0]: xe = (1+rho*chi)*xbar - rho*chi*sim[-1] fxe = apply(func,(xe,)+args) funcalls = funcalls + 1 if fxe < fxr: sim[-1] = xe fsim[-1] = fxe else: sim[-1] = xr fsim[-1] = fxr else: # fsim[0] <= fxr if fxr < fsim[-2]: sim[-1] = xr fsim[-1] = fxr else: # fxr >= fsim[-2] # Perform contraction if fxr < fsim[-1]: xc = (1+psi*rho)*xbar - psi*rho*sim[-1] fxc = apply(func,(xc,)+args) funcalls = funcalls + 1 if fxc <= fxr: sim[-1] = xc fsim[-1] = fxc else: doshrink=1 else: # Perform an inside contraction xcc = (1-psi)*xbar + psi*sim[-1] fxcc = apply(func,(xcc,)+args) funcalls = funcalls + 1 if fxcc < fsim[-1]: sim[-1] = xcc fsim[-1] = fxcc else: doshrink = 1 if doshrink: for j in one2np1: sim[j] = sim[0] + sigma*(sim[j] - sim[0]) fsim[j] = apply(func,(sim[j],)+args) funcalls = funcalls + N ind = Num.argsort(fsim) sim = Num.take(sim,ind,0) fsim = Num.take(fsim,ind) iterations = iterations + 1 if retall: allvecs.append(sim[0]) x = sim[0] fval = min(fsim) warnflag = 0 if funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." 
elif iterations >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iterations print " Function evaluations: %d" % funcalls if full_output: retlist = x, fval, iterations, funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
if funcalls >= maxfun:
if fcalls[0] >= maxfun:
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using the downhill simplex algorithm. Description: Uses a Nelder-Mead simplex algorithm to find the minimum of function of one or more variables. Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, iter, funcalls, warnflag}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- acceptable relative error in xopt for convergence. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return list of solutions at each iteration """ x0 = asfarray(x0) N = len(x0) rank = len(x0.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 200 if maxfun is None: maxfun = N * 200 rho = 1; chi = 2; psi = 0.5; sigma = 0.5; one2np1 = range(1,N+1) if rank == 0: sim = Num.zeros((N+1,),x0.typecode()) else: sim = Num.zeros((N+1,N),x0.typecode()) fsim = Num.zeros((N+1,),'d') sim[0] = x0 if retall: allvecs = [sim[0]] fsim[0] = apply(func,(x0,)+args) nonzdelt = 0.05 zdelt = 0.00025 for k in range(0,N): y = Num.array(x0,copy=1) if y[k] != 0: y[k] = (1+nonzdelt)*y[k] else: y[k] = zdelt sim[k+1] = y f = apply(func,(y,)+args) fsim[k+1] = f ind = Num.argsort(fsim) fsim = Num.take(fsim,ind) # sort so sim[0,:] has the lowest function value sim = Num.take(sim,ind,0) iterations = 1 funcalls = N+1 while (funcalls < maxfun and iterations < maxiter): if (max(Num.ravel(abs(sim[1:]-sim[0]))) <= xtol \ and max(abs(fsim[0]-fsim[1:])) <= ftol): break xbar = Num.add.reduce(sim[:-1],0) / N xr = (1+rho)*xbar - rho*sim[-1] fxr = apply(func,(xr,)+args) funcalls = funcalls + 1 doshrink = 0 if fxr < fsim[0]: xe = (1+rho*chi)*xbar - rho*chi*sim[-1] fxe = apply(func,(xe,)+args) funcalls = funcalls + 1 if fxe < fxr: sim[-1] = xe fsim[-1] = fxe else: sim[-1] = xr fsim[-1] = fxr else: # fsim[0] <= fxr if fxr < fsim[-2]: sim[-1] = xr fsim[-1] = fxr else: # fxr >= fsim[-2] # Perform contraction if fxr < fsim[-1]: xc = (1+psi*rho)*xbar - psi*rho*sim[-1] fxc = apply(func,(xc,)+args) funcalls = funcalls + 1 if fxc <= fxr: sim[-1] = xc fsim[-1] = fxc else: doshrink=1 else: # Perform an inside contraction xcc = (1-psi)*xbar + psi*sim[-1] fxcc = apply(func,(xcc,)+args) funcalls = funcalls + 1 if fxcc < fsim[-1]: sim[-1] = xcc fsim[-1] = fxcc else: doshrink = 1 if doshrink: for j in one2np1: sim[j] = sim[0] + sigma*(sim[j] - sim[0]) fsim[j] = apply(func,(sim[j],)+args) funcalls = funcalls + N ind = Num.argsort(fsim) sim = Num.take(sim,ind,0) fsim = Num.take(fsim,ind) iterations = iterations + 1 if retall: allvecs.append(sim[0]) x = sim[0] fval = min(fsim) warnflag = 0 if funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." 
elif iterations >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iterations print " Function evaluations: %d" % funcalls if full_output: retlist = x, fval, iterations, funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
print " Function evaluations: %d" % funcalls
print " Function evaluations: %d" % fcalls[0]
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using the downhill simplex algorithm. Description: Uses a Nelder-Mead simplex algorithm to find the minimum of function of one or more variables. Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, iter, funcalls, warnflag}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- acceptable relative error in xopt for convergence. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return list of solutions at each iteration """ x0 = asfarray(x0) N = len(x0) rank = len(x0.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 200 if maxfun is None: maxfun = N * 200 rho = 1; chi = 2; psi = 0.5; sigma = 0.5; one2np1 = range(1,N+1) if rank == 0: sim = Num.zeros((N+1,),x0.typecode()) else: sim = Num.zeros((N+1,N),x0.typecode()) fsim = Num.zeros((N+1,),'d') sim[0] = x0 if retall: allvecs = [sim[0]] fsim[0] = apply(func,(x0,)+args) nonzdelt = 0.05 zdelt = 0.00025 for k in range(0,N): y = Num.array(x0,copy=1) if y[k] != 0: y[k] = (1+nonzdelt)*y[k] else: y[k] = zdelt sim[k+1] = y f = apply(func,(y,)+args) fsim[k+1] = f ind = Num.argsort(fsim) fsim = Num.take(fsim,ind) # sort so sim[0,:] has the lowest function value sim = Num.take(sim,ind,0) iterations = 1 funcalls = N+1 while (funcalls < maxfun and iterations < maxiter): if (max(Num.ravel(abs(sim[1:]-sim[0]))) <= xtol \ and max(abs(fsim[0]-fsim[1:])) <= ftol): break xbar = Num.add.reduce(sim[:-1],0) / N xr = (1+rho)*xbar - rho*sim[-1] fxr = apply(func,(xr,)+args) funcalls = funcalls + 1 doshrink = 0 if fxr < fsim[0]: xe = (1+rho*chi)*xbar - rho*chi*sim[-1] fxe = apply(func,(xe,)+args) funcalls = funcalls + 1 if fxe < fxr: sim[-1] = xe fsim[-1] = fxe else: sim[-1] = xr fsim[-1] = fxr else: # fsim[0] <= fxr if fxr < fsim[-2]: sim[-1] = xr fsim[-1] = fxr else: # fxr >= fsim[-2] # Perform contraction if fxr < fsim[-1]: xc = (1+psi*rho)*xbar - psi*rho*sim[-1] fxc = apply(func,(xc,)+args) funcalls = funcalls + 1 if fxc <= fxr: sim[-1] = xc fsim[-1] = fxc else: doshrink=1 else: # Perform an inside contraction xcc = (1-psi)*xbar + psi*sim[-1] fxcc = apply(func,(xcc,)+args) funcalls = funcalls + 1 if fxcc < fsim[-1]: sim[-1] = xcc fsim[-1] = fxcc else: doshrink = 1 if doshrink: for j in one2np1: sim[j] = sim[0] + sigma*(sim[j] - sim[0]) fsim[j] = apply(func,(sim[j],)+args) funcalls = funcalls + N ind = Num.argsort(fsim) sim = Num.take(sim,ind,0) fsim = Num.take(fsim,ind) iterations = iterations + 1 if retall: allvecs.append(sim[0]) x = sim[0] fval = min(fsim) warnflag = 0 if funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." 
elif iterations >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iterations print " Function evaluations: %d" % funcalls if full_output: retlist = x, fval, iterations, funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
retlist = x, fval, iterations, funcalls, warnflag
retlist = x, fval, iterations, fcalls[0], warnflag
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using the downhill simplex algorithm. Description: Uses a Nelder-Mead simplex algorithm to find the minimum of function of one or more variables. Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, iter, funcalls, warnflag}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- acceptable relative error in xopt for convergence. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return list of solutions at each iteration """ x0 = asfarray(x0) N = len(x0) rank = len(x0.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 200 if maxfun is None: maxfun = N * 200 rho = 1; chi = 2; psi = 0.5; sigma = 0.5; one2np1 = range(1,N+1) if rank == 0: sim = Num.zeros((N+1,),x0.typecode()) else: sim = Num.zeros((N+1,N),x0.typecode()) fsim = Num.zeros((N+1,),'d') sim[0] = x0 if retall: allvecs = [sim[0]] fsim[0] = apply(func,(x0,)+args) nonzdelt = 0.05 zdelt = 0.00025 for k in range(0,N): y = Num.array(x0,copy=1) if y[k] != 0: y[k] = (1+nonzdelt)*y[k] else: y[k] = zdelt sim[k+1] = y f = apply(func,(y,)+args) fsim[k+1] = f ind = Num.argsort(fsim) fsim = Num.take(fsim,ind) # sort so sim[0,:] has the lowest function value sim = Num.take(sim,ind,0) iterations = 1 funcalls = N+1 while (funcalls < maxfun and iterations < maxiter): if (max(Num.ravel(abs(sim[1:]-sim[0]))) <= xtol \ and max(abs(fsim[0]-fsim[1:])) <= ftol): break xbar = Num.add.reduce(sim[:-1],0) / N xr = (1+rho)*xbar - rho*sim[-1] fxr = apply(func,(xr,)+args) funcalls = funcalls + 1 doshrink = 0 if fxr < fsim[0]: xe = (1+rho*chi)*xbar - rho*chi*sim[-1] fxe = apply(func,(xe,)+args) funcalls = funcalls + 1 if fxe < fxr: sim[-1] = xe fsim[-1] = fxe else: sim[-1] = xr fsim[-1] = fxr else: # fsim[0] <= fxr if fxr < fsim[-2]: sim[-1] = xr fsim[-1] = fxr else: # fxr >= fsim[-2] # Perform contraction if fxr < fsim[-1]: xc = (1+psi*rho)*xbar - psi*rho*sim[-1] fxc = apply(func,(xc,)+args) funcalls = funcalls + 1 if fxc <= fxr: sim[-1] = xc fsim[-1] = fxc else: doshrink=1 else: # Perform an inside contraction xcc = (1-psi)*xbar + psi*sim[-1] fxcc = apply(func,(xcc,)+args) funcalls = funcalls + 1 if fxcc < fsim[-1]: sim[-1] = xcc fsim[-1] = fxcc else: doshrink = 1 if doshrink: for j in one2np1: sim[j] = sim[0] + sigma*(sim[j] - sim[0]) fsim[j] = apply(func,(sim[j],)+args) funcalls = funcalls + N ind = Num.argsort(fsim) sim = Num.take(sim,ind,0) fsim = Num.take(fsim,ind) iterations = iterations + 1 if retall: allvecs.append(sim[0]) x = sim[0] fval = min(fsim) warnflag = 0 if funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." 
elif iterations >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iterations print " Function evaluations: %d" % funcalls if full_output: retlist = x, fval, iterations, funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
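The series of fmin records above replaces apply(func, (x,)+args) plus manual funcalls bookkeeping with plain func(x) calls counted through fcalls[0], and a later fmin_bfgs record obtains that pair from wrap_function(f, args). The helper's body does not appear in these records, so the following is only a sketch of a counter-wrapper consistent with how it is used, not the project's actual definition:

    def wrap_function(function, args):
        ncalls = [0]                      # one-element list so the closure can mutate it
        def wrapped(x):
            ncalls[0] += 1
            return function(x, *args)
        return ncalls, wrapped

    # Usage mirroring the records:
    #   fcalls, func = wrap_function(func, args)
    # after which func(x) both evaluates and counts, fcalls[0] holds the running
    # total, and the loop and termination tests read fcalls[0] instead of funcalls.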
app_fprime = 0 if fprime is None: app_fprime = 1
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- gradient norm must be less than gtol before succesful termination norm -- order of norm (Inf is max, -Inf is min) epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) if alpha_k is None: # line search failed try different one. func_calls = func_calls + fc grad_calls = grad_calls + gc alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk gfk = gfkp1 k = k + 1 gnorm = vecnorm(gfk,ord=norm) if (gnorm <= gtol): break try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break #print "Divide by zero encountered: Hessian calculation reset." 
#Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
func_calls = 0 grad_calls = 0
func_calls, f = wrap_function(f, args) if fprime is None: grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon)) else: grad_calls, myfprime = wrap_function(fprime, args) gfk = myfprime(x0)
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- gradient norm must be less than gtol before succesful termination norm -- order of norm (Inf is max, -Inf is min) epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) if alpha_k is None: # line search failed try different one. func_calls = func_calls + fc grad_calls = grad_calls + gc alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk gfk = gfkp1 k = k + 1 gnorm = vecnorm(gfk,ord=norm) if (gnorm <= gtol): break try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break #print "Divide by zero encountered: Hessian calculation reset." 
#Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
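Note: the change recorded above replaces the hand-maintained func_calls / grad_calls integers with callables returned by wrap_function, which bind the extra args and count their own invocations. The helper's body is not shown in these records, so the following is only a minimal sketch of what such a wrapper plausibly looks like; the one-element-list counter is the assumption that makes the later func_calls[0] reads work.

    def wrap_function(function, args):
        # a one-element list is mutable from inside the closure, so the
        # wrapper can bump the count while callers keep a live reference
        ncalls = [0]
        def function_wrapper(x):
            ncalls[0] += 1
            return function(x, *args)
        return ncalls, function_wrapper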
old_fval = f(x0,*args)
old_fval = f(x0)
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- gradient norm must be less than gtol before succesful termination norm -- order of norm (Inf is max, -Inf is min) epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) if alpha_k is None: # line search failed try different one. func_calls = func_calls + fc grad_calls = grad_calls + gc alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk gfk = gfkp1 k = k + 1 gnorm = vecnorm(gfk,ord=norm) if (gnorm <= gtol): break try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break #print "Divide by zero encountered: Hessian calculation reset." 
#Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
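Note: with the objective wrapped, the extra args tuple is already bound, which is why the initial evaluation in this record drops *args. A small usage sketch, reusing the hypothetical wrap_function from the previous note and a toy objective chosen purely for illustration:

    import numpy as np

    def shifted_quadratic(x, shift):        # toy objective, illustration only
        return np.sum((x - shift) ** 2)

    func_calls, f = wrap_function(shifted_quadratic, (1.5,))
    old_fval = f(np.zeros(3))               # equivalent to shifted_quadratic(x0, 1.5)
    print(func_calls[0])                    # 1 -- the wrapper counted the call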
func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- gradient norm must be less than gtol before succesful termination norm -- order of norm (Inf is max, -Inf is min) epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) if alpha_k is None: # line search failed try different one. func_calls = func_calls + fc grad_calls = grad_calls + gc alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk gfk = gfkp1 k = k + 1 gnorm = vecnorm(gfk,ord=norm) if (gnorm <= gtol): break try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break #print "Divide by zero encountered: Hessian calculation reset." 
#Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
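Note: the branch removed above fell back to approx_fprime, which approximates the gradient numerically, when no fprime was supplied; after the refactor that fallback sits behind the same wrapped-callable interface. The body of approx_fprime is not shown in these records, so the forward-difference version below is only a plausible stand-in written under its own name:

    import numpy as np

    def forward_difference_gradient(xk, f, epsilon, *args):
        # perturb one coordinate at a time; costs len(xk) extra f evaluations,
        # which matches the old bookkeeping of adding len(x0) + 1 to func_calls
        f0 = f(xk, *args)
        grad = np.zeros(len(xk), dtype=float)
        ei = np.zeros(len(xk), dtype=float)
        for k in range(len(xk)):
            ei[k] = epsilon
            grad[k] = (f(xk + ei, *args) - f0) / epsilon
            ei[k] = 0.0
        return grad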
old_fval,old_old_fval,args=args)
old_fval,old_old_fval)
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- gradient norm must be less than gtol before succesful termination norm -- order of norm (Inf is max, -Inf is min) epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) if alpha_k is None: # line search failed try different one. func_calls = func_calls + fc grad_calls = grad_calls + gc alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk gfk = gfkp1 k = k + 1 gnorm = vecnorm(gfk,ord=norm) if (gnorm <= gtol): break try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break #print "Divide by zero encountered: Hessian calculation reset." 
#Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
func_calls = func_calls + fc grad_calls = grad_calls + gc
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- gradient norm must be less than gtol before succesful termination norm -- order of norm (Inf is max, -Inf is min) epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) if alpha_k is None: # line search failed try different one. func_calls = func_calls + fc grad_calls = grad_calls + gc alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk gfk = gfkp1 k = k + 1 gnorm = vecnorm(gfk,ord=norm) if (gnorm <= gtol): break try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break #print "Divide by zero encountered: Hessian calculation reset." 
#Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
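Note: the removal above drops the manual func_calls += fc / grad_calls += gc accumulation around the line search. Because the counter lives inside the wrapped callable, any routine that evaluates f, including the line search, advances the same counter automatically. A toy illustration, assuming the wrap_function sketch given earlier and a deliberately simple backtracking search that is not the library's line search:

    # each evaluation of the wrapped f bumps func_calls[0], no manual fc needed
    func_calls, f = wrap_function(lambda x: x * x, ())

    def toy_backtracking_search(f, x, p):
        alpha = 1.0
        while f(x + alpha * p) > f(x):
            alpha *= 0.5
        return alpha

    alpha = toy_backtracking_search(f, 2.0, -1.0)
    print(func_calls[0])                    # total evaluations so far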
old_fval,old_old_fval,args=args)
old_fval,old_old_fval)
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- gradient norm must be less than gtol before succesful termination norm -- order of norm (Inf is max, -Inf is min) epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) if alpha_k is None: # line search failed try different one. func_calls = func_calls + fc grad_calls = grad_calls + gc alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk gfk = gfkp1 k = k + 1 gnorm = vecnorm(gfk,ord=norm) if (gnorm <= gtol): break try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break #print "Divide by zero encountered: Hessian calculation reset." 
#Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
func_calls = func_calls + fc grad_calls = grad_calls + gc
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- gradient norm must be less than gtol before succesful termination norm -- order of norm (Inf is max, -Inf is min) epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) if alpha_k is None: # line search failed try different one. func_calls = func_calls + fc grad_calls = grad_calls + gc alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk gfk = gfkp1 k = k + 1 gnorm = vecnorm(gfk,ord=norm) if (gnorm <= gtol): break try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break #print "Divide by zero encountered: Hessian calculation reset." 
#Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
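Note: every context snapshot above carries the same inverse-Hessian update (the A1/A2 lines); it is the standard BFGS formula H_{k+1} = (I - rho*s*y^T) H_k (I - rho*y*s^T) + rho*s*s^T with rho = 1/(y^T s). For readers following the diffs, an equivalent self-contained NumPy rendering of that update:

    import numpy as np

    def bfgs_inverse_hessian_update(Hk, sk, yk):
        # BFGS update of the inverse Hessian approximation;
        # mirrors the A1/A2 expressions in the snapshots above
        rhok = 1.0 / np.dot(yk, sk)
        I = np.eye(len(sk))
        A1 = I - rhok * np.outer(sk, yk)
        A2 = I - rhok * np.outer(yk, sk)
        return np.dot(A1, np.dot(Hk, A2)) + rhok * np.outer(sk, sk)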
if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1
gfkp1 = myfprime(xkp1)
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- gradient norm must be less than gtol before succesful termination norm -- order of norm (Inf is max, -Inf is min) epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) if alpha_k is None: # line search failed try different one. func_calls = func_calls + fc grad_calls = grad_calls + gc alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk gfk = gfkp1 k = k + 1 gnorm = vecnorm(gfk,ord=norm) if (gnorm <= gtol): break try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break #print "Divide by zero encountered: Hessian calculation reset." 
#Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls
print " Function evaluations: %d" % func_calls[0] print " Gradient evaluations: %d" % grad_calls[0]
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- gradient norm must be less than gtol before succesful termination norm -- order of norm (Inf is max, -Inf is min) epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) if alpha_k is None: # line search failed try different one. func_calls = func_calls + fc grad_calls = grad_calls + gc alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk gfk = gfkp1 k = k + 1 gnorm = vecnorm(gfk,ord=norm) if (gnorm <= gtol): break try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break #print "Divide by zero encountered: Hessian calculation reset." 
#Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
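Note: the [0] indexing introduced in this record follows directly from the counter being a one-element list; the integers printed in the convergence summary are the current contents of that list. A tiny illustration, again assuming the wrap_function sketch given earlier:

    func_calls, f = wrap_function(lambda x, offset: (x - offset) ** 2, (1.0,))
    f(3.0)
    f(4.0)
    print(" Function evaluations: %d" % func_calls[0])   # prints 2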
print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls
print " Function evaluations: %d" % func_calls[0] print " Gradient evaluations: %d" % grad_calls[0]
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- gradient norm must be less than gtol before succesful termination norm -- order of norm (Inf is max, -Inf is min) epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) if alpha_k is None: # line search failed try different one. func_calls = func_calls + fc grad_calls = grad_calls + gc alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk gfk = gfkp1 k = k + 1 gnorm = vecnorm(gfk,ord=norm) if (gnorm <= gtol): break try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break #print "Divide by zero encountered: Hessian calculation reset." 
#Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls
print " Function evaluations: %d" % func_calls[0] print " Gradient evaluations: %d" % grad_calls[0]
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- gradient norm must be less than gtol before succesful termination norm -- order of norm (Inf is max, -Inf is min) epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) if alpha_k is None: # line search failed try different one. func_calls = func_calls + fc grad_calls = grad_calls + gc alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk gfk = gfkp1 k = k + 1 gnorm = vecnorm(gfk,ord=norm) if (gnorm <= gtol): break try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break #print "Divide by zero encountered: Hessian calculation reset." 
#Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag
retlist = xk, fval, gfk, Hk, func_calls[0], grad_calls[0], warnflag
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- gradient norm must be less than gtol before succesful termination norm -- order of norm (Inf is max, -Inf is min) epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) if alpha_k is None: # line search failed try different one. func_calls = func_calls + fc grad_calls = grad_calls + gc alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk gfk = gfkp1 k = k + 1 gnorm = vecnorm(gfk,ord=norm) if (gnorm <= gtol): break try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break #print "Divide by zero encountered: Hessian calculation reset." 
#Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
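A minimal usage sketch for the fmin_bfgs signature shown in the context above; it is not taken from the source. It assumes the surrounding optimize.py module (and its fmin_bfgs) is importable, uses numpy in place of the Numeric package the module imports as Num, and the test function names (rosen, rosen_der) are illustrative only.

import numpy as np

def rosen(x):
    # 2-D Rosenbrock test function; minimum at (1, 1)
    return 100.0 * (x[1] - x[0]**2)**2 + (1.0 - x[0])**2

def rosen_der(x):
    # analytic gradient, so approx_fprime is not needed
    return np.array([-400.0 * x[0] * (x[1] - x[0]**2) - 2.0 * (1.0 - x[0]),
                     200.0 * (x[1] - x[0]**2)])

# full_output=1 returns (xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag)
xopt, fopt, gopt, Bopt, nf, ng, flag = fmin_bfgs(rosen, [-1.2, 1.0],
                                                 fprime=rosen_der,
                                                 full_output=1, disp=0)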
app_fprime = 0
if fprime is None:
    app_fprime = 1
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function with nonlinear conjugate gradient algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the nonlinear conjugate gradient algorithm of Polak and Ribiere See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- stop when norm of gradient is less than gtol norm -- order of vector norm to use epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- if retall then this vector of the iterates is returned Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if True """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) xk = x0 old_fval = f(xk,*args) old_old_fval = old_fval + 5000 func_calls +=1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 if retall: allvecs = [xk] sk = [2*gtol] warnflag = 0 pk = -gfk gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): deltak = Num.dot(gfk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk,old_fval, old_old_fval,args=args,c2=0.4) if alpha_k is None: # line search failed -- use different one. func_calls += fc grad_calls += gc alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) func_calls += fc grad_calls += gc xk = xk + alpha_k*pk if retall: allvecs.append(xk) if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xk,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk beta_k = pymax(0,Num.dot(yk,gfkp1)/deltak) pk = -gfkp1 + beta_k * pk gfk = gfkp1 gnorm = vecnorm(gfk,ord=norm) k = k + 1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
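For reference, the direction update in the fmin_cg loop above (yk = gfkp1 - gfk, beta_k = pymax(0, Num.dot(yk,gfkp1)/deltak) with deltak = Num.dot(gfk,gfk)) is the Polak-Ribiere choice of beta clamped at zero (PR+). In standard notation:

\beta_k^{+} = \max\!\left(0,\ \frac{(\nabla f_{k+1} - \nabla f_k)^{T}\,\nabla f_{k+1}}{\nabla f_k^{T}\,\nabla f_k}\right),
\qquad
p_{k+1} = -\nabla f_{k+1} + \beta_k^{+}\, p_k .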
func_calls = 0
grad_calls = 0
func_calls, f = wrap_function(f, args)
if fprime is None:
    grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
else:
    grad_calls, myfprime = wrap_function(fprime, args)
gfk = myfprime(x0)
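The replacement code above relies on wrap_function, which is defined elsewhere in optimize.py and not shown in this excerpt. It is essentially a closure that binds the extra arguments and keeps the call count in a one-element list, which is why the counters are later read as func_calls[0]. A sketch under that assumption:

def wrap_function(function, args):
    # return a (counter, wrapper) pair; the counter is a one-element list so
    # the wrapper can mutate it in place and the caller sees the updated count
    ncalls = [0]
    def function_wrapper(x):
        ncalls[0] += 1
        return function(x, *args)
    return ncalls, function_wrapper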
old_fval = f(xk,*args)
old_fval = f(xk)
func_calls += 1
if app_fprime:
    gfk = apply(approx_fprime,(x0,f,epsilon)+args)
    myfprime = (approx_fprime,epsilon)
    func_calls = func_calls + len(x0) + 1
else:
    gfk = apply(fprime,(x0,)+args)
    myfprime = fprime
    grad_calls = grad_calls + 1
old_old_fval,args=args,c2=0.4)
old_old_fval,c2=0.4)
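The c2 keyword kept in both versions of this call is the curvature constant of the strong Wolfe conditions enforced by the line search (c1 is the sufficient-decrease constant, with 0 < c1 < c2 < 1); only the forwarding of the extra args changes here. For reference, the conditions are:

f(x_k + \alpha p_k) \le f(x_k) + c_1\,\alpha\,\nabla f_k^{T} p_k,
\qquad
\bigl|\nabla f(x_k + \alpha p_k)^{T} p_k\bigr| \le c_2\,\bigl|\nabla f_k^{T} p_k\bigr| .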
func_calls += fc
grad_calls += gc
old_fval,old_old_fval,args=args)
func_calls += fc
grad_calls += gc
old_fval,old_old_fval)
if app_fprime:
    gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args)
    func_calls = func_calls + len(x0) + 1
else:
    gfkp1 = apply(fprime,(xk,)+args)
    grad_calls = grad_calls + 1
gfkp1 = myfprime(xk)
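When no analytic fprime is supplied, myfprime ends up calling approx_fprime, the forward-difference gradient defined elsewhere in optimize.py (hence the len(x0)+1 function evaluations counted by the removed code). A sketch of that helper, with numpy standing in for Numeric and details possibly differing from the original:

import numpy as np

def approx_fprime(xk, f, epsilon, *args):
    # forward-difference gradient: one extra evaluation of f per coordinate
    f0 = f(*((xk,) + args))
    grad = np.zeros(len(xk), float)
    ei = np.zeros(len(xk), float)
    for k in range(len(xk)):
        ei[k] = epsilon
        grad[k] = (f(*((xk + ei,) + args)) - f0) / epsilon
        ei[k] = 0.0
    return grad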
print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls
print " Function evaluations: %d" % func_calls[0] print " Gradient evaluations: %d" % grad_calls[0]
print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls
print " Function evaluations: %d" % func_calls[0] print " Gradient evaluations: %d" % grad_calls[0]
print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls
print " Function evaluations: %d" % func_calls[0] print " Gradient evaluations: %d" % grad_calls[0]
retlist = xk, fval, func_calls, grad_calls, warnflag
retlist = xk, fval, func_calls[0], grad_calls[0], warnflag
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Description: Minimize the function, f, whose gradient is given by fprime using the Newton-CG method. fhess_p must compute the hessian times an arbitrary vector. If it is not given, finite-differences on fprime are used to compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 140. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f: fprime(x, *args) fhess_p -- a function to compute the Hessian of f times an arbitrary vector: fhess_p (x, p, *args) fhess -- a function to compute the Hessian matrix of f. args -- extra arguments for f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon -- if fhess is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) xopt -- the minimizer of f fopt -- the value of the function at xopt: fopt = f(xopt) fcalls -- the number of function calls. gcalls -- the number of gradient calls. hcalls -- the number of hessian calls. warnflag -- algorithm warnings: 1 : 'Maximum number of iterations exceeded.' allvecs -- a list of all tried iterates Additional Inputs: avextol -- Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter -- Maximum number of iterations to allow. full_output -- If non-zero return the optional outputs. disp -- If non-zero print convergence message. retall -- return a list of results at each iteration if True Remarks: Only one of fhess_p or fhess need be given. If fhess is provided, then fhess_p will be ignored. If neither fhess nor fhess_p is provided, then the hessian product will be approximated using finite differences on fprime. """ x0 = asarray(x0) fcalls = 0 gcalls = 0 hcalls = 0 if maxiter is None: maxiter = len(x0)*200 xtol = len(x0)*avextol update = [2*xtol] xk = x0 if retall: allvecs = [xk] k = 0 old_fval = f(x0,*args) fcalls += 1 while (Num.add.reduce(abs(update)) > xtol) and (k < maxiter): # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -apply(fprime,(xk,)+args) gcalls = gcalls + 1 maggrad = Num.add.reduce(abs(b)) eta = min([0.5,Num.sqrt(maggrad)]) termcond = eta * maggrad xsupi = zeros(len(x0), x0.typecode()) ri = -b psupi = -ri i = 0 dri0 = Num.dot(ri,ri) if fhess is not None: # you want to compute hessian once. A = apply(fhess,(xk,)+args) hcalls = hcalls + 1 while Num.add.reduce(abs(ri)) > termcond: if fhess is None: if fhess_p is None: Ap = apply(approx_fhess_p,(xk,psupi,fprime,epsilon)+args) gcalls = gcalls + 2 else: Ap = apply(fhess_p,(xk,psupi)+args) hcalls = hcalls + 1 else: Ap = Num.dot(A,psupi) # check curvature curv = Num.dot(psupi,Ap) if curv == 0.0: break elif curv < 0: if (i > 0): break else: xsupi = xsupi + dri0/curv * psupi break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = Num.dot(ri,ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update Num.dot(ri,ri) for next time. pk = xsupi # search direction is solution to system. 
gfk = -b # gradient at xk alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval,args) fcalls = fcalls + fc gcalls = gcalls + gc update = alphak * pk xk = xk + update if retall: allvecs.append(xk) k = k + 1 if disp or full_output: fval = old_fval if k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls else: warnflag = 0 if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls if full_output: retlist = xk, fval, fcalls, gcalls, hcalls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
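The inner while-loop in the fmin_ncg context above approximately solves the Newton system \nabla^2 f(x_k)\, p = -\nabla f(x_k) by conjugate gradients, stopping early once the residual falls below a forcing term built from the 1-norm of the gradient (eta = min([0.5, Num.sqrt(maggrad)]), termcond = eta*maggrad in the code):

\eta_k = \min\!\bigl(0.5,\ \sqrt{\lVert \nabla f_k \rVert_1}\bigr),
\qquad
\text{stop the inner CG iteration when } \lVert r_i \rVert_1 \le \eta_k\,\lVert \nabla f_k \rVert_1 .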
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
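A hypothetical usage sketch for the fmin_ncg signature documented above, not taken from the source: a convex quadratic with a Hessian-vector product supplied through fhess_p. It assumes fmin_ncg is in scope and uses numpy in place of Numeric; all other names are illustrative.

import numpy as np

A = np.array([[3.0, 1.0],
              [1.0, 2.0]])          # symmetric positive definite
b = np.array([1.0, -1.0])

def f(x):
    return 0.5 * np.dot(x, np.dot(A, x)) - np.dot(b, x)

def fprime(x):
    return np.dot(A, x) - b

def fhess_p(x, p):
    # Hessian times an arbitrary vector, the form fhess_p is expected to take
    return np.dot(A, p)

xopt = fmin_ncg(f, np.zeros(2), fprime, fhess_p=fhess_p, disp=0)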
fcalls = 0
gcalls = 0
fcalls, f = wrap_function(f, args)
gcalls, fprime = wrap_function(fprime, args)
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Description: Minimize the function, f, whose gradient is given by fprime using the Newton-CG method. fhess_p must compute the hessian times an arbitrary vector. If it is not given, finite-differences on fprime are used to compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 140. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f: fprime(x, *args) fhess_p -- a function to compute the Hessian of f times an arbitrary vector: fhess_p (x, p, *args) fhess -- a function to compute the Hessian matrix of f. args -- extra arguments for f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon -- if fhess is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) xopt -- the minimizer of f fopt -- the value of the function at xopt: fopt = f(xopt) fcalls -- the number of function calls. gcalls -- the number of gradient calls. hcalls -- the number of hessian calls. warnflag -- algorithm warnings: 1 : 'Maximum number of iterations exceeded.' allvecs -- a list of all tried iterates Additional Inputs: avextol -- Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter -- Maximum number of iterations to allow. full_output -- If non-zero return the optional outputs. disp -- If non-zero print convergence message. retall -- return a list of results at each iteration if True Remarks: Only one of fhess_p or fhess need be given. If fhess is provided, then fhess_p will be ignored. If neither fhess nor fhess_p is provided, then the hessian product will be approximated using finite differences on fprime. """ x0 = asarray(x0) fcalls = 0 gcalls = 0 hcalls = 0 if maxiter is None: maxiter = len(x0)*200 xtol = len(x0)*avextol update = [2*xtol] xk = x0 if retall: allvecs = [xk] k = 0 old_fval = f(x0,*args) fcalls += 1 while (Num.add.reduce(abs(update)) > xtol) and (k < maxiter): # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -apply(fprime,(xk,)+args) gcalls = gcalls + 1 maggrad = Num.add.reduce(abs(b)) eta = min([0.5,Num.sqrt(maggrad)]) termcond = eta * maggrad xsupi = zeros(len(x0), x0.typecode()) ri = -b psupi = -ri i = 0 dri0 = Num.dot(ri,ri) if fhess is not None: # you want to compute hessian once. A = apply(fhess,(xk,)+args) hcalls = hcalls + 1 while Num.add.reduce(abs(ri)) > termcond: if fhess is None: if fhess_p is None: Ap = apply(approx_fhess_p,(xk,psupi,fprime,epsilon)+args) gcalls = gcalls + 2 else: Ap = apply(fhess_p,(xk,psupi)+args) hcalls = hcalls + 1 else: Ap = Num.dot(A,psupi) # check curvature curv = Num.dot(psupi,Ap) if curv == 0.0: break elif curv < 0: if (i > 0): break else: xsupi = xsupi + dri0/curv * psupi break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = Num.dot(ri,ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update Num.dot(ri,ri) for next time. pk = xsupi # search direction is solution to system. 
gfk = -b # gradient at xk alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval,args) fcalls = fcalls + fc gcalls = gcalls + gc update = alphak * pk xk = xk + update if retall: allvecs.append(xk) k = k + 1 if disp or full_output: fval = old_fval if k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls else: warnflag = 0 if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls if full_output: retlist = xk, fval, fcalls, gcalls, hcalls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
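The replacement above is read back as fcalls[0] in later hunks, which implies that wrap_function returns a mutable counter together with a wrapped callable that already carries args. The helper itself is not shown in these hunks, so the following is only a minimal sketch reconstructed from that usage:

def wrap_function(function, args):
    # shared one-element list so the closure can update the count in place
    ncalls = [0]
    def function_wrapper(x):
        ncalls[0] += 1               # count every evaluation
        return function(x, *args)    # extra args are applied automatically
    return ncalls, function_wrapper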
old_fval = f(x0,*args)
fcalls += 1
old_fval = f(x0)
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
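For reference, a call against the fmin_ncg signature documented in the context shown earlier in these hunks might look like the sketch below. The quadratic objective, gradient, and Hessian are illustrative only, and Num is assumed to be the Numeric module already imported by this file:

A = Num.array([[3.0, 1.0], [1.0, 2.0]])
b = Num.array([1.0, 2.0])
def f(x):
    return 0.5*Num.dot(x, Num.dot(A, x)) - Num.dot(b, x)
def fprime(x):
    return Num.dot(A, x) - b
def fhess(x):
    return A
xopt = fmin_ncg(f, Num.array([0.0, 0.0]), fprime, fhess=fhess, disp=0)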
b = -apply(fprime,(xk,)+args)
gcalls = gcalls + 1
b = -fprime(xk)
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
Ap = apply(approx_fhess_p,(xk,psupi,fprime,epsilon)+args)
gcalls = gcalls + 2
Ap = approx_fhess_p(xk,psupi,fprime,epsilon)
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
Ap = apply(fhess_p,(xk,psupi)+args)
Ap = fhess_p(xk,psupi, *args)
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval,args)
fcalls = fcalls + fc
gcalls = gcalls + gc
alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval)
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls
print " Function evaluations: %d" % fcalls[0] print " Gradient evaluations: %d" % gcalls[0]
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls
print " Function evaluations: %d" % fcalls[0] print " Gradient evaluations: %d" % gcalls[0]
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
retlist = xk, fval, fcalls, gcalls, hcalls, warnflag
retlist = xk, fval, fcalls[0], gcalls[0], hcalls, warnflag
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
si = Numeric.sign(xm-xf) + ((xm-xf)==0)
si = Num.sign(xm-xf) + ((xm-xf)==0)
def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500, full_output=0, disp=1): """Bounded minimization for scalar functions. Description: Finds a local minimizer of the scalar function func in the interval x1 < xopt < x2 using Brent's method. (See brent for auto-bracketing). Inputs: func -- the function to be minimized (must accept scalar input and return scalar output). x1, x2 -- the optimization bounds. args -- extra arguments to pass to function. xtol -- the convergence tolerance. maxfun -- maximum function evaluations. full_output -- Non-zero to return optional outputs. disp -- Non-zero to print messages. 0 : no message printing. 1 : non-convergence notification messages only. 2 : print a message on convergence too. 3 : print iteration results. Outputs: (xopt, {fval, ierr, numfunc}) xopt -- The minimizer of the function over the interval. fval -- The function value at the minimum point. ierr -- An error flag (0 if converged, 1 if maximum number of function calls reached). numfunc -- The number of function calls. """ if x1 > x2: raise ValueError, "The lower bound exceeds the upper bound." flag = 0 header = ' Func-count x f(x) Procedure' step=' initial' sqrt_eps = sqrt(2.2e-16) golden_mean = 0.5*(3.0-sqrt(5.0)) a, b = x1, x2 fulc = a + golden_mean*(b-a) nfc, xf = fulc, fulc rat = e = 0.0 x = xf fx = func(x,*args) num = 1 fmin_data = (1, xf, fx) ffulc = fnfc = fx xm = 0.5*(a+b) tol1 = sqrt_eps*abs(xf) + xtol / 3.0 tol2 = 2.0*tol1 if disp > 2: print (" ") print (header) print "%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)) while ( abs(xf-xm) > (tol2 - 0.5*(b-a)) ): golden = 1 # Check for parabolic fit if abs(e) > tol1: golden = 0 r = (xf-nfc)*(fx-ffulc) q = (xf-fulc)*(fx-fnfc) p = (xf-fulc)*q - (xf-nfc)*r q = 2.0*(q-r) if q > 0.0: p = -p q = abs(q) r = e e = rat # Check for acceptability of parabola if ( (abs(p) < abs(0.5*q*r)) and (p > q*(a-xf)) and (p < q*(b-xf))): rat = (p+0.0) / q; x = xf + rat step = ' parabolic' if ((x-a) < tol2) or ((b-x) < tol2): si = Numeric.sign(xm-xf) + ((xm-xf)==0) rat = tol1*si else: # do a golden section step golden = 1 if golden: # Do a golden-section step if xf >= xm: e=a-xf else: e=b-xf rat = golden_mean*e step = ' golden' si = Numeric.sign(rat) + (rat == 0) x = xf + si*max([abs(rat), tol1]) fu = func(x,*args) num += 1 fmin_data = (num, x, fu) if disp > 2: print "%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)) if fu <= fx: if x >= xf: a = xf else: b = xf fulc, ffulc = nfc, fnfc nfc, fnfc = xf, fx xf, fx = x, fu else: if x < xf: a = x else: b = x if (fu <= fnfc) or (nfc == xf): fulc, ffulc = nfc, fnfc nfc, fnfc = x, fu elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc): fulc, ffulc = x, fu xm = 0.5*(a+b) tol1 = sqrt_eps*abs(xf) + xtol/3.0 tol2 = 2.0*tol1 if num >= maxfun: flag = 1 fval = fx if disp > 0: _endprint(x, flag, fval, maxfun, xtol, disp) if full_output: return xf, fval, flag, num else: return xf fval = fx if disp > 0: _endprint(x, flag, fval, maxfun, xtol, disp) if full_output: return xf, fval, flag, num else: return xf
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
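An illustrative call against the fminbound signature shown in the context above; the parabola is a stand-in objective, not part of the patch:

def parabola(x):
    return (x - 2.0)**2
xopt = fminbound(parabola, 0.0, 5.0, disp=0)                  # close to 2.0
xopt, fval, ierr, numfunc = fminbound(parabola, 0.0, 5.0, full_output=1, disp=0)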
si = Numeric.sign(rat) + (rat == 0)
si = Num.sign(rat) + (rat == 0)
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
global _powell_funcalls
def _myfunc(alpha, func, x0, direc, args=()):
    funcargs = (x0 + alpha * direc,)+args
    return func(*funcargs)
def _linesearch_powell(func, p, xi, args=(), tol=1e-3):
def _linesearch_powell(func, p, xi, tol=1e-3):
def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0): """Given a function and distinct initial points, search in the downhill direction (as defined by the initital points) and return new points xa, xb, xc that bracket the minimum of the function: f(xa) > f(xb) < f(xc) """ _gold = 1.618034 _verysmall_num = 1e-21 fa = apply(func, (xa,)+args) fb = apply(func, (xb,)+args) if (fa < fb): # Switch so fa > fb dum = xa; xa = xb; xb = dum dum = fa; fa = fb; fb = dum xc = xb + _gold*(xb-xa) fc = apply(func, (xc,)+args) funcalls = 3 iter = 0 while (fc < fb): tmp1 = (xb - xa)*(fb-fc) tmp2 = (xb - xc)*(fb-fa) val = tmp2-tmp1 if abs(val) < _verysmall_num: denom = 2.0*_verysmall_num else: denom = 2.0*val w = xb - ((xb-xc)*tmp2-(xb-xa)*tmp1)/denom wlim = xb + grow_limit*(xc-xb) if iter > 1000: raise RuntimeError, "Too many iterations." if (w-xc)*(xb-w) > 0.0: fw = apply(func, (w,)+args) funcalls += 1 if (fw < fc): xa = xb; xb=w; fa=fb; fb=fw return xa, xb, xc, fa, fb, fc, funcalls elif (fw > fb): xc = w; fc=fw return xa, xb, xc, fa, fb, fc, funcalls w = xc + _gold*(xc-xb) fw = apply(func, (w,)+args) funcalls += 1 elif (w-wlim)*(wlim-xc) >= 0.0: w = wlim fw = apply(func, (w,)+args) funcalls += 1 elif (w-wlim)*(xc-w) > 0.0: fw = apply(func, (w,)+args) funcalls += 1 if (fw < fc): xb=xc; xc=w; w=xc+_gold*(xc-xb) fb=fc; fc=fw; fw=apply(func, (w,)+args) funcalls += 1 else: w = xc + _gold*(xc-xb) fw = apply(func, (w,)+args) funcalls += 1 xa=xb; xb=xc; xc=w fa=fb; fb=fc; fc=fw return xa, xb, xc, fa, fb, fc, funcalls
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
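The bracket routine in the context above walks downhill from the two starting points until it holds f(xa) > f(xb) < f(xc). A small hypothetical use, with a made-up objective:

def g(x):
    return (x - 3.0)**2
xa, xb, xc, fa, fb, fc, funcalls = bracket(g, xa=0.0, xb=1.0)
# fb is now strictly below fa and fc, so (xa, xc) brackets the minimizer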
global _powell_funcalls
extra_args = (func, p, xi, args)
alpha_min, fret, iter, num = brent(_myfunc, args=extra_args, full_output=1, tol=tol)
def myfunc(alpha):
    return func(p + alpha * xi)
alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol)
def _linesearch_powell(func, p, xi, args=(), tol=1e-3):
    # line-search algorithm using fminbound
    # find the minimum of the function
    # func(x0+ alpha*direc)
    global _powell_funcalls
    extra_args = (func, p, xi, args)
    alpha_min, fret, iter, num = brent(_myfunc, args=extra_args, full_output=1, tol=tol)
    xi = alpha_min*xi
    _powell_funcalls += num
    return squeeze(fret), p+xi, xi
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
_powell_funcalls += num
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
global _powell_funcalls
fcalls, func = wrap_function(func, args)
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using modified Powell's method. Description: Uses a modification of Powell's method to find the minimum of a function of N variables Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) direc -- current direction set iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- line-search error tolerance. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return a list of the solution at each iteration """ global _powell_funcalls x = asarray(x0) if retall: allvecs = [x] N = len(x) rank = len(x.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 1000 if maxfun is None: maxfun = N * 1000 direc = eye(N,typecode='d') fval = squeeze(apply(func, (x,)+args)) _powell_funcalls = 1 x1 = x.copy() iter = 0; ilist = range(N) while 1: fx = fval bigind = 0 delta = 0.0 for i in ilist: direc1 = direc[i] fx2 = fval fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) if (fx2 - fval) > delta: delta = fx2 - fval bigind = i iter += 1 if retall: allvecs.append(x) if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break if _powell_funcalls >= maxfun: break if iter >= maxiter: break # Construct the extrapolated point direc1 = x - x1 x2 = 2*x - x1 x1 = x.copy() fx2 = squeeze(apply(func, (x2,)+args)) _powell_funcalls +=1 if (fx > fx2): t = 2.0*(fx+fx2-2.0*fval) temp = (fx-fval-delta) t *= temp*temp temp = fx-fx2 t -= delta*temp*temp if t < 0.0: fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) direc[bigind] = direc[-1] direc[-1] = direc1 warnflag = 0 if _powell_funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." elif iter >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iter print " Function evaluations: %d" % _powell_funcalls x = squeeze(x) if full_output: retlist = x, fval, direc, iter, _powell_funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
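A sketch of how the fmin_powell signature above is called, using a made-up two-variable objective; with full_output=1 the values come back in the order of the retlist built at the end of the function:

def quad(x):
    return (x[0] - 1.0)**2 + 10.0*(x[1] + 2.0)**2
xopt = fmin_powell(quad, [0.0, 0.0], disp=0)
xopt, fopt, direc, niter, funcalls, warnflag = fmin_powell(quad, [0.0, 0.0],
                                                           full_output=1, disp=0)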
fval = squeeze(apply(func, (x,)+args))
_powell_funcalls = 1
fval = squeeze(func(x))
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100)
fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100)
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
if _powell_funcalls >= maxfun: break
if fcalls[0] >= maxfun: break
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
fx2 = squeeze(apply(func, (x2,)+args))
        _powell_funcalls +=1
fx2 = squeeze(func(x2))
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100)
fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100)
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
if _powell_funcalls >= maxfun:
if fcalls[0] >= maxfun:
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
print " Function evaluations: %d" % _powell_funcalls
print " Function evaluations: %d" % fcalls[0]
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
retlist = x, fval, direc, iter, _powell_funcalls, warnflag
retlist = x, fval, direc, iter, fcalls[0], warnflag
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
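The records above all trade the global `_powell_funcalls` counter (and the explicit `args=` at every call site) for a shared `fcalls[0]` counter and a pre-bound `func`. Below is a minimal sketch of that counting-wrapper pattern; the helper name `wrap_function`, its signature, and the usage line are assumptions inferred from these diffs, not code shown in them.

def wrap_function(function, args):
    # Assumed helper, not shown in these records: a one-element list holds the
    # call count so the nested wrapper can mutate it, and *args is bound here
    # so call sites no longer need to pass args= themselves.
    ncalls = [0]
    def function_wrapper(x):
        ncalls[0] += 1
        return function(x, *args)
    return ncalls, function_wrapper

# Usage sketch (hypothetical): near the top of fmin_powell,
#   fcalls, func = wrap_function(func, args)
# after which squeeze(func(x2)) replaces squeeze(apply(func, (x2,)+args))
# and fcalls[0] replaces every read of the global _powell_funcalls.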
if __name__ == "__main__":
    import string
def main():
def _scalarfunc(*params):
    params = squeeze(asarray(params))
    return func(params,*args)
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
def _scalarfunc(*params):
    params = squeeze(asarray(params))
    return func(params,*args)
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
if __name__ == "__main__":
    main()
def _scalarfunc(*params):
    params = squeeze(asarray(params))
    return func(params,*args)
926e615eebcd9f2ca3373b1887f74b532b10bda4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/926e615eebcd9f2ca3373b1887f74b532b10bda4/optimize.py
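The last few records point at an entry-point cleanup: module-level self-test code (beginning with `import string`) moves into a `main()` function that the `__main__` guard then calls. A minimal sketch of that shape follows, with a placeholder body since the real test code is not shown in these records.

def main():
    # Placeholder for the old module-level self-test code; the records only
    # show that it started with "import string".
    import string
    return string.__name__

if __name__ == "__main__":
    main()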