Columns: rem (strings, 0–322k chars), add (strings, 0–2.05M chars), context (strings, 8–228k chars)
rem:
old_fval,old_old_fval,args=args)
add:
old_fval,old_old_fval)
context:
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
              epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
              retall=0):
    """Minimize a function using the BFGS algorithm.

    Description:

      Optimize the function, f, whose gradient is given by fprime using the
      quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS)
      See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198.

    Inputs:

      f -- the Python function or method to be minimized.
      x0 -- the initial guess for the minimizer.
      fprime -- a function to compute the gradient of f.
      args -- extra arguments to f and fprime.
      gtol -- gradient norm must be less than gtol before successful termination
      norm -- order of norm (Inf is max, -Inf is min)
      epsilon -- if fprime is approximated use this value for
                 the step size (can be scalar or vector)

    Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>)

      xopt -- the minimizer of f.
      fopt -- the value of f(xopt).
      gopt -- the value of f'(xopt).  (Should be near 0)
      Bopt -- the value of 1/f''(xopt).  (inverse hessian matrix)
      func_calls -- the number of function_calls.
      grad_calls -- the number of gradient calls.
      warnflag -- an integer warning flag:
                  1 : 'Maximum number of iterations exceeded.'
                  2 : 'Gradient and/or function calls not changing'
      allvecs -- a list of all iterates (only returned if retall==1)

    Additional Inputs:

      maxiter -- the maximum number of iterations.
      full_output -- if non-zero then return fopt, func_calls, grad_calls,
                     and warnflag in addition to xopt.
      disp -- print convergence message if non-zero.
      retall -- return a list of results at each iteration if non-zero
    """
    app_fprime = 0
    if fprime is None:
        app_fprime = 1

    x0 = asarray(x0)
    if maxiter is None:
        maxiter = len(x0)*200
    func_calls = 0
    grad_calls = 0
    k = 0
    N = len(x0)
    I = MLab.eye(N)
    Hk = I
    old_fval = f(x0,*args)
    old_old_fval = old_fval + 5000
    func_calls += 1

    if app_fprime:
        gfk = apply(approx_fprime,(x0,f,epsilon)+args)
        myfprime = (approx_fprime,epsilon)
        func_calls = func_calls + len(x0) + 1
    else:
        gfk = apply(fprime,(x0,)+args)
        myfprime = fprime
        grad_calls = grad_calls + 1
    xk = x0
    if retall:
        allvecs = [x0]
    sk = [2*gtol]
    warnflag = 0
    gnorm = vecnorm(gfk,ord=norm)

    while (gnorm > gtol) and (k < maxiter):
        pk = -Num.dot(Hk,gfk)
        alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
            linesearch.line_search(f,myfprime,xk,pk,gfk,
                                   old_fval,old_old_fval,args=args)
        if alpha_k is None:  # line search failed try different one.
            func_calls = func_calls + fc
            grad_calls = grad_calls + gc
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
                line_search(f,myfprime,xk,pk,gfk,
                            old_fval,old_old_fval,args=args)
        func_calls = func_calls + fc
        grad_calls = grad_calls + gc
        xkp1 = xk + alpha_k * pk
        if retall:
            allvecs.append(xkp1)
        sk = xkp1 - xk
        xk = xkp1
        if gfkp1 is None:
            if app_fprime:
                gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args)
                func_calls = func_calls + len(x0) + 1
            else:
                gfkp1 = apply(fprime,(xkp1,)+args)
                grad_calls = grad_calls + 1
        yk = gfkp1 - gfk
        gfk = gfkp1
        k = k + 1
        gnorm = vecnorm(gfk,ord=norm)
        if (gnorm <= gtol):
            break

        try:
            rhok = 1 / Num.dot(yk,sk)
        except ZeroDivisionError:
            warnflag = 2
            break
            #print "Divide by zero encountered: Hessian calculation reset."
            #Hk = I
        else:
            A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok
            A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok
            Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \
                 * sk[Num.NewAxis,:]

    if disp or full_output:
        fval = old_fval
    if warnflag == 2:
        if disp:
            print "Warning: Desired error not necessarily achieved due to precision loss"
            print "         Current function value: %f" % fval
            print "         Iterations: %d" % k
            print "         Function evaluations: %d" % func_calls
            print "         Gradient evaluations: %d" % grad_calls
    elif k >= maxiter:
        warnflag = 1
        if disp:
            print "Warning: Maximum number of iterations has been exceeded"
            print "         Current function value: %f" % fval
            print "         Iterations: %d" % k
            print "         Function evaluations: %d" % func_calls
            print "         Gradient evaluations: %d" % grad_calls
    else:
        if disp:
            print "Optimization terminated successfully."
            print "         Current function value: %f" % fval
            print "         Iterations: %d" % k
            print "         Function evaluations: %d" % func_calls
            print "         Gradient evaluations: %d" % grad_calls

    if full_output:
        retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag
        if retall:
            retlist += (allvecs,)
    else:
        retlist = xk
        if retall:
            retlist = (xk, allvecs)

    return retlist
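For reference, a minimal call that exercises the interface documented in the docstring above. This is an illustrative sketch only: the Rosenbrock test function, its gradient, and the scipy.optimize/NumPy imports are assumptions added here, not part of the diff.

# Hypothetical usage example (not from the diff): minimize the 2-D Rosenbrock
# function with an analytic gradient and collect the full output tuple.
import numpy as np
from scipy.optimize import fmin_bfgs

def rosen(x):
    # classic Rosenbrock function, minimum at (1, 1)
    return 100.0*(x[1] - x[0]**2)**2 + (1.0 - x[0])**2

def rosen_grad(x):
    return np.array([-400.0*x[0]*(x[1] - x[0]**2) - 2.0*(1.0 - x[0]),
                     200.0*(x[1] - x[0]**2)])

xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag = fmin_bfgs(
    rosen, [-1.2, 1.0], fprime=rosen_grad, full_output=1, disp=0)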
rem:
func_calls = func_calls + fc
grad_calls = grad_calls + gc
rem:
if app_fprime:
    gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args)
    func_calls = func_calls + len(x0) + 1
else:
    gfkp1 = apply(fprime,(xkp1,)+args)
    grad_calls = grad_calls + 1
add:
gfkp1 = myfprime(xkp1)
print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls
print " Function evaluations: %d" % func_calls[0] print " Gradient evaluations: %d" % grad_calls[0]
rem:
retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag
add:
retlist = xk, fval, gfk, Hk, func_calls[0], grad_calls[0], warnflag
rem:
app_fprime = 0
if fprime is None:
    app_fprime = 1
context:
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
            epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
            retall=0):
    """Minimize a function with nonlinear conjugate gradient algorithm.

    Description:

      Optimize the function, f, whose gradient is given by fprime using the
      nonlinear conjugate gradient algorithm of Polak and Ribiere
      See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122.

    Inputs:

      f -- the Python function or method to be minimized.
      x0 -- the initial guess for the minimizer.
      fprime -- a function to compute the gradient of f.
      args -- extra arguments to f and fprime.
      gtol -- stop when norm of gradient is less than gtol
      norm -- order of vector norm to use
      epsilon -- if fprime is approximated use this value for
                 the step size (can be scalar or vector)

    Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs})

      xopt -- the minimizer of f.
      fopt -- the value of f(xopt).
      func_calls -- the number of function_calls.
      grad_calls -- the number of gradient calls.
      warnflag -- an integer warning flag:
                  1 : 'Maximum number of iterations exceeded.'
                  2 : 'Gradient and/or function calls not changing'
      allvecs -- if retall then this vector of the iterates is returned

    Additional Inputs:

      maxiter -- the maximum number of iterations.
      full_output -- if non-zero then return fopt, func_calls, grad_calls,
                     and warnflag in addition to xopt.
      disp -- print convergence message if non-zero.
      retall -- return a list of results at each iteration if True
    """
    app_fprime = 0
    if fprime is None:
        app_fprime = 1

    x0 = asarray(x0)
    if maxiter is None:
        maxiter = len(x0)*200
    func_calls = 0
    grad_calls = 0
    k = 0
    N = len(x0)
    xk = x0
    old_fval = f(xk,*args)
    old_old_fval = old_fval + 5000
    func_calls +=1

    if app_fprime:
        gfk = apply(approx_fprime,(x0,f,epsilon)+args)
        myfprime = (approx_fprime,epsilon)
        func_calls = func_calls + len(x0) + 1
    else:
        gfk = apply(fprime,(x0,)+args)
        myfprime = fprime
        grad_calls = grad_calls + 1
    if retall:
        allvecs = [xk]
    sk = [2*gtol]
    warnflag = 0
    pk = -gfk
    gnorm = vecnorm(gfk,ord=norm)

    while (gnorm > gtol) and (k < maxiter):
        deltak = Num.dot(gfk,gfk)
        alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
            linesearch.line_search(f,myfprime,xk,pk,gfk,old_fval,
                                   old_old_fval,args=args,c2=0.4)
        if alpha_k is None:  # line search failed -- use different one.
            func_calls += fc
            grad_calls += gc
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
                line_search(f,myfprime,xk,pk,gfk,
                            old_fval,old_old_fval,args=args)
        func_calls += fc
        grad_calls += gc
        xk = xk + alpha_k*pk
        if retall:
            allvecs.append(xk)
        if gfkp1 is None:
            if app_fprime:
                gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args)
                func_calls = func_calls + len(x0) + 1
            else:
                gfkp1 = apply(fprime,(xk,)+args)
                grad_calls = grad_calls + 1
        yk = gfkp1 - gfk
        beta_k = pymax(0,Num.dot(yk,gfkp1)/deltak)
        pk = -gfkp1 + beta_k * pk
        gfk = gfkp1
        gnorm = vecnorm(gfk,ord=norm)
        k = k + 1

    if disp or full_output:
        fval = old_fval
    if warnflag == 2:
        if disp:
            print "Warning: Desired error not necessarily achieved due to precision loss"
            print "         Current function value: %f" % fval
            print "         Iterations: %d" % k
            print "         Function evaluations: %d" % func_calls
            print "         Gradient evaluations: %d" % grad_calls
    elif k >= maxiter:
        warnflag = 1
        if disp:
            print "Warning: Maximum number of iterations has been exceeded"
            print "         Current function value: %f" % fval
            print "         Iterations: %d" % k
            print "         Function evaluations: %d" % func_calls
            print "         Gradient evaluations: %d" % grad_calls
    else:
        if disp:
            print "Optimization terminated successfully."
            print "         Current function value: %f" % fval
            print "         Iterations: %d" % k
            print "         Function evaluations: %d" % func_calls
            print "         Gradient evaluations: %d" % grad_calls

    if full_output:
        retlist = xk, fval, func_calls, grad_calls, warnflag
        if retall:
            retlist += (allvecs,)
    else:
        retlist = xk
        if retall:
            retlist = (xk, allvecs)

    return retlist
rem:
func_calls = 0
grad_calls = 0
add:
func_calls, f = wrap_function(f, args)
if fprime is None:
    grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
else:
    grad_calls, myfprime = wrap_function(fprime, args)
gfk = myfprime(x0)
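wrap_function itself is not shown in this excerpt. A minimal sketch consistent with how it is called here and with the func_calls[0] / grad_calls[0] hunks elsewhere in the diff: the counter is a one-element list shared with a closure, so the caller reads the count by indexing.

# Hypothetical sketch of the wrapper assumed by these hunks (not taken verbatim
# from the diff): returns (counter_list, wrapped_callable); each call bumps
# counter_list[0], and the wrapped callable has the extra args already bound.
def wrap_function(function, args):
    ncalls = [0]
    def function_wrapper(x):
        ncalls[0] += 1
        return function(x, *args)
    return ncalls, function_wrapper

# e.g. func_calls, f = wrap_function(f, args); after the run, func_calls[0]
# holds the number of objective evaluations, which is what the [0] prints
# and the [0] entries in the full_output tuple read.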
rem:
old_fval = f(xk,*args)
add:
old_fval = f(xk)
rem:
func_calls +=1
if app_fprime:
    gfk = apply(approx_fprime,(x0,f,epsilon)+args)
    myfprime = (approx_fprime,epsilon)
    func_calls = func_calls + len(x0) + 1
else:
    gfk = apply(fprime,(x0,)+args)
    myfprime = fprime
    grad_calls = grad_calls + 1
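The removed block above is where the len(x0) + 1 bookkeeping came from: a forward-difference gradient costs one baseline evaluation of f plus one perturbed evaluation per coordinate. A sketch of that arithmetic, assuming a scalar epsilon; this mirrors what approx_fprime is used for here, not its exact implementation.

# Sketch only: forward-difference gradient, N + 1 objective evaluations.
import numpy as np

def forward_difference_gradient(xk, f, epsilon, *args):
    xk = np.asarray(xk, dtype=float)
    f0 = f(xk, *args)                      # one call for the baseline ...
    grad = np.zeros(len(xk))
    for i in range(len(xk)):
        ei = np.zeros(len(xk))
        ei[i] = epsilon
        grad[i] = (f(xk + ei, *args) - f0) / epsilon   # ... plus one per coordinate
    return grad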
rem:
old_old_fval,args=args,c2=0.4)
add:
old_old_fval,c2=0.4)
rem:
func_calls += fc
grad_calls += gc
rem:
old_fval,old_old_fval,args=args)
func_calls += fc
grad_calls += gc
add:
old_fval,old_old_fval)
rem:
if app_fprime:
    gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args)
    func_calls = func_calls + len(x0) + 1
else:
    gfkp1 = apply(fprime,(xk,)+args)
    grad_calls = grad_calls + 1
add:
gfkp1 = myfprime(xk)
print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls
print " Function evaluations: %d" % func_calls[0] print " Gradient evaluations: %d" % grad_calls[0]
retlist = xk, fval, func_calls, grad_calls, warnflag
retlist = xk, fval, func_calls[0], grad_calls[0], warnflag
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function with nonlinear conjugate gradient algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the nonlinear conjugate gradient algorithm of Polak and Ribiere See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- stop when norm of gradient is less than gtol norm -- order of vector norm to use epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- if retall then this vector of the iterates is returned Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if True """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) xk = x0 old_fval = f(xk,*args) old_old_fval = old_fval + 5000 func_calls +=1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 if retall: allvecs = [xk] sk = [2*gtol] warnflag = 0 pk = -gfk gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): deltak = Num.dot(gfk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk,old_fval, old_old_fval,args=args,c2=0.4) if alpha_k is None: # line search failed -- use different one. func_calls += fc grad_calls += gc alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval,args=args) func_calls += fc grad_calls += gc xk = xk + alpha_k*pk if retall: allvecs.append(xk) if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xk,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk beta_k = pymax(0,Num.dot(yk,gfkp1)/deltak) pk = -gfkp1 + beta_k * pk gfk = gfkp1 gnorm = vecnorm(gfk,ord=norm) k = k + 1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
fcalls = 0 gcalls = 0
fcalls, f = wrap_function(f, args) gcalls, fprime = wrap_function(fprime, args)
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Description: Minimize the function, f, whose gradient is given by fprime using the Newton-CG method. fhess_p must compute the hessian times an arbitrary vector. If it is not given, finite-differences on fprime are used to compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 140. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f: fprime(x, *args) fhess_p -- a function to compute the Hessian of f times an arbitrary vector: fhess_p (x, p, *args) fhess -- a function to compute the Hessian matrix of f. args -- extra arguments for f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon -- if fhess is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) xopt -- the minimizer of f fopt -- the value of the function at xopt: fopt = f(xopt) fcalls -- the number of function calls. gcalls -- the number of gradient calls. hcalls -- the number of hessian calls. warnflag -- algorithm warnings: 1 : 'Maximum number of iterations exceeded.' allvecs -- a list of all tried iterates Additional Inputs: avextol -- Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter -- Maximum number of iterations to allow. full_output -- If non-zero return the optional outputs. disp -- If non-zero print convergence message. retall -- return a list of results at each iteration if True Remarks: Only one of fhess_p or fhess need be given. If fhess is provided, then fhess_p will be ignored. If neither fhess nor fhess_p is provided, then the hessian product will be approximated using finite differences on fprime. """ x0 = asarray(x0) fcalls = 0 gcalls = 0 hcalls = 0 if maxiter is None: maxiter = len(x0)*200 xtol = len(x0)*avextol update = [2*xtol] xk = x0 if retall: allvecs = [xk] k = 0 old_fval = f(x0,*args) fcalls += 1 while (Num.add.reduce(abs(update)) > xtol) and (k < maxiter): # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -apply(fprime,(xk,)+args) gcalls = gcalls + 1 maggrad = Num.add.reduce(abs(b)) eta = min([0.5,Num.sqrt(maggrad)]) termcond = eta * maggrad xsupi = zeros(len(x0), x0.typecode()) ri = -b psupi = -ri i = 0 dri0 = Num.dot(ri,ri) if fhess is not None: # you want to compute hessian once. A = apply(fhess,(xk,)+args) hcalls = hcalls + 1 while Num.add.reduce(abs(ri)) > termcond: if fhess is None: if fhess_p is None: Ap = apply(approx_fhess_p,(xk,psupi,fprime,epsilon)+args) gcalls = gcalls + 2 else: Ap = apply(fhess_p,(xk,psupi)+args) hcalls = hcalls + 1 else: Ap = Num.dot(A,psupi) # check curvature curv = Num.dot(psupi,Ap) if curv == 0.0: break elif curv < 0: if (i > 0): break else: xsupi = xsupi + dri0/curv * psupi break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = Num.dot(ri,ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update Num.dot(ri,ri) for next time. pk = xsupi # search direction is solution to system. 
gfk = -b # gradient at xk alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval,args) fcalls = fcalls + fc gcalls = gcalls + gc update = alphak * pk xk = xk + update if retall: allvecs.append(xk) k = k + 1 if disp or full_output: fval = old_fval if k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls else: warnflag = 0 if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls if full_output: retlist = xk, fval, fcalls, gcalls, hcalls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
old_fval = f(x0,*args) fcalls += 1
old_fval = f(x0)
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Description: Minimize the function, f, whose gradient is given by fprime using the Newton-CG method. fhess_p must compute the hessian times an arbitrary vector. If it is not given, finite-differences on fprime are used to compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 140. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f: fprime(x, *args) fhess_p -- a function to compute the Hessian of f times an arbitrary vector: fhess_p (x, p, *args) fhess -- a function to compute the Hessian matrix of f. args -- extra arguments for f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon -- if fhess is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) xopt -- the minimizer of f fopt -- the value of the function at xopt: fopt = f(xopt) fcalls -- the number of function calls. gcalls -- the number of gradient calls. hcalls -- the number of hessian calls. warnflag -- algorithm warnings: 1 : 'Maximum number of iterations exceeded.' allvecs -- a list of all tried iterates Additional Inputs: avextol -- Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter -- Maximum number of iterations to allow. full_output -- If non-zero return the optional outputs. disp -- If non-zero print convergence message. retall -- return a list of results at each iteration if True Remarks: Only one of fhess_p or fhess need be given. If fhess is provided, then fhess_p will be ignored. If neither fhess nor fhess_p is provided, then the hessian product will be approximated using finite differences on fprime. """ x0 = asarray(x0) fcalls = 0 gcalls = 0 hcalls = 0 if maxiter is None: maxiter = len(x0)*200 xtol = len(x0)*avextol update = [2*xtol] xk = x0 if retall: allvecs = [xk] k = 0 old_fval = f(x0,*args) fcalls += 1 while (Num.add.reduce(abs(update)) > xtol) and (k < maxiter): # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -apply(fprime,(xk,)+args) gcalls = gcalls + 1 maggrad = Num.add.reduce(abs(b)) eta = min([0.5,Num.sqrt(maggrad)]) termcond = eta * maggrad xsupi = zeros(len(x0), x0.typecode()) ri = -b psupi = -ri i = 0 dri0 = Num.dot(ri,ri) if fhess is not None: # you want to compute hessian once. A = apply(fhess,(xk,)+args) hcalls = hcalls + 1 while Num.add.reduce(abs(ri)) > termcond: if fhess is None: if fhess_p is None: Ap = apply(approx_fhess_p,(xk,psupi,fprime,epsilon)+args) gcalls = gcalls + 2 else: Ap = apply(fhess_p,(xk,psupi)+args) hcalls = hcalls + 1 else: Ap = Num.dot(A,psupi) # check curvature curv = Num.dot(psupi,Ap) if curv == 0.0: break elif curv < 0: if (i > 0): break else: xsupi = xsupi + dri0/curv * psupi break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = Num.dot(ri,ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update Num.dot(ri,ri) for next time. pk = xsupi # search direction is solution to system. 
gfk = -b # gradient at xk alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval,args) fcalls = fcalls + fc gcalls = gcalls + gc update = alphak * pk xk = xk + update if retall: allvecs.append(xk) k = k + 1 if disp or full_output: fval = old_fval if k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls else: warnflag = 0 if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls if full_output: retlist = xk, fval, fcalls, gcalls, hcalls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
b = -apply(fprime,(xk,)+args) gcalls = gcalls + 1
b = -fprime(xk)
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Description: Minimize the function, f, whose gradient is given by fprime using the Newton-CG method. fhess_p must compute the hessian times an arbitrary vector. If it is not given, finite-differences on fprime are used to compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 140. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f: fprime(x, *args) fhess_p -- a function to compute the Hessian of f times an arbitrary vector: fhess_p (x, p, *args) fhess -- a function to compute the Hessian matrix of f. args -- extra arguments for f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon -- if fhess is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) xopt -- the minimizer of f fopt -- the value of the function at xopt: fopt = f(xopt) fcalls -- the number of function calls. gcalls -- the number of gradient calls. hcalls -- the number of hessian calls. warnflag -- algorithm warnings: 1 : 'Maximum number of iterations exceeded.' allvecs -- a list of all tried iterates Additional Inputs: avextol -- Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter -- Maximum number of iterations to allow. full_output -- If non-zero return the optional outputs. disp -- If non-zero print convergence message. retall -- return a list of results at each iteration if True Remarks: Only one of fhess_p or fhess need be given. If fhess is provided, then fhess_p will be ignored. If neither fhess nor fhess_p is provided, then the hessian product will be approximated using finite differences on fprime. """ x0 = asarray(x0) fcalls = 0 gcalls = 0 hcalls = 0 if maxiter is None: maxiter = len(x0)*200 xtol = len(x0)*avextol update = [2*xtol] xk = x0 if retall: allvecs = [xk] k = 0 old_fval = f(x0,*args) fcalls += 1 while (Num.add.reduce(abs(update)) > xtol) and (k < maxiter): # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -apply(fprime,(xk,)+args) gcalls = gcalls + 1 maggrad = Num.add.reduce(abs(b)) eta = min([0.5,Num.sqrt(maggrad)]) termcond = eta * maggrad xsupi = zeros(len(x0), x0.typecode()) ri = -b psupi = -ri i = 0 dri0 = Num.dot(ri,ri) if fhess is not None: # you want to compute hessian once. A = apply(fhess,(xk,)+args) hcalls = hcalls + 1 while Num.add.reduce(abs(ri)) > termcond: if fhess is None: if fhess_p is None: Ap = apply(approx_fhess_p,(xk,psupi,fprime,epsilon)+args) gcalls = gcalls + 2 else: Ap = apply(fhess_p,(xk,psupi)+args) hcalls = hcalls + 1 else: Ap = Num.dot(A,psupi) # check curvature curv = Num.dot(psupi,Ap) if curv == 0.0: break elif curv < 0: if (i > 0): break else: xsupi = xsupi + dri0/curv * psupi break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = Num.dot(ri,ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update Num.dot(ri,ri) for next time. pk = xsupi # search direction is solution to system. 
gfk = -b # gradient at xk alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval,args) fcalls = fcalls + fc gcalls = gcalls + gc update = alphak * pk xk = xk + update if retall: allvecs.append(xk) k = k + 1 if disp or full_output: fval = old_fval if k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls else: warnflag = 0 if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls if full_output: retlist = xk, fval, fcalls, gcalls, hcalls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
Ap = apply(approx_fhess_p,(xk,psupi,fprime,epsilon)+args) gcalls = gcalls + 2
Ap = approx_fhess_p(xk,psupi,fprime,epsilon)
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Description: Minimize the function, f, whose gradient is given by fprime using the Newton-CG method. fhess_p must compute the hessian times an arbitrary vector. If it is not given, finite-differences on fprime are used to compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 140. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f: fprime(x, *args) fhess_p -- a function to compute the Hessian of f times an arbitrary vector: fhess_p (x, p, *args) fhess -- a function to compute the Hessian matrix of f. args -- extra arguments for f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon -- if fhess is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) xopt -- the minimizer of f fopt -- the value of the function at xopt: fopt = f(xopt) fcalls -- the number of function calls. gcalls -- the number of gradient calls. hcalls -- the number of hessian calls. warnflag -- algorithm warnings: 1 : 'Maximum number of iterations exceeded.' allvecs -- a list of all tried iterates Additional Inputs: avextol -- Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter -- Maximum number of iterations to allow. full_output -- If non-zero return the optional outputs. disp -- If non-zero print convergence message. retall -- return a list of results at each iteration if True Remarks: Only one of fhess_p or fhess need be given. If fhess is provided, then fhess_p will be ignored. If neither fhess nor fhess_p is provided, then the hessian product will be approximated using finite differences on fprime. """ x0 = asarray(x0) fcalls = 0 gcalls = 0 hcalls = 0 if maxiter is None: maxiter = len(x0)*200 xtol = len(x0)*avextol update = [2*xtol] xk = x0 if retall: allvecs = [xk] k = 0 old_fval = f(x0,*args) fcalls += 1 while (Num.add.reduce(abs(update)) > xtol) and (k < maxiter): # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -apply(fprime,(xk,)+args) gcalls = gcalls + 1 maggrad = Num.add.reduce(abs(b)) eta = min([0.5,Num.sqrt(maggrad)]) termcond = eta * maggrad xsupi = zeros(len(x0), x0.typecode()) ri = -b psupi = -ri i = 0 dri0 = Num.dot(ri,ri) if fhess is not None: # you want to compute hessian once. A = apply(fhess,(xk,)+args) hcalls = hcalls + 1 while Num.add.reduce(abs(ri)) > termcond: if fhess is None: if fhess_p is None: Ap = apply(approx_fhess_p,(xk,psupi,fprime,epsilon)+args) gcalls = gcalls + 2 else: Ap = apply(fhess_p,(xk,psupi)+args) hcalls = hcalls + 1 else: Ap = Num.dot(A,psupi) # check curvature curv = Num.dot(psupi,Ap) if curv == 0.0: break elif curv < 0: if (i > 0): break else: xsupi = xsupi + dri0/curv * psupi break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = Num.dot(ri,ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update Num.dot(ri,ri) for next time. pk = xsupi # search direction is solution to system. 
gfk = -b # gradient at xk alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval,args) fcalls = fcalls + fc gcalls = gcalls + gc update = alphak * pk xk = xk + update if retall: allvecs.append(xk) k = k + 1 if disp or full_output: fval = old_fval if k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls else: warnflag = 0 if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls if full_output: retlist = xk, fval, fcalls, gcalls, hcalls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
Ap = apply(fhess_p,(xk,psupi)+args)
Ap = fhess_p(xk,psupi, *args)
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Description: Minimize the function, f, whose gradient is given by fprime using the Newton-CG method. fhess_p must compute the hessian times an arbitrary vector. If it is not given, finite-differences on fprime are used to compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 140. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f: fprime(x, *args) fhess_p -- a function to compute the Hessian of f times an arbitrary vector: fhess_p (x, p, *args) fhess -- a function to compute the Hessian matrix of f. args -- extra arguments for f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon -- if fhess is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) xopt -- the minimizer of f fopt -- the value of the function at xopt: fopt = f(xopt) fcalls -- the number of function calls. gcalls -- the number of gradient calls. hcalls -- the number of hessian calls. warnflag -- algorithm warnings: 1 : 'Maximum number of iterations exceeded.' allvecs -- a list of all tried iterates Additional Inputs: avextol -- Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter -- Maximum number of iterations to allow. full_output -- If non-zero return the optional outputs. disp -- If non-zero print convergence message. retall -- return a list of results at each iteration if True Remarks: Only one of fhess_p or fhess need be given. If fhess is provided, then fhess_p will be ignored. If neither fhess nor fhess_p is provided, then the hessian product will be approximated using finite differences on fprime. """ x0 = asarray(x0) fcalls = 0 gcalls = 0 hcalls = 0 if maxiter is None: maxiter = len(x0)*200 xtol = len(x0)*avextol update = [2*xtol] xk = x0 if retall: allvecs = [xk] k = 0 old_fval = f(x0,*args) fcalls += 1 while (Num.add.reduce(abs(update)) > xtol) and (k < maxiter): # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -apply(fprime,(xk,)+args) gcalls = gcalls + 1 maggrad = Num.add.reduce(abs(b)) eta = min([0.5,Num.sqrt(maggrad)]) termcond = eta * maggrad xsupi = zeros(len(x0), x0.typecode()) ri = -b psupi = -ri i = 0 dri0 = Num.dot(ri,ri) if fhess is not None: # you want to compute hessian once. A = apply(fhess,(xk,)+args) hcalls = hcalls + 1 while Num.add.reduce(abs(ri)) > termcond: if fhess is None: if fhess_p is None: Ap = apply(approx_fhess_p,(xk,psupi,fprime,epsilon)+args) gcalls = gcalls + 2 else: Ap = apply(fhess_p,(xk,psupi)+args) hcalls = hcalls + 1 else: Ap = Num.dot(A,psupi) # check curvature curv = Num.dot(psupi,Ap) if curv == 0.0: break elif curv < 0: if (i > 0): break else: xsupi = xsupi + dri0/curv * psupi break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = Num.dot(ri,ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update Num.dot(ri,ri) for next time. pk = xsupi # search direction is solution to system. 
gfk = -b # gradient at xk alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval,args) fcalls = fcalls + fc gcalls = gcalls + gc update = alphak * pk xk = xk + update if retall: allvecs.append(xk) k = k + 1 if disp or full_output: fval = old_fval if k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls else: warnflag = 0 if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls if full_output: retlist = xk, fval, fcalls, gcalls, hcalls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval,args) fcalls = fcalls + fc gcalls = gcalls + gc
alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval)
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Description: Minimize the function, f, whose gradient is given by fprime using the Newton-CG method. fhess_p must compute the hessian times an arbitrary vector. If it is not given, finite-differences on fprime are used to compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 140. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f: fprime(x, *args) fhess_p -- a function to compute the Hessian of f times an arbitrary vector: fhess_p (x, p, *args) fhess -- a function to compute the Hessian matrix of f. args -- extra arguments for f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon -- if fhess is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) xopt -- the minimizer of f fopt -- the value of the function at xopt: fopt = f(xopt) fcalls -- the number of function calls. gcalls -- the number of gradient calls. hcalls -- the number of hessian calls. warnflag -- algorithm warnings: 1 : 'Maximum number of iterations exceeded.' allvecs -- a list of all tried iterates Additional Inputs: avextol -- Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter -- Maximum number of iterations to allow. full_output -- If non-zero return the optional outputs. disp -- If non-zero print convergence message. retall -- return a list of results at each iteration if True Remarks: Only one of fhess_p or fhess need be given. If fhess is provided, then fhess_p will be ignored. If neither fhess nor fhess_p is provided, then the hessian product will be approximated using finite differences on fprime. """ x0 = asarray(x0) fcalls = 0 gcalls = 0 hcalls = 0 if maxiter is None: maxiter = len(x0)*200 xtol = len(x0)*avextol update = [2*xtol] xk = x0 if retall: allvecs = [xk] k = 0 old_fval = f(x0,*args) fcalls += 1 while (Num.add.reduce(abs(update)) > xtol) and (k < maxiter): # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -apply(fprime,(xk,)+args) gcalls = gcalls + 1 maggrad = Num.add.reduce(abs(b)) eta = min([0.5,Num.sqrt(maggrad)]) termcond = eta * maggrad xsupi = zeros(len(x0), x0.typecode()) ri = -b psupi = -ri i = 0 dri0 = Num.dot(ri,ri) if fhess is not None: # you want to compute hessian once. A = apply(fhess,(xk,)+args) hcalls = hcalls + 1 while Num.add.reduce(abs(ri)) > termcond: if fhess is None: if fhess_p is None: Ap = apply(approx_fhess_p,(xk,psupi,fprime,epsilon)+args) gcalls = gcalls + 2 else: Ap = apply(fhess_p,(xk,psupi)+args) hcalls = hcalls + 1 else: Ap = Num.dot(A,psupi) # check curvature curv = Num.dot(psupi,Ap) if curv == 0.0: break elif curv < 0: if (i > 0): break else: xsupi = xsupi + dri0/curv * psupi break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = Num.dot(ri,ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update Num.dot(ri,ri) for next time. pk = xsupi # search direction is solution to system. 
gfk = -b # gradient at xk alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval,args) fcalls = fcalls + fc gcalls = gcalls + gc update = alphak * pk xk = xk + update if retall: allvecs.append(xk) k = k + 1 if disp or full_output: fval = old_fval if k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls else: warnflag = 0 if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls if full_output: retlist = xk, fval, fcalls, gcalls, hcalls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls
print " Function evaluations: %d" % fcalls[0] print " Gradient evaluations: %d" % gcalls[0]
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Description: Minimize the function, f, whose gradient is given by fprime using the Newton-CG method. fhess_p must compute the hessian times an arbitrary vector. If it is not given, finite-differences on fprime are used to compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 140. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f: fprime(x, *args) fhess_p -- a function to compute the Hessian of f times an arbitrary vector: fhess_p (x, p, *args) fhess -- a function to compute the Hessian matrix of f. args -- extra arguments for f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon -- if fhess is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) xopt -- the minimizer of f fopt -- the value of the function at xopt: fopt = f(xopt) fcalls -- the number of function calls. gcalls -- the number of gradient calls. hcalls -- the number of hessian calls. warnflag -- algorithm warnings: 1 : 'Maximum number of iterations exceeded.' allvecs -- a list of all tried iterates Additional Inputs: avextol -- Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter -- Maximum number of iterations to allow. full_output -- If non-zero return the optional outputs. disp -- If non-zero print convergence message. retall -- return a list of results at each iteration if True Remarks: Only one of fhess_p or fhess need be given. If fhess is provided, then fhess_p will be ignored. If neither fhess nor fhess_p is provided, then the hessian product will be approximated using finite differences on fprime. """ x0 = asarray(x0) fcalls = 0 gcalls = 0 hcalls = 0 if maxiter is None: maxiter = len(x0)*200 xtol = len(x0)*avextol update = [2*xtol] xk = x0 if retall: allvecs = [xk] k = 0 old_fval = f(x0,*args) fcalls += 1 while (Num.add.reduce(abs(update)) > xtol) and (k < maxiter): # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -apply(fprime,(xk,)+args) gcalls = gcalls + 1 maggrad = Num.add.reduce(abs(b)) eta = min([0.5,Num.sqrt(maggrad)]) termcond = eta * maggrad xsupi = zeros(len(x0), x0.typecode()) ri = -b psupi = -ri i = 0 dri0 = Num.dot(ri,ri) if fhess is not None: # you want to compute hessian once. A = apply(fhess,(xk,)+args) hcalls = hcalls + 1 while Num.add.reduce(abs(ri)) > termcond: if fhess is None: if fhess_p is None: Ap = apply(approx_fhess_p,(xk,psupi,fprime,epsilon)+args) gcalls = gcalls + 2 else: Ap = apply(fhess_p,(xk,psupi)+args) hcalls = hcalls + 1 else: Ap = Num.dot(A,psupi) # check curvature curv = Num.dot(psupi,Ap) if curv == 0.0: break elif curv < 0: if (i > 0): break else: xsupi = xsupi + dri0/curv * psupi break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = Num.dot(ri,ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update Num.dot(ri,ri) for next time. pk = xsupi # search direction is solution to system. 
gfk = -b # gradient at xk alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval,args) fcalls = fcalls + fc gcalls = gcalls + gc update = alphak * pk xk = xk + update if retall: allvecs.append(xk) k = k + 1 if disp or full_output: fval = old_fval if k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls else: warnflag = 0 if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls if full_output: retlist = xk, fval, fcalls, gcalls, hcalls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
retlist = xk, fval, fcalls, gcalls, hcalls, warnflag
retlist = xk, fval, fcalls[0], gcalls[0], hcalls, warnflag
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Description: Minimize the function, f, whose gradient is given by fprime using the Newton-CG method. fhess_p must compute the hessian times an arbitrary vector. If it is not given, finite-differences on fprime are used to compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 140. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f: fprime(x, *args) fhess_p -- a function to compute the Hessian of f times an arbitrary vector: fhess_p (x, p, *args) fhess -- a function to compute the Hessian matrix of f. args -- extra arguments for f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon -- if fhess is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) xopt -- the minimizer of f fopt -- the value of the function at xopt: fopt = f(xopt) fcalls -- the number of function calls. gcalls -- the number of gradient calls. hcalls -- the number of hessian calls. warnflag -- algorithm warnings: 1 : 'Maximum number of iterations exceeded.' allvecs -- a list of all tried iterates Additional Inputs: avextol -- Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter -- Maximum number of iterations to allow. full_output -- If non-zero return the optional outputs. disp -- If non-zero print convergence message. retall -- return a list of results at each iteration if True Remarks: Only one of fhess_p or fhess need be given. If fhess is provided, then fhess_p will be ignored. If neither fhess nor fhess_p is provided, then the hessian product will be approximated using finite differences on fprime. """ x0 = asarray(x0) fcalls = 0 gcalls = 0 hcalls = 0 if maxiter is None: maxiter = len(x0)*200 xtol = len(x0)*avextol update = [2*xtol] xk = x0 if retall: allvecs = [xk] k = 0 old_fval = f(x0,*args) fcalls += 1 while (Num.add.reduce(abs(update)) > xtol) and (k < maxiter): # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -apply(fprime,(xk,)+args) gcalls = gcalls + 1 maggrad = Num.add.reduce(abs(b)) eta = min([0.5,Num.sqrt(maggrad)]) termcond = eta * maggrad xsupi = zeros(len(x0), x0.typecode()) ri = -b psupi = -ri i = 0 dri0 = Num.dot(ri,ri) if fhess is not None: # you want to compute hessian once. A = apply(fhess,(xk,)+args) hcalls = hcalls + 1 while Num.add.reduce(abs(ri)) > termcond: if fhess is None: if fhess_p is None: Ap = apply(approx_fhess_p,(xk,psupi,fprime,epsilon)+args) gcalls = gcalls + 2 else: Ap = apply(fhess_p,(xk,psupi)+args) hcalls = hcalls + 1 else: Ap = Num.dot(A,psupi) # check curvature curv = Num.dot(psupi,Ap) if curv == 0.0: break elif curv < 0: if (i > 0): break else: xsupi = xsupi + dri0/curv * psupi break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = Num.dot(ri,ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update Num.dot(ri,ri) for next time. pk = xsupi # search direction is solution to system. 
gfk = -b # gradient at xk alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval,args) fcalls = fcalls + fc gcalls = gcalls + gc update = alphak * pk xk = xk + update if retall: allvecs.append(xk) k = k + 1 if disp or full_output: fval = old_fval if k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls else: warnflag = 0 if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls if full_output: retlist = xk, fval, fcalls, gcalls, hcalls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
si = Numeric.sign(xm-xf) + ((xm-xf)==0)
si = Num.sign(xm-xf) + ((xm-xf)==0)
def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500, full_output=0, disp=1): """Bounded minimization for scalar functions. Description: Finds a local minimizer of the scalar function func in the interval x1 < xopt < x2 using Brent's method. (See brent for auto-bracketing). Inputs: func -- the function to be minimized (must accept scalar input and return scalar output). x1, x2 -- the optimization bounds. args -- extra arguments to pass to function. xtol -- the convergence tolerance. maxfun -- maximum function evaluations. full_output -- Non-zero to return optional outputs. disp -- Non-zero to print messages. 0 : no message printing. 1 : non-convergence notification messages only. 2 : print a message on convergence too. 3 : print iteration results. Outputs: (xopt, {fval, ierr, numfunc}) xopt -- The minimizer of the function over the interval. fval -- The function value at the minimum point. ierr -- An error flag (0 if converged, 1 if maximum number of function calls reached). numfunc -- The number of function calls. """ if x1 > x2: raise ValueError, "The lower bound exceeds the upper bound." flag = 0 header = ' Func-count x f(x) Procedure' step=' initial' sqrt_eps = sqrt(2.2e-16) golden_mean = 0.5*(3.0-sqrt(5.0)) a, b = x1, x2 fulc = a + golden_mean*(b-a) nfc, xf = fulc, fulc rat = e = 0.0 x = xf fx = func(x,*args) num = 1 fmin_data = (1, xf, fx) ffulc = fnfc = fx xm = 0.5*(a+b) tol1 = sqrt_eps*abs(xf) + xtol / 3.0 tol2 = 2.0*tol1 if disp > 2: print (" ") print (header) print "%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)) while ( abs(xf-xm) > (tol2 - 0.5*(b-a)) ): golden = 1 # Check for parabolic fit if abs(e) > tol1: golden = 0 r = (xf-nfc)*(fx-ffulc) q = (xf-fulc)*(fx-fnfc) p = (xf-fulc)*q - (xf-nfc)*r q = 2.0*(q-r) if q > 0.0: p = -p q = abs(q) r = e e = rat # Check for acceptability of parabola if ( (abs(p) < abs(0.5*q*r)) and (p > q*(a-xf)) and (p < q*(b-xf))): rat = (p+0.0) / q; x = xf + rat step = ' parabolic' if ((x-a) < tol2) or ((b-x) < tol2): si = Numeric.sign(xm-xf) + ((xm-xf)==0) rat = tol1*si else: # do a golden section step golden = 1 if golden: # Do a golden-section step if xf >= xm: e=a-xf else: e=b-xf rat = golden_mean*e step = ' golden' si = Numeric.sign(rat) + (rat == 0) x = xf + si*max([abs(rat), tol1]) fu = func(x,*args) num += 1 fmin_data = (num, x, fu) if disp > 2: print "%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)) if fu <= fx: if x >= xf: a = xf else: b = xf fulc, ffulc = nfc, fnfc nfc, fnfc = xf, fx xf, fx = x, fu else: if x < xf: a = x else: b = x if (fu <= fnfc) or (nfc == xf): fulc, ffulc = nfc, fnfc nfc, fnfc = x, fu elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc): fulc, ffulc = x, fu xm = 0.5*(a+b) tol1 = sqrt_eps*abs(xf) + xtol/3.0 tol2 = 2.0*tol1 if num >= maxfun: flag = 1 fval = fx if disp > 0: _endprint(x, flag, fval, maxfun, xtol, disp) if full_output: return xf, fval, flag, num else: return xf fval = fx if disp > 0: _endprint(x, flag, fval, maxfun, xtol, disp) if full_output: return xf, fval, flag, num else: return xf
si = Numeric.sign(rat) + (rat == 0)
si = Num.sign(rat) + (rat == 0)
def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500, full_output=0, disp=1): """Bounded minimization for scalar functions. Description: Finds a local minimizer of the scalar function func in the interval x1 < xopt < x2 using Brent's method. (See brent for auto-bracketing). Inputs: func -- the function to be minimized (must accept scalar input and return scalar output). x1, x2 -- the optimization bounds. args -- extra arguments to pass to function. xtol -- the convergence tolerance. maxfun -- maximum function evaluations. full_output -- Non-zero to return optional outputs. disp -- Non-zero to print messages. 0 : no message printing. 1 : non-convergence notification messages only. 2 : print a message on convergence too. 3 : print iteration results. Outputs: (xopt, {fval, ierr, numfunc}) xopt -- The minimizer of the function over the interval. fval -- The function value at the minimum point. ierr -- An error flag (0 if converged, 1 if maximum number of function calls reached). numfunc -- The number of function calls. """ if x1 > x2: raise ValueError, "The lower bound exceeds the upper bound." flag = 0 header = ' Func-count x f(x) Procedure' step=' initial' sqrt_eps = sqrt(2.2e-16) golden_mean = 0.5*(3.0-sqrt(5.0)) a, b = x1, x2 fulc = a + golden_mean*(b-a) nfc, xf = fulc, fulc rat = e = 0.0 x = xf fx = func(x,*args) num = 1 fmin_data = (1, xf, fx) ffulc = fnfc = fx xm = 0.5*(a+b) tol1 = sqrt_eps*abs(xf) + xtol / 3.0 tol2 = 2.0*tol1 if disp > 2: print (" ") print (header) print "%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)) while ( abs(xf-xm) > (tol2 - 0.5*(b-a)) ): golden = 1 # Check for parabolic fit if abs(e) > tol1: golden = 0 r = (xf-nfc)*(fx-ffulc) q = (xf-fulc)*(fx-fnfc) p = (xf-fulc)*q - (xf-nfc)*r q = 2.0*(q-r) if q > 0.0: p = -p q = abs(q) r = e e = rat # Check for acceptability of parabola if ( (abs(p) < abs(0.5*q*r)) and (p > q*(a-xf)) and (p < q*(b-xf))): rat = (p+0.0) / q; x = xf + rat step = ' parabolic' if ((x-a) < tol2) or ((b-x) < tol2): si = Numeric.sign(xm-xf) + ((xm-xf)==0) rat = tol1*si else: # do a golden section step golden = 1 if golden: # Do a golden-section step if xf >= xm: e=a-xf else: e=b-xf rat = golden_mean*e step = ' golden' si = Numeric.sign(rat) + (rat == 0) x = xf + si*max([abs(rat), tol1]) fu = func(x,*args) num += 1 fmin_data = (num, x, fu) if disp > 2: print "%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)) if fu <= fx: if x >= xf: a = xf else: b = xf fulc, ffulc = nfc, fnfc nfc, fnfc = xf, fx xf, fx = x, fu else: if x < xf: a = x else: b = x if (fu <= fnfc) or (nfc == xf): fulc, ffulc = nfc, fnfc nfc, fnfc = x, fu elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc): fulc, ffulc = x, fu xm = 0.5*(a+b) tol1 = sqrt_eps*abs(xf) + xtol/3.0 tol2 = 2.0*tol1 if num >= maxfun: flag = 1 fval = fx if disp > 0: _endprint(x, flag, fval, maxfun, xtol, disp) if full_output: return xf, fval, flag, num else: return xf fval = fx if disp > 0: _endprint(x, flag, fval, maxfun, xtol, disp) if full_output: return xf, fval, flag, num else: return xf
global _powell_funcalls def _myfunc(alpha, func, x0, direc, args=()): funcargs = (x0 + alpha * direc,)+args return func(*funcargs) def _linesearch_powell(func, p, xi, args=(), tol=1e-3):
def _linesearch_powell(func, p, xi, tol=1e-3):
def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0): """Given a function and distinct initial points, search in the downhill direction (as defined by the initital points) and return new points xa, xb, xc that bracket the minimum of the function: f(xa) > f(xb) < f(xc) """ _gold = 1.618034 _verysmall_num = 1e-21 fa = apply(func, (xa,)+args) fb = apply(func, (xb,)+args) if (fa < fb): # Switch so fa > fb dum = xa; xa = xb; xb = dum dum = fa; fa = fb; fb = dum xc = xb + _gold*(xb-xa) fc = apply(func, (xc,)+args) funcalls = 3 iter = 0 while (fc < fb): tmp1 = (xb - xa)*(fb-fc) tmp2 = (xb - xc)*(fb-fa) val = tmp2-tmp1 if abs(val) < _verysmall_num: denom = 2.0*_verysmall_num else: denom = 2.0*val w = xb - ((xb-xc)*tmp2-(xb-xa)*tmp1)/denom wlim = xb + grow_limit*(xc-xb) if iter > 1000: raise RuntimeError, "Too many iterations." if (w-xc)*(xb-w) > 0.0: fw = apply(func, (w,)+args) funcalls += 1 if (fw < fc): xa = xb; xb=w; fa=fb; fb=fw return xa, xb, xc, fa, fb, fc, funcalls elif (fw > fb): xc = w; fc=fw return xa, xb, xc, fa, fb, fc, funcalls w = xc + _gold*(xc-xb) fw = apply(func, (w,)+args) funcalls += 1 elif (w-wlim)*(wlim-xc) >= 0.0: w = wlim fw = apply(func, (w,)+args) funcalls += 1 elif (w-wlim)*(xc-w) > 0.0: fw = apply(func, (w,)+args) funcalls += 1 if (fw < fc): xb=xc; xc=w; w=xc+_gold*(xc-xb) fb=fc; fc=fw; fw=apply(func, (w,)+args) funcalls += 1 else: w = xc + _gold*(xc-xb) fw = apply(func, (w,)+args) funcalls += 1 xa=xb; xb=xc; xc=w fa=fb; fb=fc; fc=fw return xa, xb, xc, fa, fb, fc, funcalls
global _powell_funcalls extra_args = (func, p, xi, args) alpha_min, fret, iter, num = brent(_myfunc, args=extra_args, full_output=1, tol=tol)
def myfunc(alpha): return func(p + alpha * xi) alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol)
def _linesearch_powell(func, p, xi, args=(), tol=1e-3): # line-search algorithm using fminbound # find the minimium of the function # func(x0+ alpha*direc) global _powell_funcalls extra_args = (func, p, xi, args) alpha_min, fret, iter, num = brent(_myfunc, args=extra_args, full_output=1, tol=tol) xi = alpha_min*xi _powell_funcalls += num return squeeze(fret), p+xi, xi
_powell_funcalls += num
def _linesearch_powell(func, p, xi, args=(), tol=1e-3): # line-search algorithm using fminbound # find the minimium of the function # func(x0+ alpha*direc) global _powell_funcalls extra_args = (func, p, xi, args) alpha_min, fret, iter, num = brent(_myfunc, args=extra_args, full_output=1, tol=tol) xi = alpha_min*xi _powell_funcalls += num return squeeze(fret), p+xi, xi
global _powell_funcalls
fcalls, func = wrap_function(func, args)
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using modified Powell's method. Description: Uses a modification of Powell's method to find the minimum of a function of N variables Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) direc -- current direction set iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- line-search error tolerance. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return a list of the solution at each iteration """ global _powell_funcalls x = asarray(x0) if retall: allvecs = [x] N = len(x) rank = len(x.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 1000 if maxfun is None: maxfun = N * 1000 direc = eye(N,typecode='d') fval = squeeze(apply(func, (x,)+args)) _powell_funcalls = 1 x1 = x.copy() iter = 0; ilist = range(N) while 1: fx = fval bigind = 0 delta = 0.0 for i in ilist: direc1 = direc[i] fx2 = fval fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) if (fx2 - fval) > delta: delta = fx2 - fval bigind = i iter += 1 if retall: allvecs.append(x) if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break if _powell_funcalls >= maxfun: break if iter >= maxiter: break # Construct the extrapolated point direc1 = x - x1 x2 = 2*x - x1 x1 = x.copy() fx2 = squeeze(apply(func, (x2,)+args)) _powell_funcalls +=1 if (fx > fx2): t = 2.0*(fx+fx2-2.0*fval) temp = (fx-fval-delta) t *= temp*temp temp = fx-fx2 t -= delta*temp*temp if t < 0.0: fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) direc[bigind] = direc[-1] direc[-1] = direc1 warnflag = 0 if _powell_funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." elif iter >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iter print " Function evaluations: %d" % _powell_funcalls x = squeeze(x) if full_output: retlist = x, fval, direc, iter, _powell_funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
fval = squeeze(apply(func, (x,)+args)) _powell_funcalls = 1
fval = squeeze(func(x))
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using modified Powell's method. Description: Uses a modification of Powell's method to find the minimum of a function of N variables Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) direc -- current direction set iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- line-search error tolerance. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return a list of the solution at each iteration """ global _powell_funcalls x = asarray(x0) if retall: allvecs = [x] N = len(x) rank = len(x.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 1000 if maxfun is None: maxfun = N * 1000 direc = eye(N,typecode='d') fval = squeeze(apply(func, (x,)+args)) _powell_funcalls = 1 x1 = x.copy() iter = 0; ilist = range(N) while 1: fx = fval bigind = 0 delta = 0.0 for i in ilist: direc1 = direc[i] fx2 = fval fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) if (fx2 - fval) > delta: delta = fx2 - fval bigind = i iter += 1 if retall: allvecs.append(x) if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break if _powell_funcalls >= maxfun: break if iter >= maxiter: break # Construct the extrapolated point direc1 = x - x1 x2 = 2*x - x1 x1 = x.copy() fx2 = squeeze(apply(func, (x2,)+args)) _powell_funcalls +=1 if (fx > fx2): t = 2.0*(fx+fx2-2.0*fval) temp = (fx-fval-delta) t *= temp*temp temp = fx-fx2 t -= delta*temp*temp if t < 0.0: fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) direc[bigind] = direc[-1] direc[-1] = direc1 warnflag = 0 if _powell_funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." elif iter >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iter print " Function evaluations: %d" % _powell_funcalls x = squeeze(x) if full_output: retlist = x, fval, direc, iter, _powell_funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100)
fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100)
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using modified Powell's method. Description: Uses a modification of Powell's method to find the minimum of a function of N variables Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) direc -- current direction set iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- line-search error tolerance. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return a list of the solution at each iteration """ global _powell_funcalls x = asarray(x0) if retall: allvecs = [x] N = len(x) rank = len(x.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 1000 if maxfun is None: maxfun = N * 1000 direc = eye(N,typecode='d') fval = squeeze(apply(func, (x,)+args)) _powell_funcalls = 1 x1 = x.copy() iter = 0; ilist = range(N) while 1: fx = fval bigind = 0 delta = 0.0 for i in ilist: direc1 = direc[i] fx2 = fval fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) if (fx2 - fval) > delta: delta = fx2 - fval bigind = i iter += 1 if retall: allvecs.append(x) if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break if _powell_funcalls >= maxfun: break if iter >= maxiter: break # Construct the extrapolated point direc1 = x - x1 x2 = 2*x - x1 x1 = x.copy() fx2 = squeeze(apply(func, (x2,)+args)) _powell_funcalls +=1 if (fx > fx2): t = 2.0*(fx+fx2-2.0*fval) temp = (fx-fval-delta) t *= temp*temp temp = fx-fx2 t -= delta*temp*temp if t < 0.0: fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) direc[bigind] = direc[-1] direc[-1] = direc1 warnflag = 0 if _powell_funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." elif iter >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iter print " Function evaluations: %d" % _powell_funcalls x = squeeze(x) if full_output: retlist = x, fval, direc, iter, _powell_funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
if _powell_funcalls >= maxfun: break
if fcalls[0] >= maxfun: break
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using modified Powell's method. Description: Uses a modification of Powell's method to find the minimum of a function of N variables Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) direc -- current direction set iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- line-search error tolerance. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return a list of the solution at each iteration """ global _powell_funcalls x = asarray(x0) if retall: allvecs = [x] N = len(x) rank = len(x.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 1000 if maxfun is None: maxfun = N * 1000 direc = eye(N,typecode='d') fval = squeeze(apply(func, (x,)+args)) _powell_funcalls = 1 x1 = x.copy() iter = 0; ilist = range(N) while 1: fx = fval bigind = 0 delta = 0.0 for i in ilist: direc1 = direc[i] fx2 = fval fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) if (fx2 - fval) > delta: delta = fx2 - fval bigind = i iter += 1 if retall: allvecs.append(x) if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break if _powell_funcalls >= maxfun: break if iter >= maxiter: break # Construct the extrapolated point direc1 = x - x1 x2 = 2*x - x1 x1 = x.copy() fx2 = squeeze(apply(func, (x2,)+args)) _powell_funcalls +=1 if (fx > fx2): t = 2.0*(fx+fx2-2.0*fval) temp = (fx-fval-delta) t *= temp*temp temp = fx-fx2 t -= delta*temp*temp if t < 0.0: fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) direc[bigind] = direc[-1] direc[-1] = direc1 warnflag = 0 if _powell_funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." elif iter >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iter print " Function evaluations: %d" % _powell_funcalls x = squeeze(x) if full_output: retlist = x, fval, direc, iter, _powell_funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
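The fmin_powell rows above drop the explicit args=args plumbing and compare fcalls[0] against maxfun instead of the module-level _powell_funcalls global. That is consistent with the wrap_function helper visible in the fmin_cg context later in this section: the objective is wrapped once, the extra arguments are bound inside the wrapper, and a one-element list serves as the shared call counter. A minimal sketch (rosen_like is an illustrative stand-in objective, not part of the module):

    def wrap_function(function, args):
        # One-element list as a mutable call counter shared with the wrapper,
        # so no module-level global like _powell_funcalls is needed.
        ncalls = [0]
        def function_wrapper(x):
            ncalls[0] += 1
            return function(x, *args)
        return ncalls, function_wrapper

    def rosen_like(x, scale):          # stand-in objective for illustration
        return scale * sum(xi * xi for xi in x)

    fcalls, func = wrap_function(rosen_like, (2.0,))
    func([1.0, 2.0]); func([0.5, 0.5])
    # fcalls[0] is now 2: the same counter the rows above compare with maxfun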
fx2 = squeeze(apply(func, (x2,)+args)) _powell_funcalls +=1
fx2 = squeeze(func(x2))
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using modified Powell's method. Description: Uses a modification of Powell's method to find the minimum of a function of N variables Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) direc -- current direction set iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- line-search error tolerance. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return a list of the solution at each iteration """ global _powell_funcalls x = asarray(x0) if retall: allvecs = [x] N = len(x) rank = len(x.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 1000 if maxfun is None: maxfun = N * 1000 direc = eye(N,typecode='d') fval = squeeze(apply(func, (x,)+args)) _powell_funcalls = 1 x1 = x.copy() iter = 0; ilist = range(N) while 1: fx = fval bigind = 0 delta = 0.0 for i in ilist: direc1 = direc[i] fx2 = fval fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) if (fx2 - fval) > delta: delta = fx2 - fval bigind = i iter += 1 if retall: allvecs.append(x) if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break if _powell_funcalls >= maxfun: break if iter >= maxiter: break # Construct the extrapolated point direc1 = x - x1 x2 = 2*x - x1 x1 = x.copy() fx2 = squeeze(apply(func, (x2,)+args)) _powell_funcalls +=1 if (fx > fx2): t = 2.0*(fx+fx2-2.0*fval) temp = (fx-fval-delta) t *= temp*temp temp = fx-fx2 t -= delta*temp*temp if t < 0.0: fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) direc[bigind] = direc[-1] direc[-1] = direc1 warnflag = 0 if _powell_funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." elif iter >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iter print " Function evaluations: %d" % _powell_funcalls x = squeeze(x) if full_output: retlist = x, fval, direc, iter, _powell_funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
if _powell_funcalls >= maxfun:
if fcalls[0] >= maxfun:
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using modified Powell's method. Description: Uses a modification of Powell's method to find the minimum of a function of N variables Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) direc -- current direction set iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- line-search error tolerance. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return a list of the solution at each iteration """ global _powell_funcalls x = asarray(x0) if retall: allvecs = [x] N = len(x) rank = len(x.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 1000 if maxfun is None: maxfun = N * 1000 direc = eye(N,typecode='d') fval = squeeze(apply(func, (x,)+args)) _powell_funcalls = 1 x1 = x.copy() iter = 0; ilist = range(N) while 1: fx = fval bigind = 0 delta = 0.0 for i in ilist: direc1 = direc[i] fx2 = fval fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) if (fx2 - fval) > delta: delta = fx2 - fval bigind = i iter += 1 if retall: allvecs.append(x) if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break if _powell_funcalls >= maxfun: break if iter >= maxiter: break # Construct the extrapolated point direc1 = x - x1 x2 = 2*x - x1 x1 = x.copy() fx2 = squeeze(apply(func, (x2,)+args)) _powell_funcalls +=1 if (fx > fx2): t = 2.0*(fx+fx2-2.0*fval) temp = (fx-fval-delta) t *= temp*temp temp = fx-fx2 t -= delta*temp*temp if t < 0.0: fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) direc[bigind] = direc[-1] direc[-1] = direc1 warnflag = 0 if _powell_funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." elif iter >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iter print " Function evaluations: %d" % _powell_funcalls x = squeeze(x) if full_output: retlist = x, fval, direc, iter, _powell_funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
print " Function evaluations: %d" % _powell_funcalls
print " Function evaluations: %d" % fcalls[0]
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using modified Powell's method. Description: Uses a modification of Powell's method to find the minimum of a function of N variables Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) direc -- current direction set iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- line-search error tolerance. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return a list of the solution at each iteration """ global _powell_funcalls x = asarray(x0) if retall: allvecs = [x] N = len(x) rank = len(x.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 1000 if maxfun is None: maxfun = N * 1000 direc = eye(N,typecode='d') fval = squeeze(apply(func, (x,)+args)) _powell_funcalls = 1 x1 = x.copy() iter = 0; ilist = range(N) while 1: fx = fval bigind = 0 delta = 0.0 for i in ilist: direc1 = direc[i] fx2 = fval fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) if (fx2 - fval) > delta: delta = fx2 - fval bigind = i iter += 1 if retall: allvecs.append(x) if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break if _powell_funcalls >= maxfun: break if iter >= maxiter: break # Construct the extrapolated point direc1 = x - x1 x2 = 2*x - x1 x1 = x.copy() fx2 = squeeze(apply(func, (x2,)+args)) _powell_funcalls +=1 if (fx > fx2): t = 2.0*(fx+fx2-2.0*fval) temp = (fx-fval-delta) t *= temp*temp temp = fx-fx2 t -= delta*temp*temp if t < 0.0: fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) direc[bigind] = direc[-1] direc[-1] = direc1 warnflag = 0 if _powell_funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." elif iter >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iter print " Function evaluations: %d" % _powell_funcalls x = squeeze(x) if full_output: retlist = x, fval, direc, iter, _powell_funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
retlist = x, fval, direc, iter, _powell_funcalls, warnflag
retlist = x, fval, direc, iter, fcalls[0], warnflag
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0): """Minimize a function using modified Powell's method. Description: Uses a modification of Powell's method to find the minimum of a function of N variables Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. Outputs: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) direc -- current direction set iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- line-search error tolerance. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return a list of the solution at each iteration """ global _powell_funcalls x = asarray(x0) if retall: allvecs = [x] N = len(x) rank = len(x.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 1000 if maxfun is None: maxfun = N * 1000 direc = eye(N,typecode='d') fval = squeeze(apply(func, (x,)+args)) _powell_funcalls = 1 x1 = x.copy() iter = 0; ilist = range(N) while 1: fx = fval bigind = 0 delta = 0.0 for i in ilist: direc1 = direc[i] fx2 = fval fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) if (fx2 - fval) > delta: delta = fx2 - fval bigind = i iter += 1 if retall: allvecs.append(x) if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break if _powell_funcalls >= maxfun: break if iter >= maxiter: break # Construct the extrapolated point direc1 = x - x1 x2 = 2*x - x1 x1 = x.copy() fx2 = squeeze(apply(func, (x2,)+args)) _powell_funcalls +=1 if (fx > fx2): t = 2.0*(fx+fx2-2.0*fval) temp = (fx-fval-delta) t *= temp*temp temp = fx-fx2 t -= delta*temp*temp if t < 0.0: fval, x, direc1 = _linesearch_powell(func, x, direc1, args=args, tol=xtol*100) direc[bigind] = direc[-1] direc[-1] = direc1 warnflag = 0 if _powell_funcalls >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." elif iter >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iter print " Function evaluations: %d" % _powell_funcalls x = squeeze(x) if full_output: retlist = x, fval, direc, iter, _powell_funcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
if __name__ == "__main__": import string
def main():
def _scalarfunc(*params): params = squeeze(asarray(params)) return func(params,*args)
if __name__ == "__main__": main()
def _scalarfunc(*params): params = squeeze(asarray(params)) return func(params,*args)
return special.bdtrik(q,n,pr)
vals = scipy.ceil(special.bdtrik(q,n,pr)) temp = special.bdtr(vals-1,n,pr) return where(temp >= q, vals-1, vals)
def binomppf(q, n, pr=0.5): return special.bdtrik(q,n,pr)
return special.bdtrik(1-p,n,pr)
return binomppf(1-p,n,pr)
def binomisf(p, n, pr=0.5): return special.bdtrik(1-p,n,pr)
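The ppf rows stop returning the raw continuous inversion from special.bdtrik and instead round up to an integer count, then step back by one when the smaller count already reaches the requested quantile; the isf row simply reuses the ppf at 1-p. A self-contained restatement of that correction (binom_ppf_sketch and binom_isf_sketch are illustrative names, not the module's):

    import numpy as np
    from scipy import special

    def binom_ppf_sketch(q, n, pr):
        # Invert the CDF continuously, round up to an integer count, then
        # step back one place if the smaller count already reaches q.
        vals = np.ceil(special.bdtrik(q, n, pr))
        return np.where(special.bdtr(vals - 1, n, pr) >= q, vals - 1, vals)

    def binom_isf_sketch(p, n, pr):
        return binom_ppf_sketch(1 - p, n, pr)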
out = where(lVar > noise, lMean, im)
res = (im - lMean) res *= (1-noise / lVar) res += lMean out = where(lVar < noise, lMean, res)
def wiener(im,mysize=None,noise=None): """Perform a wiener filter on an N-dimensional array. Description: Apply a wiener filter to the N-dimensional array in. Inputs: in -- an N-dimensional array. kernel_size -- A scalar or an N-length list giving the size of the median filter window in each dimension. Elements of kernel_size should be odd. If kernel_size is a scalar, then this scalar is used as the size in each dimension. noise -- The noise-power to use. If None, then noise is estimated as the average of the local variance of the input. Outputs: (out,) out -- Wiener filtered result with the same shape as in. """ im = asarray(im) if mysize is None: mysize = [3] * len(im.shape) mysize = asarray(mysize); # Estimate the local mean lMean = correlate(im,Numeric.ones(mysize),1) / Numeric.product(mysize) # Estimate the local variance lVar = correlate(im**2,Numeric.ones(mysize),1) / Numeric.product(mysize) - lMean**2 # Estimate the noise power if needed. if noise==None: noise = mean(Numeric.ravel(lVar)) out = where(lVar > noise, lMean, im) return out
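The wiener row replaces the placeholder where(lVar > noise, lMean, im) with the actual pointwise Wiener estimate: the deviation from the local mean is shrunk by (1 - noise/lVar), and the plain local mean is used wherever the local variance falls below the noise power. The same arithmetic in isolation (wiener_pointwise is an illustrative helper name):

    import numpy as np

    def wiener_pointwise(im, local_mean, local_var, noise):
        # Shrink the deviation from the local mean by (1 - noise/local_var);
        # where the local variance is below the noise floor, keep the mean.
        res = local_mean + (1.0 - noise / local_var) * (im - local_mean)
        return np.where(local_var < noise, local_mean, res)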
int dim[2] = {upper-lower, Nx[0]};
npy_intp dim[2] = {upper-lower, Nx[0]};
def setup_bspline_module(): """ Builds an extension module with Bspline basis calculators using weave. """ mod = ext_tools.ext_module('_bspline', compiler='gcc') knots = N.linspace(0,1,11).astype(N.float64) nknots = knots.shape[0] x = N.array([0.4,0.5], N.float64) nx = x.shape[0] m = 4 d = 0 lower = 0 upper = 13 # Bspline code in C eval_code = ''' double *bspline(double **output, double *x, int nx, double *knots, int nknots, int m, int d, int lower, int upper) { int nbasis; int index, i, j, k; double *result, *b, *b0, *b1; double *f0, *f1; double denom; nbasis = upper - lower; result = *((double **) output); f0 = (double *) malloc(sizeof(*f0) * nx); f1 = (double *) malloc(sizeof(*f1) * nx); if (m == 1) { for(i=0; i<nbasis; i++) { index = i + lower; if(index < nknots - 1) { if ((knots[index] != knots[index+1]) && (d <= 0)) { for (k=0; k<nx; k++) { *result = (double) (x[k] >= knots[index]) * (x[k] < knots[index+1]); result++; } } else { for (k=0; k<nx; k++) { *result = 0.; result++; } } } else { for (k=0; k<nx; k++) { *result = 0.; result++; } } } } else { b = (double *) malloc(sizeof(*b) * (nbasis+1) * nx); bspline(&b, x, nx, knots, nknots, m-1, d-1, lower, upper+1); for(i=0; i<nbasis; i++) { b0 = b + nx*i; b1 = b + nx*(i+1); index = i+lower; if ((knots[index] != knots[index+m-1]) && (index+m-1 < nknots)) { denom = knots[index+m-1] - knots[index]; if (d <= 0) { for (k=0; k<nx; k++) { f0[k] = (x[k] - knots[index]) / denom; } } else { for (k=0; k<nx; k++) { f0[k] = (m-1) / (knots[index+m-1] - knots[index]); } } } else { for (k=0; k<nx; k++) { f0[k] = 0.; } } index = i+lower+1; if ((knots[index] != knots[index+m-1]) && (index+m-1 < nknots)) { denom = knots[index+m-1] - knots[index]; if (d <= 0) { for (k=0; k<nx; k++) { f1[k] = (knots[index+m-1] - x[k]) / denom; } } else { for (k=0; k<nx; k++) { f1[k] = -(m-1) / (knots[index+m-1] - knots[index]); } } } else { for (k=0; k<nx; k++) { f1[k] = 0.; } } for (k=0; k<nx; k++) { *result = f0[k]*(*b0) + f1[k]*(*b1); b0++; b1++; result++; } } free(b); } free(f0); free(f1); result = result - nx * nbasis; return(result); } ''' eval_ext_code = ''' int dim[2] = {upper-lower, Nx[0]}; PyArrayObject *basis; double *data; basis = (PyArrayObject *) PyArray_SimpleNew(2, dim, PyArray_DOUBLE); data = (double *) basis->data; bspline(&data, x, Nx[0], knots, Nknots[0], m, d, lower, upper); return_val = (PyObject *) basis; ''' bspline_eval = ext_tools.ext_function('evaluate', eval_ext_code, ['x', 'knots', 'm', 'd', 'lower', 'upper']) mod.add_function(bspline_eval) bspline_eval.customize.add_support_code(eval_code) nq = 18 qx, qw = scipy.special.orthogonal.p_roots(nq) dl = dr = 2 gram_code = ''' double *bspline_prod(double *x, int nx, double *knots, int nknots, int m, int l, int r, int dl, int dr) { double *result, *bl, *br; int k; if (fabs(r - l) <= m) { result = (double *) malloc(sizeof(*result) * nx); bl = (double *) malloc(sizeof(*bl) * nx); br = (double *) malloc(sizeof(*br) * nx); bl = bspline(&bl, x, nx, knots, nknots, m, dl, l, l+1); br = bspline(&br, x, nx, knots, nknots, m, dr, r, r+1); for (k=0; k<nx; k++) { result[k] = bl[k] * br[k]; } free(bl); free(br); } else { for (k=0; k<nx; k++) { result[k] = 0.; } } return(result); } double bspline_quad(double *knots, int nknots, int m, int l, int r, int dl, int dr) /* This is based on scipy.integrate.fixed_quad */ { double *y; double qx[%(nq)d]={%(qx)s}; double qw[%(nq)d]={%(qw)s}; double x[%(nq)d]; int nq=%(nq)d; int k, kk; int lower, upper; double result, a, b, partial; result = 0; /* TO DO: figure out knot span more 
efficiently */ lower = l - m - 1; if (lower < 0) { lower = 0;} upper = lower + 2 * m + 4; if (upper > nknots - 1) {upper = nknots-1;}
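The int -> npy_intp rows in this weave module concern the dimension arrays handed to PyArray_SimpleNew, which expects npy_intp entries; on 64-bit builds a C int is narrower than npy_intp, so the declared type matters. The size difference can be seen from Python (values shown are the usual ones on LP64 platforms, not a guarantee):

    import numpy as np

    # np.intp mirrors the C npy_intp (pointer-sized); np.intc mirrors a plain
    # C int. On 64-bit builds the two differ, which is what the int ->
    # npy_intp change in the dimension arrays guards against.
    intp_size = np.dtype(np.intp).itemsize   # typically 8 on 64-bit builds
    intc_size = np.dtype(np.intc).itemsize   # typically 4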
int dim[2] = {Nknots[0]-m, m};
npy_intp dim[2] = {Nknots[0]-m, m};
def setup_bspline_module(): """ Builds an extension module with Bspline basis calculators using weave. """ mod = ext_tools.ext_module('_bspline', compiler='gcc') knots = N.linspace(0,1,11).astype(N.float64) nknots = knots.shape[0] x = N.array([0.4,0.5], N.float64) nx = x.shape[0] m = 4 d = 0 lower = 0 upper = 13 # Bspline code in C eval_code = ''' double *bspline(double **output, double *x, int nx, double *knots, int nknots, int m, int d, int lower, int upper) { int nbasis; int index, i, j, k; double *result, *b, *b0, *b1; double *f0, *f1; double denom; nbasis = upper - lower; result = *((double **) output); f0 = (double *) malloc(sizeof(*f0) * nx); f1 = (double *) malloc(sizeof(*f1) * nx); if (m == 1) { for(i=0; i<nbasis; i++) { index = i + lower; if(index < nknots - 1) { if ((knots[index] != knots[index+1]) && (d <= 0)) { for (k=0; k<nx; k++) { *result = (double) (x[k] >= knots[index]) * (x[k] < knots[index+1]); result++; } } else { for (k=0; k<nx; k++) { *result = 0.; result++; } } } else { for (k=0; k<nx; k++) { *result = 0.; result++; } } } } else { b = (double *) malloc(sizeof(*b) * (nbasis+1) * nx); bspline(&b, x, nx, knots, nknots, m-1, d-1, lower, upper+1); for(i=0; i<nbasis; i++) { b0 = b + nx*i; b1 = b + nx*(i+1); index = i+lower; if ((knots[index] != knots[index+m-1]) && (index+m-1 < nknots)) { denom = knots[index+m-1] - knots[index]; if (d <= 0) { for (k=0; k<nx; k++) { f0[k] = (x[k] - knots[index]) / denom; } } else { for (k=0; k<nx; k++) { f0[k] = (m-1) / (knots[index+m-1] - knots[index]); } } } else { for (k=0; k<nx; k++) { f0[k] = 0.; } } index = i+lower+1; if ((knots[index] != knots[index+m-1]) && (index+m-1 < nknots)) { denom = knots[index+m-1] - knots[index]; if (d <= 0) { for (k=0; k<nx; k++) { f1[k] = (knots[index+m-1] - x[k]) / denom; } } else { for (k=0; k<nx; k++) { f1[k] = -(m-1) / (knots[index+m-1] - knots[index]); } } } else { for (k=0; k<nx; k++) { f1[k] = 0.; } } for (k=0; k<nx; k++) { *result = f0[k]*(*b0) + f1[k]*(*b1); b0++; b1++; result++; } } free(b); } free(f0); free(f1); result = result - nx * nbasis; return(result); } ''' eval_ext_code = ''' int dim[2] = {upper-lower, Nx[0]}; PyArrayObject *basis; double *data; basis = (PyArrayObject *) PyArray_SimpleNew(2, dim, PyArray_DOUBLE); data = (double *) basis->data; bspline(&data, x, Nx[0], knots, Nknots[0], m, d, lower, upper); return_val = (PyObject *) basis; ''' bspline_eval = ext_tools.ext_function('evaluate', eval_ext_code, ['x', 'knots', 'm', 'd', 'lower', 'upper']) mod.add_function(bspline_eval) bspline_eval.customize.add_support_code(eval_code) nq = 18 qx, qw = scipy.special.orthogonal.p_roots(nq) dl = dr = 2 gram_code = ''' double *bspline_prod(double *x, int nx, double *knots, int nknots, int m, int l, int r, int dl, int dr) { double *result, *bl, *br; int k; if (fabs(r - l) <= m) { result = (double *) malloc(sizeof(*result) * nx); bl = (double *) malloc(sizeof(*bl) * nx); br = (double *) malloc(sizeof(*br) * nx); bl = bspline(&bl, x, nx, knots, nknots, m, dl, l, l+1); br = bspline(&br, x, nx, knots, nknots, m, dr, r, r+1); for (k=0; k<nx; k++) { result[k] = bl[k] * br[k]; } free(bl); free(br); } else { for (k=0; k<nx; k++) { result[k] = 0.; } } return(result); } double bspline_quad(double *knots, int nknots, int m, int l, int r, int dl, int dr) /* This is based on scipy.integrate.fixed_quad */ { double *y; double qx[%(nq)d]={%(qx)s}; double qw[%(nq)d]={%(qw)s}; double x[%(nq)d]; int nq=%(nq)d; int k, kk; int lower, upper; double result, a, b, partial; result = 0; /* TO DO: figure out knot span more 
efficiently */ lower = l - m - 1; if (lower < 0) { lower = 0;} upper = lower + 2 * m + 4; if (upper > nknots - 1) {upper = nknots-1;}
int dim[2] = {NL[0], NL[1]};
npy_intp dim[2] = {NL[0], NL[1]};
def setup_bspline_module(): """ Builds an extension module with Bspline basis calculators using weave. """ mod = ext_tools.ext_module('_bspline', compiler='gcc') knots = N.linspace(0,1,11).astype(N.float64) nknots = knots.shape[0] x = N.array([0.4,0.5], N.float64) nx = x.shape[0] m = 4 d = 0 lower = 0 upper = 13 # Bspline code in C eval_code = ''' double *bspline(double **output, double *x, int nx, double *knots, int nknots, int m, int d, int lower, int upper) { int nbasis; int index, i, j, k; double *result, *b, *b0, *b1; double *f0, *f1; double denom; nbasis = upper - lower; result = *((double **) output); f0 = (double *) malloc(sizeof(*f0) * nx); f1 = (double *) malloc(sizeof(*f1) * nx); if (m == 1) { for(i=0; i<nbasis; i++) { index = i + lower; if(index < nknots - 1) { if ((knots[index] != knots[index+1]) && (d <= 0)) { for (k=0; k<nx; k++) { *result = (double) (x[k] >= knots[index]) * (x[k] < knots[index+1]); result++; } } else { for (k=0; k<nx; k++) { *result = 0.; result++; } } } else { for (k=0; k<nx; k++) { *result = 0.; result++; } } } } else { b = (double *) malloc(sizeof(*b) * (nbasis+1) * nx); bspline(&b, x, nx, knots, nknots, m-1, d-1, lower, upper+1); for(i=0; i<nbasis; i++) { b0 = b + nx*i; b1 = b + nx*(i+1); index = i+lower; if ((knots[index] != knots[index+m-1]) && (index+m-1 < nknots)) { denom = knots[index+m-1] - knots[index]; if (d <= 0) { for (k=0; k<nx; k++) { f0[k] = (x[k] - knots[index]) / denom; } } else { for (k=0; k<nx; k++) { f0[k] = (m-1) / (knots[index+m-1] - knots[index]); } } } else { for (k=0; k<nx; k++) { f0[k] = 0.; } } index = i+lower+1; if ((knots[index] != knots[index+m-1]) && (index+m-1 < nknots)) { denom = knots[index+m-1] - knots[index]; if (d <= 0) { for (k=0; k<nx; k++) { f1[k] = (knots[index+m-1] - x[k]) / denom; } } else { for (k=0; k<nx; k++) { f1[k] = -(m-1) / (knots[index+m-1] - knots[index]); } } } else { for (k=0; k<nx; k++) { f1[k] = 0.; } } for (k=0; k<nx; k++) { *result = f0[k]*(*b0) + f1[k]*(*b1); b0++; b1++; result++; } } free(b); } free(f0); free(f1); result = result - nx * nbasis; return(result); } ''' eval_ext_code = ''' int dim[2] = {upper-lower, Nx[0]}; PyArrayObject *basis; double *data; basis = (PyArrayObject *) PyArray_SimpleNew(2, dim, PyArray_DOUBLE); data = (double *) basis->data; bspline(&data, x, Nx[0], knots, Nknots[0], m, d, lower, upper); return_val = (PyObject *) basis; ''' bspline_eval = ext_tools.ext_function('evaluate', eval_ext_code, ['x', 'knots', 'm', 'd', 'lower', 'upper']) mod.add_function(bspline_eval) bspline_eval.customize.add_support_code(eval_code) nq = 18 qx, qw = scipy.special.orthogonal.p_roots(nq) dl = dr = 2 gram_code = ''' double *bspline_prod(double *x, int nx, double *knots, int nknots, int m, int l, int r, int dl, int dr) { double *result, *bl, *br; int k; if (fabs(r - l) <= m) { result = (double *) malloc(sizeof(*result) * nx); bl = (double *) malloc(sizeof(*bl) * nx); br = (double *) malloc(sizeof(*br) * nx); bl = bspline(&bl, x, nx, knots, nknots, m, dl, l, l+1); br = bspline(&br, x, nx, knots, nknots, m, dr, r, r+1); for (k=0; k<nx; k++) { result[k] = bl[k] * br[k]; } free(bl); free(br); } else { for (k=0; k<nx; k++) { result[k] = 0.; } } return(result); } double bspline_quad(double *knots, int nknots, int m, int l, int r, int dl, int dr) /* This is based on scipy.integrate.fixed_quad */ { double *y; double qx[%(nq)d]={%(qx)s}; double qw[%(nq)d]={%(qw)s}; double x[%(nq)d]; int nq=%(nq)d; int k, kk; int lower, upper; double result, a, b, partial; result = 0; /* TO DO: figure out knot span more 
efficiently */ lower = l - m - 1; if (lower < 0) { lower = 0;} upper = lower + 2 * m + 4; if (upper > nknots - 1) {upper = nknots-1;}
return
return {}
def configuration(parent_package=''): """ gist only works with an X-windows server This will install *.gs and *.gp files to '%spython%s/site-packages/scipy/xplt' % (sys.prefix,sys.version[:3]) """ x11 = x11_info().get_info() if not x11: return config = default_config_dict('xplt',parent_package) local_path = get_path(__name__) sources = ['gistCmodule.c'] sources = [os.path.join(local_path,x) for x in sources] ext_arg = {'name':dot_join(parent_package,'xplt.gistC'), 'sources':sources} dict_append(ext_arg,**x11) dict_append(ext_arg,libraries=['m']) ext = Extension (**ext_arg) config['ext_modules'].append(ext) from glob import glob gist = glob(os.path.join(local_path,'gist','*.c')) # libraries are C static libraries config['libraries'].append(('gist',{'sources':gist, 'macros':[('STDC_HEADERS',1)]})) file_ext = ['*.gs','*.gp', '*.ps', '*.help'] xplt_files = [glob(os.path.join(local_path,x)) for x in file_ext] xplt_files = reduce(lambda x,y:x+y,xplt_files,[]) xplt_path = os.path.join(parent_package,'xplt') config['data_files'].extend( [(xplt_path,xplt_files)]) return config
self.log.WriteText("Print Preview failed." \ "Check that default printer is configured\n")
print "Print Preview failed." \ "Check that default printer is configured\n"
def OnFilePreview(self, event): printout = graph_printout(self.client) printout2 = graph_printout(self.client) self.preview = wx.wxPrintPreview(printout, printout2, self.print_data) if not self.preview.Ok(): self.log.WriteText("Print Preview failed." \ "Check that default printer is configured\n") return
f = rv.norm(old,w)[0]
f = rv.norm.rvs(old,w)[0]
def evaluate(self,gene): """ return a new value from the genes allele set """ size = len(gene.allele_set) if size == 1: return gene.allele_set[0] w = self.dev_width * size old = gene.index() new = -1; f = -1 while not (0 <= new < size): f = rv.norm(old,w)[0] new = round(f) if(old == new and f > new): new = new + 1 if(old == new and f < new): new = new - 1 return gene.allele_set[int(new)]
new = rv.norm(gene._value,dev).rvs()[0]
new = rv.norm.rvs(gene._value,dev)[0]
def evaluate(self,gene): dev = (gene.bounds[1]-gene.bounds[0]) * self.dev_width new = gene.bounds[1]
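These two rows reflect the scipy.stats interface in which rv.norm is a distribution object rather than a sampling function: drawing values goes through .rvs(loc, scale), optionally after freezing the distribution. For example:

    from scipy import stats

    old, w = 3.0, 0.5
    sample = stats.norm.rvs(loc=old, scale=w)       # draw one value directly
    sample = stats.norm(loc=old, scale=w).rvs()     # or freeze, then sample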
alpha_k, fc, gc, old_fval_backup, old_old_fval_backup, gfkp1 = \
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function with nonlinear conjugate gradient algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the nonlinear conjugate gradient algorithm of Polak and Ribiere See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- stop when norm of gradient is less than gtol norm -- order of vector norm to use epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- if retall then this vector of the iterates is returned Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if True """ x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls, f = wrap_function(f, args) if fprime is None: grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon)) else: grad_calls, myfprime = wrap_function(fprime, args) gfk = myfprime(x0) k = 0 N = len(x0) xk = x0 old_fval = f(xk) old_old_fval = old_fval + 5000 if retall: allvecs = [xk] sk = [2*gtol] warnflag = 0 pk = -gfk gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): deltak = numpy.dot(gfk,gfk) # These values are modified by the line search, even if it fails old_fval_backup = old_fval old_old_fval_backup = old_old_fval alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk,old_fval, old_old_fval,c2=0.4) if alpha_k is None: # line search failed -- use different one. alpha_k, fc, gc, old_fval_backup, old_old_fval_backup, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval) if alpha_k is None or alpha_k == 0: # This line search also failed to find a better solution. warnflag = 2 break xk = xk + alpha_k*pk if retall: allvecs.append(xk) if gfkp1 is None: gfkp1 = myfprime(xk) yk = gfkp1 - gfk beta_k = pymax(0,numpy.dot(yk,gfkp1)/deltak) pk = -gfkp1 + beta_k * pk gfk = gfkp1 gnorm = vecnorm(gfk,ord=norm) k = k + 1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls[0] print " Gradient evaluations: %d" % grad_calls[0] elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls[0] print " Gradient evaluations: %d" % grad_calls[0] else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls[0] print " Gradient evaluations: %d" % grad_calls[0] if full_output: retlist = xk, fval, func_calls[0], grad_calls[0], warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
old_fval,old_old_fval)
old_fval_backup,old_old_fval_backup)
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function with nonlinear conjugate gradient algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the nonlinear conjugate gradient algorithm of Polak and Ribiere See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. gtol -- stop when norm of gradient is less than gtol norm -- order of vector norm to use epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- if retall then this vector of the iterates is returned Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if True """ x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls, f = wrap_function(f, args) if fprime is None: grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon)) else: grad_calls, myfprime = wrap_function(fprime, args) gfk = myfprime(x0) k = 0 N = len(x0) xk = x0 old_fval = f(xk) old_old_fval = old_fval + 5000 if retall: allvecs = [xk] sk = [2*gtol] warnflag = 0 pk = -gfk gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter): deltak = numpy.dot(gfk,gfk) # These values are modified by the line search, even if it fails old_fval_backup = old_fval old_old_fval_backup = old_old_fval alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ linesearch.line_search(f,myfprime,xk,pk,gfk,old_fval, old_old_fval,c2=0.4) if alpha_k is None: # line search failed -- use different one. alpha_k, fc, gc, old_fval_backup, old_old_fval_backup, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk, old_fval,old_old_fval) if alpha_k is None or alpha_k == 0: # This line search also failed to find a better solution. warnflag = 2 break xk = xk + alpha_k*pk if retall: allvecs.append(xk) if gfkp1 is None: gfkp1 = myfprime(xk) yk = gfkp1 - gfk beta_k = pymax(0,numpy.dot(yk,gfkp1)/deltak) pk = -gfkp1 + beta_k * pk gfk = gfkp1 gnorm = vecnorm(gfk,ord=norm) k = k + 1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls[0] print " Gradient evaluations: %d" % grad_calls[0] elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls[0] print " Gradient evaluations: %d" % grad_calls[0] else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls[0] print " Gradient evaluations: %d" % grad_calls[0] if full_output: retlist = xk, fval, func_calls[0], grad_calls[0], warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
format = self.getformat()
def __add__(self, other): format = self.getformat() csc = self.tocsc() res = csc + other return eval('%s_matrix'%format)(res)
return eval('%s_matrix'%format)(res)
return res
def __add__(self, other): format = self.getformat() csc = self.tocsc() res = csc + other return eval('%s_matrix'%format)(res)
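This row and the similar __sub__, __rsub__, __mul__, __rmul__, __neg__ and matvec rows that follow drop the round-trip through eval('%s_matrix' % format)(...) and simply return the CSC result. If converting back to the caller's format were still wanted, an explicit registry is the safer idiom than eval on a runtime string; purely illustrative, using the csc_matrix and csr_matrix classes defined in this module:

    # Illustrative alternative only: an explicit constructor registry instead
    # of eval() on a format string built at runtime.
    _constructors = {'csc': csc_matrix, 'csr': csr_matrix}

    def _as_format(res, fmt):
        return _constructors[fmt](res)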
format = self.getformat()
def __sub__(self, other): format = self.getformat() csc = self.tocsc() res = csc - other return eval('%s_matrix'%format)(res)
return eval('%s_matrix'%format)(res)
return res
def __sub__(self, other): format = self.getformat() csc = self.tocsc() res = csc - other return eval('%s_matrix'%format)(res)
format = self.getformat()
def __rsub__(self, other): # other - self format = self.getformat() csc = self.tocsc() res = csc.__rsub__(other) return eval('%s_matrix'%format)(res)
return eval('%s_matrix'%format)(res)
return res
def __rsub__(self, other): # other - self format = self.getformat() csc = self.tocsc() res = csc.__rsub__(other) return eval('%s_matrix'%format)(res)
format = self.getformat()
def __mul__(self, other): format = self.getformat() csc = self.tocsc() res = csc * other return eval('%s_matrix'%format)(res)
return eval('%s_matrix'%format)(res)
return res
def __mul__(self, other): format = self.getformat() csc = self.tocsc() res = csc * other return eval('%s_matrix'%format)(res)
format = self.getformat()
def __rmul__(self, other): format = self.getformat() csc = self.tocsc() res = csc.__rmul__(other) return eval('%s_matrix'%format)(res)
return eval('%s_matrix'%format)(res)
return res
def __rmul__(self, other): format = self.getformat() csc = self.tocsc() res = csc.__rmul__(other) return eval('%s_matrix'%format)(res)
format = self.getformat()
def __neg__(self): format = self.getformat() csc = self.tocsc() res = -csc return eval('%s_matrix'%format)(res)
return eval('%s_matrix'%format)(res)
return res def matmat(self, other): csc = self.tocsc() res = csc.matmat(other) return res
def __neg__(self): format = self.getformat() csc = self.tocsc() res = -csc return eval('%s_matrix'%format)(res)
format = self.getformat()
def matvec(self, vec): format = self.getformat() csc = self.tocsc() res = csc.matvec(vec) return res
M = max(self.rowind)
M = max(self.rowind) + 1
def __init__(self,s,ij=None,M=None,N=None,nzmax=100,typecode=Float,copy=0): spmatrix.__init__(self, 'csc') if isinstance(s,spmatrix): if isinstance(s, csc_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.rowind = s.rowind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.rowind = s.rowind self.indptr = s.indptr elif isinstance(s, csr_matrix): self.shape = s.shape tcode = s.typecode func = getattr(sparsetools,tcode+'transp') self.data, self.rowind, self.indptr = \ func(s.data, s.colind, s.indptr) else: temp = s.tocsc() self.data = temp.data self.rowind = temp.rowind self.indptr = temp.indptr self.shape = temp.shape elif isinstance(s,type(3)): M=s N=ij self.data = zeros((nzmax,),typecode) self.rowind = zeros((nzmax,),'i') self.indptr = zeros((N+1,),'i') self.shape = (M,N) elif (isinstance(s,ArrayType) or \ isinstance(s,type([]))): s = asarray(s) if (rank(s) == 2): # converting from a full array M, N = s.shape s = asarray(s) if s.typecode() not in 'fdFD': s = s*1.0 typecode = s.typecode() func = getattr(sparsetools,_transtabl[typecode]+'fulltocsc') ierr = irow = jcol = 0 nnz = sum(ravel(s != 0.0)) a = zeros((nnz,),typecode) rowa = zeros((nnz,),'i') ptra = zeros((N+1,),'i') while 1: a, rowa, ptra, irow, jcol, ierr = \ func(s, a, rowa, ptra, irow, jcol, ierr) if (ierr == 0): break nnz = nnz + ALLOCSIZE a = resize1d(a, nnz) rowa = resize1d(rowa, nnz)
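The shape row adds + 1 because row indices are 0-based: the number of rows implied by an index array is one more than its largest value. For instance:

    import numpy as np

    rowind = np.array([0, 2, 5])    # 0-based row indices of the stored entries
    M = rowind.max() + 1            # six rows are implied, not five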
if (len(self.data) != len(self.rowind)):
if (len(self.data) != nzmax):
def _check(self): M,N = self.shape if (rank(self.data) != 1) or (rank(self.rowind) != 1) or \ (rank(self.indptr) != 1): raise ValueError, "Data, rowind, and indptr arrays "\ "should be rank 1." if (len(self.data) != len(self.rowind)): raise ValueError, "Data and row list should have same length" if (len(self.indptr) != N+1): raise ValueError, "Index pointer should be of of size N+1" if (len(self.rowind)>0) and (max(self.rowind) >= M): raise ValueError, "Row-values must be < M." if (self.indptr[-1] > len(self.rowind)): raise ValueError, \ "Last value of index list should be less than "\ "the size of data list" self.nnz = self.indptr[-1] self.nzmax = len(self.rowind) self.typecode = self.data.typecode() if self.typecode not in 'fdFD': raise ValueError, "Only floating point sparse matrix types allowed" self.ftype = _transtabl[self.typecode]
if (len(self.rowind)>0) and (max(self.rowind) >= M):
if (nzmax>0) and (max(self.rowind[:nnz]) >= M):
def _check(self): M,N = self.shape if (rank(self.data) != 1) or (rank(self.rowind) != 1) or \ (rank(self.indptr) != 1): raise ValueError, "Data, rowind, and indptr arrays "\ "should be rank 1." if (len(self.data) != len(self.rowind)): raise ValueError, "Data and row list should have same length" if (len(self.indptr) != N+1): raise ValueError, "Index pointer should be of of size N+1" if (len(self.rowind)>0) and (max(self.rowind) >= M): raise ValueError, "Row-values must be < M." if (self.indptr[-1] > len(self.rowind)): raise ValueError, \ "Last value of index list should be less than "\ "the size of data list" self.nnz = self.indptr[-1] self.nzmax = len(self.rowind) self.typecode = self.data.typecode() if self.typecode not in 'fdFD': raise ValueError, "Only floating point sparse matrix types allowed" self.ftype = _transtabl[self.typecode]
self.nnz = self.indptr[-1] self.nzmax = len(self.rowind)
self.nnz = nnz self.nzmax = nzmax
def _check(self): M,N = self.shape if (rank(self.data) != 1) or (rank(self.rowind) != 1) or \ (rank(self.indptr) != 1): raise ValueError, "Data, rowind, and indptr arrays "\ "should be rank 1." if (len(self.data) != len(self.rowind)): raise ValueError, "Data and row list should have same length" if (len(self.indptr) != N+1): raise ValueError, "Index pointer should be of of size N+1" if (len(self.rowind)>0) and (max(self.rowind) >= M): raise ValueError, "Row-values must be < M." if (self.indptr[-1] > len(self.rowind)): raise ValueError, \ "Last value of index list should be less than "\ "the size of data list" self.nnz = self.indptr[-1] self.nzmax = len(self.rowind) self.typecode = self.data.typecode() if self.typecode not in 'fdFD': raise ValueError, "Only floating point sparse matrix types allowed" self.ftype = _transtabl[self.typecode]
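The _check rows distinguish storage size from logical size: data and rowind are allocated to nzmax entries, but only the first nnz = indptr[-1] of them are meaningful, so the length test compares against nzmax and the bounds test only inspects the first nnz indices. A small sketch of those invariants (check_csc_storage is an illustrative helper; it guards on nnz rather than nzmax as the row above does):

    def check_csc_storage(data, rowind, indptr, shape):
        # Storage arrays may be over-allocated: their common length is nzmax,
        # while only the first nnz = indptr[-1] entries are meaningful.
        M, N = shape
        nnz = indptr[-1]
        nzmax = len(rowind)
        assert len(data) == nzmax, "data and rowind share the nzmax length"
        assert nnz <= nzmax
        if nnz > 0:
            assert max(rowind[:nnz]) < M, "row indices must be < M"
        return nnz, nzmax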
if not (0<=row<M) or not (0<=col<N): raise KeyError, "Index out of bounds."
if (row < 0): row = M + row if (col < 0): col = N + col if (row < 0) or (col < 0): raise IndexError, "Index out of bounds." if (col >= N): self.indptr = resize1d(self.indptr, col+2) self.indptr[N+1:] = self.indptr[N] N = col+1 if (row >= M): M = row+1 self.shape = (M,N)
def __setitem__(self, key, val): if isinstance(key,types.TupleType): row = key[0] col = key[1] func = getattr(sparsetools,self.ftype+'cscsetel') M, N = self.shape if not (0<=row<M) or not (0<=col<N): raise KeyError, "Index out of bounds." nzmax = self.nzmax if (nzmax < self.nnz+1): # need more room alloc = max(1,self.allocsize) self.data = resize1d(self.data, nzmax + alloc) self.rowind = resize1d(self.rowind, nzmax + alloc) func(self.data, self.rowind, self.indptr, row, col, val) self._check() elif isinstance(key, types.IntType): if (key < self.nnz): self.data[key] = val else: raise KeyError, "Key out of bounds." else: raise NotImplementedError
if not (0<=row<M) or not (0<=col<N): raise KeyError, "Index out of bounds."
if (row < 0): row = M + row if (col < 0): col = N + col if (row >= M ) or (col >= N) or (row < 0) or (col < 0): raise IndexError, "Index out of bounds."
def __getitem__(self, key): if isinstance(key,types.TupleType): row = key[0] col = key[1] func = getattr(sparsetools,self.ftype+'cscgetel') M, N = self.shape if not (0<=row<M) or not (0<=col<N): raise KeyError, "Index out of bounds." ind, val = func(self.data, self.colind, self.indptr, col, row) return val elif isinstance(key,type(3)): return self.data[key] else: raise NotImplementedError
if not (0<=row<M) or not (0<=col<N):
if (row < 0): row = M + row if (col < 0): col = N + col if (row < 0) or (col < 0):
def __setitem__(self, key, val): if isinstance(key,types.TupleType): row = key[0] col = key[1] func = getattr(sparsetools,self.ftype+'cscsetel') M, N = self.shape if not (0<=row<M) or not (0<=col<N): raise KeyError, "Index out of bounds." nzmax = self.nzmax if (nzmax < self.nnz+1): # need more room alloc = max(1,self.allocsize) self.data = resize1d(self.data, nzmax + alloc) self.colind = resize1d(self.colind, nzmax + alloc) func(self.data, self.colind, self.indptr, col, row, val) self._check() elif isinstance(key, types.IntType): if (key < self.nnz): self.data[key] = val else: raise KeyError, "Key out of bounds." else: raise NotImplementedError
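The __getitem__ and __setitem__ rows swap the hard 0 <= row < M test for numpy-style handling: negative indices wrap around once, anything still out of range raises IndexError, and __setitem__ additionally grows the matrix when the index lies past the current shape. The wrap-around part in isolation (normalize_index is an illustrative helper):

    def normalize_index(row, col, shape):
        # numpy-style wrap-around: negative indices count from the end; any
        # index still out of range raises (the __setitem__ variant grows the
        # matrix instead of raising on a too-large index).
        M, N = shape
        if row < 0:
            row += M
        if col < 0:
            col += N
        if row < 0 or col < 0 or row >= M or col >= N:
            raise IndexError("Index out of bounds.")
        return row, col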
self.typecode = None
self.nnz = 0
def __init__(self,A=None): dict.__init__(self) spmatrix.__init__(self,'dok') self.shape = (0,0) self.typecode = None if A is not None: A = asarray(A) N,M = A.shape for n in range(N): for m in range(M): if A[n,m] != 0: self[n,m] = A[n,m]
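From here on the rows rename dictmatrix to dok_matrix, the dictionary-of-keys format: entries live in a plain dict keyed by (row, col), which is why __init__ now tracks nnz (the number of stored keys) rather than a typecode. In miniature:

    # A dictionary-of-keys matrix in miniature: values keyed by (row, col).
    entries = {}
    entries[(0, 1)] = 2.5
    entries[(3, 0)] = -1.0
    nnz = len(entries)      # number of stored entries, as tracked in __init__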
res = dictmatrix()
res = dok_matrix()
def __add__(self, other): res = dictmatrix() res.update(self) res.shape = self.shape for key in other.keys(): try: res[key] += other[key] except KeyError: res[key] = other[key] return res
res = dictmatrix()
res = dok_matrix()
def __sub__(self, other): res = dictmatrix() res.update(self) res.shape = self.shape for key in other.keys(): try: res[key] -= other[key] except KeyError: res[key] = -other[key] return res
res = dictmatrix()
res = dok_matrix()
def __neg__(self): res = dictmatrix() for key in self.keys(): res[key] = -self[key] return res
if isinstance(other, dictmatrix):
if isinstance(other, dok_matrix):
def __mul__(self, other): if isinstance(other, dictmatrix): return self.matmat(other) other = asarray(other) if rank(other) > 0: return self.matvec(other) res = dictmatrix() for key in self.keys(): res[key] = other * self[key] return res
res = dictmatrix()
res = dok_matrix()
def __mul__(self, other): if isinstance(other, dictmatrix): return self.matmat(other) other = asarray(other) if rank(other) > 0: return self.matvec(other) res = dictmatrix() for key in self.keys(): res[key] = other * self[key] return res
new = dictmatrix()
new = dok_matrix()
def transp(self): # Transpose (return the transposed) new = dictmatrix() for key in self.keys(): new[key[1],key[0]] = self[key] return new
def matmat(self, other): res = dictmatrix() spself = spmatrix(self) spother = spmatrix(other) spres = spself * spother return spres.todict()
def matmat(self, other): res = dictmatrix() spself = spmatrix(self) spother = spmatrix(other) spres = spself * spother return spres.todict()
res = dictmatrix()
res = dok_matrix()
def take(self, cols_or_rows, columns=1): # Extract columns or rows as indictated from matrix # assume cols_or_rows is sorted res = dictmatrix() indx = int((columns == 1)) N = len(cols_or_rows) if indx: # columns for key in self.keys(): num = searchsorted(cols_or_rows,key[1]) if num < N: newkey = (key[0],num) res[newkey] = self[key] else: for key in self.keys(): num = searchsorted(cols_or_rows,key[0]) if num < N: newkey = (num,key[1]) res[newkey] = self[key] return res
base = dictmatrix() ext = dictmatrix()
base = dok_matrix() ext = dok_matrix()
def split(self, cols_or_rows, columns=1): # similar to take but returns two array, the extracted # columns plus the resulting array # assumes cols_or_rows is sorted base = dictmatrix() ext = dictmatrix() indx = int((columns == 1)) N = len(cols_or_rows) if indx: for key in self.keys(): num = searchsorted(cols_or_rows,key[1]) if cols_or_rows[num]==key[1]: newkey = (key[0],num) ext[newkey] = self[key] else: newkey = (key[0],key[1]-num) base[newkey] = self[key] else: for key in self.keys(): num = searchsorted(cols_or_rows,key[0]) if cols_or_rows[num]==key[0]: newkey = (num,key[1]) ext[newkey] = self[key] else: newkey = (key[0]-num,key[1]) base[newkey] = self[key] return base, ext
def getCSR(self):
def tocsr(self):
def getCSR(self): # Return Compressed Sparse Row format arrays for this matrix keys = self.keys() keys.sort() nnz = len(keys) data = [0]*nnz colind = [0]*nnz row_ptr = [0]*(self.shape[0]+1) current_row = -1 k = 0 for key in keys: ikey0 = int(key[0]) ikey1 = int(key[1]) if ikey0 != current_row: current_row = ikey0 row_ptr[ikey0] = k data[k] = self[key] colind[k] = ikey1 k += 1 row_ptr[-1] = nnz data = array(data) colind = array(colind) row_ptr = array(row_ptr) ptype = data.typecode() if ptype not in ['d','D','f','F']: data = data.astype('d') ptype = 'd' return _transtabl[ptype], nnz, data, colind, row_ptr
ptype = data.typecode() if ptype not in ['d','D','f','F']: data = data.astype('d') ptype = 'd' return _transtabl[ptype], nnz, data, colind, row_ptr def getCSC(self):
return csr_matrix(data,(colind, row_ptr)) def tocsc(self):
def getCSR(self): # Return Compressed Sparse Row format arrays for this matrix keys = self.keys() keys.sort() nnz = len(keys) data = [0]*nnz colind = [0]*nnz row_ptr = [0]*(self.shape[0]+1) current_row = -1 k = 0 for key in keys: ikey0 = int(key[0]) ikey1 = int(key[1]) if ikey0 != current_row: current_row = ikey0 row_ptr[ikey0] = k data[k] = self[key] colind[k] = ikey1 k += 1 row_ptr[-1] = nnz data = array(data) colind = array(colind) row_ptr = array(row_ptr) ptype = data.typecode() if ptype not in ['d','D','f','F']: data = data.astype('d') ptype = 'd' return _transtabl[ptype], nnz, data, colind, row_ptr
ptype = data.typecode() if ptype not in ['d','D','f','F']: data = data.astype('d') ptype = 'd' return _transtabl[ptype], nnz, data, colind, col_ptr def dense(self,typecode=None): if typecode is None: typecode = self.type
return csc_matrix(data, (colind, col_ptr)) def todense(self,typecode=None):
def getCSC(self): # Return Compressed Sparse Column format arrays for this matrix keys = self.keys() keys.sort(csc_cmp) nnz = len(keys) data = [None]*nnz colind = [None]*nnz col_ptr = [None]*(self.shape[1]+1) current_col = -1 k = 0 for key in keys: ikey0 = int(key[0]) ikey1 = int(key[1]) if ikey1 != current_col: current_col = ikey1 col_ptr[ikey1] = k data[k] = self[key] colind[k] = ikey0 k += 1 col_ptr[-1] = nnz data = array(data) colind = array(colind) col_ptr = array(col_ptr) ptype = data.typecode() if ptype not in ['d','D','f','F']: data = data.astype('d') ptype = 'd' return _transtabl[ptype], nnz, data, colind, col_ptr
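The getCSR/getCSC rows become tocsr/tocsc and return real matrix objects instead of raw arrays; the underlying conversion is unchanged: sort the (row, col) keys row-major (or column-major for CSC) and record where each row starts. A compact, self-contained version of the row-major case (dok_to_csr_arrays is an illustrative helper that builds indptr by counting, rather than by the assignment scheme in the context above):

    def dok_to_csr_arrays(entries, shape):
        # Build CSR arrays from a (row, col) -> value dict: sort the keys in
        # row-major order, then record where each row starts in indptr.
        M, N = shape
        keys = sorted(entries.keys())
        data = [entries[k] for k in keys]
        colind = [c for (_, c) in keys]
        indptr = [0] * (M + 1)
        for r, _ in keys:
            indptr[r + 1] += 1
        for r in range(M):
            indptr[r + 1] += indptr[r]
        return data, colind, indptr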
pass
def __init__(self, obj, ij, M=None, N=None, nzmax=None, typecode=None): aobj = asarray(obj) aij = asarray(ij) if M is None: M = amax(aij[:,0]) if N is None: N = amax(aij[:,1]) self.shape = (M,N) if nzmax is None: nzmax = len(aobj) self.nzmax = nzmax self.data = aobj self.row = aij[:,0] self.col = aij[:,1] self.typecode = aobj.typecode() self._check() def _check(self): nnz = len(self.data) if (nnz != len(self.row)) or (nnz != len(self.col)): raise ValueError, "Row, column, and data array must all be "\ "the same length." if (self.nzmax < nnz): raise ValueError, "nzmax must be >= nnz" self.ftype = _transtabl[self.typecode] def tocsc(self): func = getattr(sparsetools,self.ftype+"cootocsc") a, rowa, ptra, ierr = func(self.data, self.row, self.col) if ierr: raise RuntimeError, "Error in conversion." return csc_matrix(a, (rowa, ptra))
def dense(self,typecode=None): if typecode is None: typecode = self.type if typecode is None: typecode = 'd' new = zeros(self.shape,typecode) for key in self.keys(): ikey0 = int(key[0]) ikey1 = int(key[1]) new[ikey0,ikey1] = self[key] return new
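The added coo_matrix class stores coordinate-format triplets: parallel row, col and data arrays with one entry per stored value, which its tocsc() then hands to the sparsetools converter. The layout in miniature:

    import numpy as np

    # Coordinate (COO) storage: one (row, col, value) triplet per entry.
    row = np.array([0, 1, 3])
    col = np.array([2, 0, 3])
    vals = np.array([4.0, -1.0, 2.5])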
diagfunc = eval('_sparsekit.'+_transtabl[mtype]+'diacsr') nzmax = diags.shape[0]*diags.shape[1] s = spmatrix(m,n,nzmax,typecode=mtype) diagfunc(array(m), array(n), array(0), diags, offsets, s.data, s.index[0], s.index[1], array(diags.shape[1]),array(diags.shape[0])) s.lastel = min([m,n])*len(offsets) - 1 - \ sum(_spdiags_tosub(offsets, a=min([n-m,0]), b=max([n-m,0]))) return s
diagfunc = eval('sparsetools.'+_transtabl[mtype]+'diatocsr') a, rowa, ptra, ierr = diagfunc(m,n,diags,offsets) if ierr: raise ValueError, "Ran out of memory (shouldn't have happened)" return csc_matrix(a,(rowa,ptra),M=m,N=n)
def spdiags(diags,offsets,m,n): """Return a sparse matrix given it's diagonals. B = spdiags(diags, offsets, M, N) Inputs: diags -- rows contain diagonal values offsets -- diagonals to set (0 is main) M, N -- sparse matrix returned is M X N """ diags = array(transpose(diags),copy=1) if diags.typecode() not in 'fdFD': diags = diags.astype('d') offsets = array(offsets,copy=0) mtype = diags.typecode() assert(len(offsets) == diags.shape[1]) # set correct diagonal to csr conversion routine for this type diagfunc = eval('_sparsekit.'+_transtabl[mtype]+'diacsr') # construct empty sparse matrix and pass it's main parameters to # the diagonal to csr conversion routine. nzmax = diags.shape[0]*diags.shape[1] s = spmatrix(m,n,nzmax,typecode=mtype) diagfunc(array(m), array(n), array(0), diags, offsets, s.data, s.index[0], s.index[1], array(diags.shape[1]),array(diags.shape[0])) # compute how-many elements were actually filled s.lastel = min([m,n])*len(offsets) - 1 - \ sum(_spdiags_tosub(offsets, a=min([n-m,0]), b=max([n-m,0]))) return s
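The spdiags rewrite delegates to the sparsetools diatocsr converter and returns a csc_matrix rather than filling an spmatrix by hand. As a reminder of what the routine builds, here is a dense reference under the convention used by scipy.sparse.spdiags today, in which data[i, j] lands at row j - offsets[i], column j (dense_spdiags is illustrative and assumes one row of data per offset, each of length n):

    import numpy as np

    def dense_spdiags(data, offsets, m, n):
        # Dense reference: data[i, j] is placed at row j - offsets[i],
        # column j, whenever that position lies inside the m-by-n matrix.
        data = np.asarray(data, dtype=float)
        A = np.zeros((m, n))
        for i, k in enumerate(offsets):
            for j in range(n):
                r = j - k
                if 0 <= r < m:
                    A[r, j] = data[i, j]
        return A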
A.ftype, A.nnz, A.data, A.rowind, A.indptr
mat.ftype, mat.nnz, mat.data, mat.rowind, mat.indptr
def solve(A,b,permc_spec=2): if not hasattr(A, 'tocsr') and not hasattr(A, 'tocsc'): raise ValueError, "Sparse matrix must be able to return CSC format--"\ "A.tocsc()--or CSR format--A.tocsr()" if not hasattr(A,'shape'): raise ValueError, "Sparse matrix must be able to return shape (rows,cols) = A.shape" M,N = A.shape if (M != N): raise ValueError, "Matrix must be square." if hasattr(A, 'tocsc'): mat = A.tocsc() ftype, lastel, data, index0, index1 = \ A.ftype, A.nnz, A.data, A.rowind, A.indptr csc = 1 else: mat = A.tocsr() ftype, lastel, data, index0, index1 = \ A.ftype, A.nnz, A.data, A.colind, A.indptr csc = 0 gssv = eval('_superlu.' + ftype + 'gssv') return gssv(N,lastel,data,index0,index1,b,csc,permc_spec)[0]
A.ftype, A.nnz, A.data, A.colind, A.indptr
mat.ftype, mat.nnz, mat.data, mat.colind, mat.indptr
def solve(A,b,permc_spec=2): if not hasattr(A, 'tocsr') and not hasattr(A, 'tocsc'): raise ValueError, "Sparse matrix must be able to return CSC format--"\ "A.tocsc()--or CSR format--A.tocsr()" if not hasattr(A,'shape'): raise ValueError, "Sparse matrix must be able to return shape (rows,cols) = A.shape" M,N = A.shape if (M != N): raise ValueError, "Matrix must be square." if hasattr(A, 'tocsc'): mat = A.tocsc() ftype, lastel, data, index0, index1 = \ A.ftype, A.nnz, A.data, A.rowind, A.indptr csc = 1 else: mat = A.tocsr() ftype, lastel, data, index0, index1 = \ A.ftype, A.nnz, A.data, A.colind, A.indptr csc = 0 gssv = eval('_superlu.' + ftype + 'gssv') return gssv(N,lastel,data,index0,index1,b,csc,permc_spec)[0]
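The two solve rows fix which object the CSC/CSR attributes are read from: after A.tocsc() or A.tocsr(), the ftype, nnz, data and index arrays must come from the converted matrix mat, since the original A may be a format (a dok_matrix, say) that converts fine but has no rowind or indptr of its own. The CSC branch in isolation (helper name is illustrative; the attributes are those of the csc_matrix class above):

    def _csc_arrays(A):
        # Convert first, then read the CSC attributes off the converted object.
        mat = A.tocsc()
        return mat.ftype, mat.nnz, mat.data, mat.rowind, mat.indptr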
assert_array_almost_equal(numpy.matrixmultiply(a,x),b)
assert_array_almost_equal(numpy.dot(a,x),b)
def check_simple(self):
assert_array_almost_equal(numpy.matrixmultiply(a,x0),[1,0])
assert_array_almost_equal(numpy.dot(a,x0),[1,0])
def check_20Feb04_bug(self):
    a = [[1,1],[1.0,0]]   # ok
    x0 = solve(a,[1,0j])
    assert_array_almost_equal(numpy.matrixmultiply(a,x0),[1,0])
assert_array_almost_equal(numpy.matrixmultiply(a,x),b)
assert_array_almost_equal(numpy.dot(a,x),b)
def check_simple(self):
    a = [[1,20],[-30,4]]
    for b in ([[1,0],[0,1]],[1,0],
              [[2,1],[-30,4]]):
        x = solve(a,b)
        assert_array_almost_equal(numpy.matrixmultiply(a,x),b)
assert_array_almost_equal(numpy.matrixmultiply(a,x),b)
assert_array_almost_equal(numpy.dot(a,x),b)
def check_simple_sym(self):
    a = [[2,3],[3,5]]
    for lower in [0,1]:
        for b in ([[1,0],[0,1]],[1,0]):
            x = solve(a,b,sym_pos=1,lower=lower)
            assert_array_almost_equal(numpy.matrixmultiply(a,x),b)
assert_array_almost_equal(numpy.matrixmultiply(a,x),b)
assert_array_almost_equal(numpy.dot(a,x),b)
def check_simple_sym_complex(self):
    a = [[5,2],[2,4]]
    for b in [[1j,0],
              [[1j,1j],
               [0,2]],
              ]:
        x = solve(a,b,sym_pos=1)
        assert_array_almost_equal(numpy.matrixmultiply(a,x),b)
assert_array_almost_equal(numpy.matrixmultiply(a,x),b)
assert_array_almost_equal(numpy.dot(a,x),b)
def check_simple_complex(self):
    a = array([[5,2],[2j,4]],'D')
    for b in [[1j,0],
              [[1j,1j],
               [0,2]],
              [1,0j],
              array([1,0],'D'),
              ]:
        x = solve(a,b)
        assert_array_almost_equal(numpy.matrixmultiply(a,x),b)
assert_array_almost_equal(numpy.matrixmultiply(a,x),b)
assert_array_almost_equal(numpy.dot(a,x),b)
def check_random(self):
assert_array_almost_equal(numpy.matrixmultiply(a,x),b)
assert_array_almost_equal(numpy.dot(a,x),b)
def check_random_complex(self):
    n = 20
    a = random([n,n]) + 1j * random([n,n])
    for i in range(n):
        a[i,i] = 20*(.1+a[i,i])
    for i in range(2):
        b = random([n,3])
        x = solve(a,b)
        assert_array_almost_equal(numpy.matrixmultiply(a,x),b)
assert_array_almost_equal(numpy.matrixmultiply(a,x),b)
assert_array_almost_equal(numpy.dot(a,x),b)
def check_random_sym(self):
    n = 20
    a = random([n,n])
    for i in range(n):
        a[i,i] = abs(20*(.1+a[i,i]))
        for j in range(i):
            a[i,j] = a[j,i]
    for i in range(4):
        b = random([n])
        x = solve(a,b,sym_pos=1)
        assert_array_almost_equal(numpy.matrixmultiply(a,x),b)
assert_array_almost_equal(numpy.matrixmultiply(a,x),b)
assert_array_almost_equal(numpy.dot(a,x),b)
def check_random_sym_complex(self):
    n = 20
    a = random([n,n])
    #a = a + 1j*random([n,n]) # XXX: with this the accuracy will be very low
    for i in range(n):
        a[i,i] = abs(20*(.1+a[i,i]))
        for j in range(i):
            a[i,j] = numpy.conjugate(a[j,i])
    b = random([n])+2j*random([n])
    for i in range(2):
        x = solve(a,b,sym_pos=1)
        assert_array_almost_equal(numpy.matrixmultiply(a,x),b)
assert_array_almost_equal(numpy.matrixmultiply(a,a_inv),
assert_array_almost_equal(numpy.dot(a,a_inv),
def check_simple(self):
    a = [[1,2],[3,4]]
    a_inv = inv(a)
    assert_array_almost_equal(numpy.matrixmultiply(a,a_inv),
                              [[1,0],[0,1]])
    a = [[1,2,3],[4,5,6],[7,8,10]]
    a_inv = inv(a)
    assert_array_almost_equal(numpy.matrixmultiply(a,a_inv),
                              [[1,0,0],[0,1,0],[0,0,1]])
assert_array_almost_equal(numpy.matrixmultiply(a,a_inv),
assert_array_almost_equal(numpy.dot(a,a_inv),
def check_random(self):
    n = 20
    for i in range(4):
        a = random([n,n])
        for i in range(n):
            a[i,i] = 20*(.1+a[i,i])
        a_inv = inv(a)
        assert_array_almost_equal(numpy.matrixmultiply(a,a_inv),
                                  numpy.identity(n))
assert_array_almost_equal(numpy.matrixmultiply(a,a_inv),
assert_array_almost_equal(numpy.dot(a,a_inv),
def check_simple_complex(self):
    a = [[1,2],[3,4j]]
    a_inv = inv(a)
    assert_array_almost_equal(numpy.matrixmultiply(a,a_inv),
                              [[1,0],[0,1]])