Columns: rem (string, 1 to 322k chars), add (string, 0 to 2.05M chars), context (string, 4 to 228k chars), meta (string, 156 to 215 chars)
elif not byte_count: getter = Mat5EmptyMatrixGetter(self)
def matrix_getter_factory(self): ''' Returns reader for next matrix at top level ''' tag = self.read_dtype(self.dtypes['tag_full']) mdtype = tag['mdtype'] byte_count = tag['byte_count'] next_pos = self.mat_stream.tell() + byte_count if mdtype == miCOMPRESSED: getter = Mat5ZArrayReader(self, byte_count).matrix_getter_factory() elif not mdtype == miMATRIX: raise TypeError, \ 'Expecting miMATRIX type here, got %d' % mdtype elif not byte_count: # an empty miMATRIX can contain no bytes getter = Mat5EmptyMatrixGetter(self) else: getter = self.current_getter() getter.next_position = next_pos return getter
480afd5e75d2805992c5f44fabda71fe19a0a93a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/480afd5e75d2805992c5f44fabda71fe19a0a93a/mio5.py
getter = self.current_getter()
getter = self.current_getter(byte_count)
def matrix_getter_factory(self): ''' Returns reader for next matrix at top level ''' tag = self.read_dtype(self.dtypes['tag_full']) mdtype = tag['mdtype'] byte_count = tag['byte_count'] next_pos = self.mat_stream.tell() + byte_count if mdtype == miCOMPRESSED: getter = Mat5ZArrayReader(self, byte_count).matrix_getter_factory() elif not mdtype == miMATRIX: raise TypeError, \ 'Expecting miMATRIX type here, got %d' % mdtype elif not byte_count: # an empty miMATRIX can contain no bytes getter = Mat5EmptyMatrixGetter(self) else: getter = self.current_getter() getter.next_position = next_pos return getter
480afd5e75d2805992c5f44fabda71fe19a0a93a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/480afd5e75d2805992c5f44fabda71fe19a0a93a/mio5.py
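The two mio5.py records above show read-time dispatch on the 8-byte element tag, with the fix threading the tag's byte_count into current_getter. Below is a minimal sketch of that dispatch pattern; read_tag, make_getter and the MI_MATRIX constant are hypothetical stand-ins, not the scipy.io mio5 API.

```python
# Hedged sketch of tag dispatch; constants and helpers are stand-ins.
MI_MATRIX = 14  # assumed value of the miMATRIX type code

def matrix_getter_factory(stream, read_tag, make_getter):
    mdtype, byte_count = read_tag(stream)        # full tag: (type code, byte count)
    next_pos = stream.tell() + byte_count        # where the next element starts
    if mdtype != MI_MATRIX:
        raise TypeError('Expecting miMATRIX type here, got %d' % mdtype)
    if byte_count == 0:
        getter = None                            # empty miMATRIX: no bytes follow
    else:
        getter = make_getter(byte_count)         # byte_count now reaches the getter
    return getter, next_pos
```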
def current_getter(self):
def current_getter(self, byte_count):
def current_getter(self): ''' Return matrix getter for current stream position
480afd5e75d2805992c5f44fabda71fe19a0a93a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/480afd5e75d2805992c5f44fabda71fe19a0a93a/mio5.py
derphi_a1 = phiprime(alpha1)
def phiprime(alpha): global gc gc += 1 return Num.dot(fprime(xk+alpha*pk,*args),pk)
4077c8368c89a4f27a0b561ebcfc071aeafb7ebf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/4077c8368c89a4f27a0b561ebcfc071aeafb7ebf/optimize.py
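The phiprime closure in this optimize.py record is the derivative of the line-search objective phi(alpha) = f(xk + alpha*pk) along the search direction pk. A small self-contained sketch, with modern numpy in place of Num and a counter instead of the global gc:

```python
import numpy as np

def make_phiprime(fprime, xk, pk, args=()):
    calls = {'n': 0}                           # call counter instead of the global gc
    def phiprime(alpha):
        calls['n'] += 1
        return np.dot(fprime(xk + alpha * pk, *args), pk)
    return phiprime, calls

# toy check on f(x) = 0.5*||x||^2, whose gradient is x itself
phiprime, calls = make_phiprime(lambda x: x, np.array([1.0, 2.0]), np.array([-1.0, -2.0]))
print(phiprime(0.5), calls['n'])               # -2.5 1
```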
allvecs -- a list of all iterates
allvecs -- a list of all iterates (only returned if retall==1)
def fmin_bfgs(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=1.49e-8, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol I = MLab.eye(N) Hk = I if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] sk = [2*gtol] warnflag = 0 old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args) func_calls = func_calls + fc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + gc + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + gc + 1 yk = gfkp1 - gfk k = k + 1 try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] gfk = gfkp1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
4077c8368c89a4f27a0b561ebcfc071aeafb7ebf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/4077c8368c89a4f27a0b561ebcfc071aeafb7ebf/optimize.py
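The core of the fmin_bfgs loop in the record above is the rank-two inverse-Hessian update. A minimal sketch of that formula with modern numpy (the original expresses the same arithmetic with Num.dot and NewAxis indexing):

```python
import numpy as np

def bfgs_update(Hk, sk, yk):
    """Return H_{k+1} = (I - rho s y^T) H_k (I - rho y s^T) + rho s s^T."""
    rhok = 1.0 / np.dot(yk, sk)                # rho_k = 1 / (y_k . s_k)
    I = np.eye(len(sk))
    A1 = I - rhok * np.outer(sk, yk)
    A2 = I - rhok * np.outer(yk, sk)
    return A1 @ Hk @ A2 + rhok * np.outer(sk, sk)
```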
config.add_subpackage('cluster') config.add_subpackage('fftpack') config.add_subpackage('integrate') config.add_subpackage('interpolate') config.add_subpackage('io')
def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('scipy',parent_package,top_path) #config.add_subpackage('cluster') #config.add_subpackage('fftpack') #config.add_subpackage('integrate') #config.add_subpackage('interpolate') #config.add_subpackage('io') config.add_subpackage('lib') config.add_subpackage('linalg') #config.add_subpackage('linsolve') #config.add_subpackage('maxentropy') config.add_subpackage('misc') #config.add_subpackage('montecarlo') config.add_subpackage('optimize') #config.add_subpackage('sandbox') #config.add_subpackage('signal') #config.add_subpackage('sparse') config.add_subpackage('special') config.add_subpackage('stats') #config.add_subpackage('ndimage') #config.add_subpackage('weave') config.make_svn_version_py() # installs __svn_version__.py config.make_config_py() return config
122ba850fb9d7c9ca51d66714dd38cb2187134f3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/122ba850fb9d7c9ca51d66714dd38cb2187134f3/setup.py
config.add_subpackage('linsolve') config.add_subpackage('maxentropy')
def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('scipy',parent_package,top_path) #config.add_subpackage('cluster') #config.add_subpackage('fftpack') #config.add_subpackage('integrate') #config.add_subpackage('interpolate') #config.add_subpackage('io') config.add_subpackage('lib') config.add_subpackage('linalg') #config.add_subpackage('linsolve') #config.add_subpackage('maxentropy') config.add_subpackage('misc') #config.add_subpackage('montecarlo') config.add_subpackage('optimize') #config.add_subpackage('sandbox') #config.add_subpackage('signal') #config.add_subpackage('sparse') config.add_subpackage('special') config.add_subpackage('stats') #config.add_subpackage('ndimage') #config.add_subpackage('weave') config.make_svn_version_py() # installs __svn_version__.py config.make_config_py() return config
122ba850fb9d7c9ca51d66714dd38cb2187134f3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/122ba850fb9d7c9ca51d66714dd38cb2187134f3/setup.py
config.add_subpackage('sandbox') config.add_subpackage('signal') config.add_subpackage('sparse')
def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('scipy',parent_package,top_path) #config.add_subpackage('cluster') #config.add_subpackage('fftpack') #config.add_subpackage('integrate') #config.add_subpackage('interpolate') #config.add_subpackage('io') config.add_subpackage('lib') config.add_subpackage('linalg') #config.add_subpackage('linsolve') #config.add_subpackage('maxentropy') config.add_subpackage('misc') #config.add_subpackage('montecarlo') config.add_subpackage('optimize') #config.add_subpackage('sandbox') #config.add_subpackage('signal') #config.add_subpackage('sparse') config.add_subpackage('special') config.add_subpackage('stats') #config.add_subpackage('ndimage') #config.add_subpackage('weave') config.make_svn_version_py() # installs __svn_version__.py config.make_config_py() return config
122ba850fb9d7c9ca51d66714dd38cb2187134f3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/122ba850fb9d7c9ca51d66714dd38cb2187134f3/setup.py
config.add_subpackage('ndimage') config.add_subpackage('weave')
def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('scipy',parent_package,top_path) #config.add_subpackage('cluster') #config.add_subpackage('fftpack') #config.add_subpackage('integrate') #config.add_subpackage('interpolate') #config.add_subpackage('io') config.add_subpackage('lib') config.add_subpackage('linalg') #config.add_subpackage('linsolve') #config.add_subpackage('maxentropy') config.add_subpackage('misc') #config.add_subpackage('montecarlo') config.add_subpackage('optimize') #config.add_subpackage('sandbox') #config.add_subpackage('signal') #config.add_subpackage('sparse') config.add_subpackage('special') config.add_subpackage('stats') #config.add_subpackage('ndimage') #config.add_subpackage('weave') config.make_svn_version_py() # installs __svn_version__.py config.make_config_py() return config
122ba850fb9d7c9ca51d66714dd38cb2187134f3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/122ba850fb9d7c9ca51d66714dd38cb2187134f3/setup.py
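The setup.py records above all edit the same numpy.distutils configuration() function, toggling which scipy subpackages get built. A pared-down sketch of that historical pattern, as used in the 2006 tree (not a recipe for current scipy builds):

```python
def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('scipy', parent_package, top_path)
    config.add_subpackage('lib')      # subpackages are switched on one at a time
    config.add_subpackage('linalg')
    config.make_config_py()           # installs __config__.py
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(configuration=configuration)
```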
modname = __name__[__name__.rfind('.')-1:] + '.expressions'
modname = __name__[:__name__.rfind('.')] + '.expressions'
def makeExpressions(context): """Make private copy of the expressions module with a custom get_context(). An attempt was made to make this threadsafe, but I can't guarantee it's bulletproof. """ import sys, imp modname = __name__[__name__.rfind('.')-1:] + '.expressions' # get our own, private copy of expressions imp.acquire_lock() try: old = sys.modules.pop(modname) import expressions private = sys.modules.pop(modname) sys.modules[modname] = old finally: imp.release_lock() def get_context(): return context private.get_context = get_context return private
85ac30941cdafe4a54c3533d3851ad4feca6ce0c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/85ac30941cdafe4a54c3533d3851ad4feca6ce0c/compiler.py
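The compiler.py fix above corrects the slice that derives the parent package name from __name__. A quick demonstration on a hypothetical stand-in module name:

```python
name = 'numexpr.compiler'                 # hypothetical stand-in for __name__
buggy = name[name.rfind('.') - 1:]        # 'r.compiler'  (wrong: starts one char before the dot)
fixed = name[:name.rfind('.')]            # 'numexpr'     (everything before the last dot)
print(buggy + '.expressions')             # 'r.compiler.expressions'
print(fixed + '.expressions')             # 'numexpr.expressions'
```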
wx_class.init2 = wx_class.__init__ wx_class.__init__ = plain_class__init__
if not hasattr(wx_class, 'init2'): wx_class.init2 = wx_class.__init__ wx_class.__init__ = plain_class__init__
def register(wx_class): """ Create a gui_thread compatible version of wx_class Test whether a proxy is necessary. If so, generate and return the proxy class. if not, just return the wx_class unaltered. """ if running_in_second_thread: #print 'proxy generated' return proxify(wx_class) else: wx_class.init2 = wx_class.__init__ wx_class.__init__ = plain_class__init__ return wx_class
7431e8dea40ba994ac9fd075b281c1a17cc5d5e8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/7431e8dea40ba994ac9fd075b281c1a17cc5d5e8/main.py
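The register() fix above guards against wrapping a class's __init__ twice. A toy illustration of why the hasattr check matters, using plain stand-in classes (not the wx/gui_thread ones) and modern Python syntax:

```python
def plain_class__init__(self, *args, **kw):
    self.init2(*args, **kw)               # delegate to the saved original __init__

class Widget(object):
    def __init__(self):
        self.ready = True

def register(cls):
    if not hasattr(cls, 'init2'):         # only wrap once; without this guard a second
        cls.init2 = cls.__init__          # call would make init2 the wrapper itself and
        cls.__init__ = plain_class__init__  # construction would recurse forever
    return cls

register(register(Widget))                # second registration is now a no-op
print(Widget().ready)                     # True, no recursion
```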
from gui_thread_guts import proxy_event, print_exception, smart_return
from gui_thread_guts import proxy_event, smart_return
body = """def %(method)s(self,*args,**kw): \"\"\"%(documentation)s\"\"\" %(pre_test)s from gui_thread_guts import proxy_event, print_exception, smart_return %(import_statement)s #import statement finished = threading.Event() # remove proxies if present args = dereference_arglist(args) %(arguments)s #arguments evt = proxy_event(%(call_method)s,arg_list,kw,finished) self.post(evt) finished.wait() if finished.exception_info: print_exception(finished.exception_info) raise finished.exception_info['type'],finished.exception_info['value'] %(results)s #results\n""" %locals()
7431e8dea40ba994ac9fd075b281c1a17cc5d5e8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/7431e8dea40ba994ac9fd075b281c1a17cc5d5e8/main.py
print_exception(finished.exception_info) raise finished.exception_info['type'],finished.exception_info['value']
raise finished.exception_info[0],finished.exception_info[1]
body = """def %(method)s(self,*args,**kw): \"\"\"%(documentation)s\"\"\" %(pre_test)s from gui_thread_guts import proxy_event, print_exception, smart_return %(import_statement)s #import statement finished = threading.Event() # remove proxies if present args = dereference_arglist(args) %(arguments)s #arguments evt = proxy_event(%(call_method)s,arg_list,kw,finished) self.post(evt) finished.wait() if finished.exception_info: print_exception(finished.exception_info) raise finished.exception_info['type'],finished.exception_info['value'] %(results)s #results\n""" %locals()
7431e8dea40ba994ac9fd075b281c1a17cc5d5e8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/7431e8dea40ba994ac9fd075b281c1a17cc5d5e8/main.py
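The second gui_thread change switches exception_info from a dict to a plain (type, value, traceback) tuple, i.e. the shape of sys.exc_info(). A hedged sketch of that capture-and-reraise pattern in modern Python (the original used the two-argument raise statement):

```python
import sys, threading

def run_and_capture(fn):
    state = {'exc': None}
    def worker():
        try:
            fn()
        except Exception:
            state['exc'] = sys.exc_info()     # (type, value, traceback) tuple
    t = threading.Thread(target=worker)
    t.start(); t.join()
    if state['exc'] is not None:              # re-raise in the calling thread
        raise state['exc'][1].with_traceback(state['exc'][2])

run_and_capture(lambda: None)                 # no exception: returns quietly
```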
def Numeric_random(size): import random from Numeric import zeros, Float64 results = zeros(size,Float64) f = results.flat for i in range(len(f)): f[i] = random.random() return results
def Numeric_random(size): import random from Numeric import zeros, Float64 results = zeros(size,Float64) f = results.flat for i in range(len(f)): f[i] = random.random() return results
74e500e9e0fe48a7f742e5b48156f56f6db24d87 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/74e500e9e0fe48a7f742e5b48156f56f6db24d87/test_basic.py
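Numeric_random above fills a Float64 array one element at a time from Python's random module. For comparison only (an aside, not part of the original test file), a one-call modern numpy equivalent:

```python
import numpy as np

def numeric_random(size, seed=0):
    rng = np.random.default_rng(seed)
    return rng.random(size)          # float64 array of uniform [0, 1) samples

print(numeric_random(4).shape)       # (4,)
```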
def fmin_bfgs(f, x0, fprime=None, args=(), maxgtol=1e-5, epsilon=_epsilon,
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-4, norm=Inf, epsilon=_epsilon,
def fmin_bfgs(f, x0, fprime=None, args=(), maxgtol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. maxgtol -- maximum allowable gradient magnitude for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] gtol = maxgtol sk = [2*gtol] warnflag = 0 while (Num.maximum.reduce(abs(gfk)) > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk k = k + 1 try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: #warnflag = 2 #break print "Divide by zero encountered: Hessian calculation reset." 
Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] gfk = gfkp1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
0b485ee95cd5b21a0a959c6b89ed7b94a83dd453 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0b485ee95cd5b21a0a959c6b89ed7b94a83dd453/optimize.py
maxgtol -- maximum allowable gradient magnitude for stopping
gtol -- gradient norm must be less than gtol before successful termination norm -- order of norm (Inf is max, -Inf is min)
def fmin_bfgs(f, x0, fprime=None, args=(), maxgtol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. maxgtol -- maximum allowable gradient magnitude for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] gtol = maxgtol sk = [2*gtol] warnflag = 0 while (Num.maximum.reduce(abs(gfk)) > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk k = k + 1 try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: #warnflag = 2 #break print "Divide by zero encountered: Hessian calculation reset." 
Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] gfk = gfkp1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
0b485ee95cd5b21a0a959c6b89ed7b94a83dd453 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0b485ee95cd5b21a0a959c6b89ed7b94a83dd453/optimize.py
gtol = maxgtol
def fmin_bfgs(f, x0, fprime=None, args=(), maxgtol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. maxgtol -- maximum allowable gradient magnitude for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] gtol = maxgtol sk = [2*gtol] warnflag = 0 while (Num.maximum.reduce(abs(gfk)) > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk k = k + 1 try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: #warnflag = 2 #break print "Divide by zero encountered: Hessian calculation reset." 
Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] gfk = gfkp1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
0b485ee95cd5b21a0a959c6b89ed7b94a83dd453 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0b485ee95cd5b21a0a959c6b89ed7b94a83dd453/optimize.py
while (Num.maximum.reduce(abs(gfk)) > gtol) and (k < maxiter):
gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter):
def fmin_bfgs(f, x0, fprime=None, args=(), maxgtol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. maxgtol -- maximum allowable gradient magnitude for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] gtol = maxgtol sk = [2*gtol] warnflag = 0 while (Num.maximum.reduce(abs(gfk)) > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk k = k + 1 try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: #warnflag = 2 #break print "Divide by zero encountered: Hessian calculation reset." 
Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] gfk = gfkp1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
0b485ee95cd5b21a0a959c6b89ed7b94a83dd453 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0b485ee95cd5b21a0a959c6b89ed7b94a83dd453/optimize.py
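The fmin_bfgs records above replace the elementwise gradient test with a vector norm compared against gtol, where ord=Inf reproduces the old maximum.reduce(abs(gfk)) behaviour. A minimal version of such a vecnorm helper (a sketch, not necessarily the exact scipy implementation):

```python
import numpy as np

def vecnorm(x, ord=2):
    x = np.asarray(x)
    if ord == np.inf:
        return np.abs(x).max()       # Inf norm: largest gradient component
    if ord == -np.inf:
        return np.abs(x).min()
    return np.sum(np.abs(x) ** ord) ** (1.0 / ord)

gfk = np.array([1e-5, -3e-5, 2e-6])
print(vecnorm(gfk, ord=np.inf) > 1e-4)    # False: converged under the Inf-norm test
```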
gfk = gfkp1
def fmin_bfgs(f, x0, fprime=None, args=(), maxgtol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. maxgtol -- maximum allowable gradient magnitude for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) xopt -- the minimizer of f. fopt -- the value of f(xopt). gopt -- the value of f'(xopt). (Should be near 0) Bopt -- the value of 1/f''(xopt). (inverse hessian matrix) func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- a list of all iterates (only returned if retall==1) Additional Inputs: maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if non-zero """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) I = MLab.eye(N) Hk = I old_fval = f(x0,*args) old_old_fval = old_fval + 5000 func_calls += 1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 xk = x0 if retall: allvecs = [x0] gtol = maxgtol sk = [2*gtol] warnflag = 0 while (Num.maximum.reduce(abs(gfk)) > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args) func_calls = func_calls + fc grad_calls = grad_calls + gc xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk k = k + 1 try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: #warnflag = 2 #break print "Divide by zero encountered: Hessian calculation reset." 
Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] gfk = gfkp1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
0b485ee95cd5b21a0a959c6b89ed7b94a83dd453 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0b485ee95cd5b21a0a959c6b89ed7b94a83dd453/optimize.py
def fmin_cg(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=_epsilon,
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-4, norm=Inf, epsilon=_epsilon,
def fmin_cg(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function with nonlinear conjugate gradient algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the nonlinear conjugate gradient algorithm of Polak and Ribiere See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- if retall then this vector of the iterates is returned Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if True """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol xk = x0 old_fval = f(xk,*args) old_old_fval = old_fval + 5000 func_calls +=1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 if retall: allvecs = [xk] sk = [2*gtol] warnflag = 0 pk = -gfk while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): deltak = Num.dot(gfk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args,c2=0.3) func_calls += fc grad_calls += gc xk = xk + alpha_k*pk if retall: allvecs.append(xk) if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xk,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk beta_k = pymax(0,Num.dot(yk,gfkp1)/deltak) pk = -gfkp1 + beta_k * pk gfk = gfkp1 k = k + 1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
0b485ee95cd5b21a0a959c6b89ed7b94a83dd453 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0b485ee95cd5b21a0a959c6b89ed7b94a83dd453/optimize.py
avegtol -- minimum average value of gradient for stopping
gtol -- stop when norm of gradient is less than gtol norm -- order of vector norm to use
def fmin_cg(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function with nonlinear conjugate gradient algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the nonlinear conjugate gradient algorithm of Polak and Ribiere See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- if retall then this vector of the iterates is returned Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if True """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol xk = x0 old_fval = f(xk,*args) old_old_fval = old_fval + 5000 func_calls +=1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 if retall: allvecs = [xk] sk = [2*gtol] warnflag = 0 pk = -gfk while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): deltak = Num.dot(gfk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args,c2=0.3) func_calls += fc grad_calls += gc xk = xk + alpha_k*pk if retall: allvecs.append(xk) if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xk,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk beta_k = pymax(0,Num.dot(yk,gfkp1)/deltak) pk = -gfkp1 + beta_k * pk gfk = gfkp1 k = k + 1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
0b485ee95cd5b21a0a959c6b89ed7b94a83dd453 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0b485ee95cd5b21a0a959c6b89ed7b94a83dd453/optimize.py
avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be.
def fmin_cg(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function with nonlinear conjugate gradient algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the nonlinear conjugate gradient algorithm of Polak and Ribiere See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- if retall then this vector of the iterates is returned Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if True """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol xk = x0 old_fval = f(xk,*args) old_old_fval = old_fval + 5000 func_calls +=1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 if retall: allvecs = [xk] sk = [2*gtol] warnflag = 0 pk = -gfk while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): deltak = Num.dot(gfk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args,c2=0.3) func_calls += fc grad_calls += gc xk = xk + alpha_k*pk if retall: allvecs.append(xk) if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xk,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk beta_k = pymax(0,Num.dot(yk,gfkp1)/deltak) pk = -gfkp1 + beta_k * pk gfk = gfkp1 k = k + 1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
0b485ee95cd5b21a0a959c6b89ed7b94a83dd453 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0b485ee95cd5b21a0a959c6b89ed7b94a83dd453/optimize.py
gtol = N*avegtol
def fmin_cg(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function with nonlinear conjugate gradient algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the nonlinear conjugate gradient algorithm of Polak and Ribiere See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- if retall then this vector of the iterates is returned Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if True """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol xk = x0 old_fval = f(xk,*args) old_old_fval = old_fval + 5000 func_calls +=1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 if retall: allvecs = [xk] sk = [2*gtol] warnflag = 0 pk = -gfk while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): deltak = Num.dot(gfk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args,c2=0.3) func_calls += fc grad_calls += gc xk = xk + alpha_k*pk if retall: allvecs.append(xk) if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xk,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk beta_k = pymax(0,Num.dot(yk,gfkp1)/deltak) pk = -gfkp1 + beta_k * pk gfk = gfkp1 k = k + 1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
0b485ee95cd5b21a0a959c6b89ed7b94a83dd453 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0b485ee95cd5b21a0a959c6b89ed7b94a83dd453/optimize.py
while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter):
gnorm = vecnorm(gfk,ord=norm) while (gnorm > gtol) and (k < maxiter):
def fmin_cg(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0): """Minimize a function with nonlinear conjugate gradient algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the nonlinear conjugate gradient algorithm of Polak and Ribiere See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' allvecs -- if retall then this vector of the iterates is returned Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. retall -- return a list of results at each iteration if True """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol xk = x0 old_fval = f(xk,*args) old_old_fval = old_fval + 5000 func_calls +=1 if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) myfprime = (approx_fprime,epsilon) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) myfprime = fprime grad_calls = grad_calls + 1 if retall: allvecs = [xk] sk = [2*gtol] warnflag = 0 pk = -gfk while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): deltak = Num.dot(gfk,gfk) alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args,c2=0.3) func_calls += fc grad_calls += gc xk = xk + alpha_k*pk if retall: allvecs.append(xk) if gfkp1 is None: if app_fprime: gfkp1 = apply(approx_fprime,(xk,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfkp1 = apply(fprime,(xk,)+args) grad_calls = grad_calls + 1 yk = gfkp1 - gfk beta_k = pymax(0,Num.dot(yk,gfkp1)/deltak) pk = -gfkp1 + beta_k * pk gfk = gfkp1 k = k + 1 if disp or full_output: fval = old_fval if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: retlist = xk, fval, func_calls, grad_calls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
0b485ee95cd5b21a0a959c6b89ed7b94a83dd453 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0b485ee95cd5b21a0a959c6b89ed7b94a83dd453/optimize.py
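The fmin_cg records share the same gtol/norm change; the algorithm-specific piece in their context is the Polak-Ribiere direction update, sketched below with modern numpy:

```python
import numpy as np

def polak_ribiere_step(pk, gfk, gfkp1):
    """Next search direction: -g_{k+1} + beta_k * p_k with PR+ beta clipped at 0."""
    deltak = np.dot(gfk, gfk)
    beta_k = max(0.0, np.dot(gfkp1 - gfk, gfkp1) / deltak)
    return -gfkp1 + beta_k * pk

print(polak_ribiere_step(np.array([1.0, 0.0]),
                         np.array([-1.0, 0.0]),
                         np.array([0.0, -0.5])))   # [0.25 0.5]
```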
This can be instantiated in two ways:
This can be instantiated in several ways:
def copy(self): csc = self.tocsc() return csc.copy()
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
??
standard CSC representation
def copy(self): csc = self.tocsc() return csc.copy()
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
def copy(self): csc = self.tocsc() return csc.copy()
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
self.data = asarray(s) self.rowind = asarray(rowind) self.indptr = asarray(indptr)
if copy:
    self.data = array(s)
    self.rowind = array(rowind)
    self.indptr = array(indptr)
else:
    self.data = asarray(s)
    self.rowind = asarray(rowind)
    self.indptr = asarray(indptr)
def __init__(self, arg1, dims=(None,None), nzmax=100, dtype='d', copy=False): spmatrix.__init__(self) if isdense(arg1): # Convert the dense matrix arg1 to CSC format if rank(arg1) == 2: s = asarray(arg1) if s.dtypechar not in 'fdFD': # Use a double array as the source (but leave it alone) s = s*1.0 if (rank(s) == 2): M, N = s.shape dtype = s.dtypechar func = getattr(sparsetools, _transtabl[dtype]+'fulltocsc') ierr = irow = jcol = 0 nnz = sum(ravel(s != 0.0)) a = zeros((nnz,), dtype) rowa = zeros((nnz,), 'i') ptra = zeros((N+1,), 'i') while 1: a, rowa, ptra, irow, jcol, ierr = \ func(s, a, rowa, ptra, irow, jcol, ierr) if (ierr == 0): break nnz = nnz + ALLOCSIZE a = resize1d(a, nnz) rowa = resize1d(rowa, nnz) self.data = a self.rowind = rowa self.indptr = ptra self.shape = (M, N) # s = dok_matrix(arg1).tocsc(nzmax) # self.shape = s.shape # self.data = s.data # self.rowind = s.rowind # self.indptr = s.indptr else: raise ValueError, "dense array does not have rank 2" elif isspmatrix(arg1): s = arg1 if isinstance(s, csc_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.rowind = s.rowind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.rowind = s.rowind self.indptr = s.indptr elif isinstance(s, csr_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.rowind, self.indptr = \ func(s.shape[1], s.data, s.colind, s.indptr) else: temp = s.tocsc() self.data = temp.data self.rowind = temp.rowind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: try: # Assume it's a tuple of matrix dimensions (M, N) (M, N) = arg1 M = int(M) # will raise TypeError if (data, ij) N = int(N) self.data = zeros((nzmax,), dtype) self.rowind = zeros((nzmax,), int) self.indptr = zeros((N+1,), int) self.shape = (M, N) except (ValueError, TypeError): try: # Try interpreting it as (data, ij) (s, ij) = arg1 assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2)) temp = coo_matrix((s, ij), dims=dims, nzmax=nzmax, \ dtype=dtype).tocsc() self.shape = temp.shape self.data = temp.data self.rowind = temp.rowind self.indptr = temp.indptr except: try: # Try interpreting it as (data, rowind, indptr) (s, rowind, indptr) = arg1 self.data = asarray(s) self.rowind = asarray(rowind) self.indptr = asarray(indptr) except: raise ValueError, "unrecognized form for csc_matrix constructor" else: raise ValueError, "unrecognized form for csc_matrix constructor"
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
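The copy flag introduced above comes down to numpy's array-versus-asarray distinction: array duplicates the caller's buffers, asarray aliases them. A minimal illustration, independent of the sparse classes:

import numpy as np

data = np.array([1.0, 2.0, 3.0])
aliased = np.asarray(data)    # shares memory with data
copied  = np.array(data)      # independent copy

data[0] = 99.0
print(aliased[0])   # 99.0 -- the alias sees the mutation
print(copied[0])    # 1.0  -- the copy does not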
This can be instantiated in four ways:
  1. csr_matrix(s)
This can be instantiated in several ways:
  - csr_matrix(d)
    with a dense matrix d
  - csr_matrix(s)
def copy(self): new = csc_matrix(self.shape, nzmax=self.nzmax, dtype=self.dtypechar) new.data = self.data.copy() new.rowind = self.rowind.copy() new.indptr = self.indptr.copy() new._check() return new
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
2. csr_matrix((M, N), [nzmax, dtype])
- csr_matrix((M, N), [nzmax, dtype])
def copy(self): new = csc_matrix(self.shape, nzmax=self.nzmax, dtype=self.dtypechar) new.data = self.data.copy() new.rowind = self.rowind.copy() new.indptr = self.indptr.copy() new._check() return new
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
3. csr_matrix(data, ij, [(M, N), nzmax])
- csr_matrix((data, ij), [(M, N), nzmax])
def copy(self): new = csc_matrix(self.shape, nzmax=self.nzmax, dtype=self.dtypechar) new.data = self.data.copy() new.rowind = self.rowind.copy() new.indptr = self.indptr.copy() new._check() return new
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
4. csr_matrix(data, (row, ptr), [(M, N)])
   ??
- csr_matrix((data, row, ptr), [(M, N)])
  standard CSR representation
def copy(self): new = csc_matrix(self.shape, nzmax=self.nzmax, dtype=self.dtypechar) new.data = self.data.copy() new.rowind = self.rowind.copy() new.indptr = self.indptr.copy() new._check() return new
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
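The "standard CSR representation" named in these docstrings is the (data, colind, indptr) triple. A small pure-numpy sketch of what those three arrays encode (illustrative only, no scipy dependency):

import numpy as np

dense = np.array([[1., 0., 2.],
                  [0., 0., 3.],
                  [4., 5., 0.]])

data   = np.array([1., 2., 3., 4., 5.])   # nonzeros, row by row
colind = np.array([0, 2, 2, 0, 1])        # column index of each nonzero
indptr = np.array([0, 2, 3, 5])           # row i occupies data[indptr[i]:indptr[i+1]]

rebuilt = np.zeros_like(dense)
for i in range(len(indptr) - 1):
    for k in range(indptr[i], indptr[i + 1]):
        rebuilt[i, colind[k]] = data[k]

print(np.array_equal(rebuilt, dense))     # True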
def __init__(self, arg1, arg2=None, arg3=None, nzmax=100, dtype='d', copy=False):
def __init__(self, arg1, dims=(None,None), nzmax=100, dtype='d', copy=False):
def copy(self): new = csc_matrix(self.shape, nzmax=self.nzmax, dtype=self.dtypechar) new.data = self.data.copy() new.rowind = self.rowind.copy() new.indptr = self.indptr.copy() new._check() return new
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
if isspmatrix(arg1):
if isdense(arg1):
    if rank(arg1) == 2:
        s = asarray(arg1)
        ocsc = csc_matrix(transpose(s))
        self.colind = ocsc.rowind
        self.indptr = ocsc.indptr
        self.data = ocsc.data
        self.shape = (ocsc.shape[1], ocsc.shape[0])
elif isspmatrix(arg1):
def __init__(self, arg1, arg2=None, arg3=None, nzmax=100, dtype='d', copy=False): spmatrix.__init__(self) if isspmatrix(arg1): s = arg1 if isinstance(s, csr_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.colind = s.colind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.colind = s.colind self.indptr = s.indptr elif isinstance(s, csc_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.colind, self.indptr = \ func(s.shape[1], s.data, s.rowind, s.indptr) else: try: temp = s.tocsr() except AttributeError: temp = csr_matrix(s.tocsc()) self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: try: assert len(arg1) == 2 and type(arg1[0]) == int and type(arg1[1]) == int except AssertionError: raise TypeError, "matrix dimensions must be a tuple of two integers" (M, N) = arg1 self.data = zeros((nzmax,), dtype) self.colind = zeros((nzmax,), 'i') self.indptr = zeros((M+1,), 'i') self.shape = (M, N) elif isinstance(arg1, ArrayType) or type(arg1) == list: s = asarray(arg1) if (rank(s) == 2): # converting from a full array ocsc = csc_matrix(transpose(s)) self.colind = ocsc.rowind self.indptr = ocsc.indptr self.data = ocsc.data self.shape = (ocsc.shape[1], ocsc.shape[0])
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
temp = csr_matrix(s.tocsc())
temp = csr_matrix(s.tocsc())
def __init__(self, arg1, arg2=None, arg3=None, nzmax=100, dtype='d', copy=False): spmatrix.__init__(self) if isspmatrix(arg1): s = arg1 if isinstance(s, csr_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.colind = s.colind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.colind = s.colind self.indptr = s.indptr elif isinstance(s, csc_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.colind, self.indptr = \ func(s.shape[1], s.data, s.rowind, s.indptr) else: try: temp = s.tocsr() except AttributeError: temp = csr_matrix(s.tocsc()) self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: try: assert len(arg1) == 2 and type(arg1[0]) == int and type(arg1[1]) == int except AssertionError: raise TypeError, "matrix dimensions must be a tuple of two integers" (M, N) = arg1 self.data = zeros((nzmax,), dtype) self.colind = zeros((nzmax,), 'i') self.indptr = zeros((M+1,), 'i') self.shape = (M, N) elif isinstance(arg1, ArrayType) or type(arg1) == list: s = asarray(arg1) if (rank(s) == 2): # converting from a full array ocsc = csc_matrix(transpose(s)) self.colind = ocsc.rowind self.indptr = ocsc.indptr self.data = ocsc.data self.shape = (ocsc.shape[1], ocsc.shape[0])
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
assert len(arg1) == 2 and type(arg1[0]) == int and type(arg1[1]) == int except AssertionError: raise TypeError, "matrix dimensions must be a tuple of two integers" (M, N) = arg1 self.data = zeros((nzmax,), dtype) self.colind = zeros((nzmax,), 'i') self.indptr = zeros((M+1,), 'i') self.shape = (M, N) elif isinstance(arg1, ArrayType) or type(arg1) == list: s = asarray(arg1) if (rank(s) == 2): ocsc = csc_matrix(transpose(s)) self.colind = ocsc.rowind self.indptr = ocsc.indptr self.data = ocsc.data self.shape = (ocsc.shape[1], ocsc.shape[0]) elif isinstance(arg2, ArrayType) and (rank(arg2) == 2) and (shape(arg2) == (len(s), 2)): ij = arg2 ijnew = ij.copy() ijnew[:, 0] = ij[:, 1] ijnew[:, 1] = ij[:, 0] temp = coo_matrix(s, ijnew, dims=(M, N), nzmax=nzmax, dtype=dtype) temp = temp.tocsr() self.shape = temp.shape self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr elif type(arg2) == tuple and len(arg2)==2: self.data = asarray(s) self.colind = arg2[0] self.indptr = arg2[1] if arg3 != None:
(M, N) = arg1 M = int(M) N = int(N) self.data = zeros((nzmax,), dtype) self.colind = zeros((nzmax,), int) self.indptr = zeros((M+1,), int) self.shape = (M, N) except (ValueError, TypeError): try: (s, ij) = arg1 assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2)) ijnew = ij.copy() ijnew[:, 0] = ij[:, 1] ijnew[:, 1] = ij[:, 0] temp = coo_matrix(s, ijnew, dims=(M, N), nzmax=nzmax, dtype=dtype) temp = temp.tocsr() self.shape = temp.shape self.data = temp.data self.colind = temp.rowind self.indptr = temp.indptr except:
def __init__(self, arg1, arg2=None, arg3=None, nzmax=100, dtype='d', copy=False): spmatrix.__init__(self) if isspmatrix(arg1): s = arg1 if isinstance(s, csr_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.colind = s.colind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.colind = s.colind self.indptr = s.indptr elif isinstance(s, csc_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.colind, self.indptr = \ func(s.shape[1], s.data, s.rowind, s.indptr) else: try: temp = s.tocsr() except AttributeError: temp = csr_matrix(s.tocsc()) self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: try: assert len(arg1) == 2 and type(arg1[0]) == int and type(arg1[1]) == int except AssertionError: raise TypeError, "matrix dimensions must be a tuple of two integers" (M, N) = arg1 self.data = zeros((nzmax,), dtype) self.colind = zeros((nzmax,), 'i') self.indptr = zeros((M+1,), 'i') self.shape = (M, N) elif isinstance(arg1, ArrayType) or type(arg1) == list: s = asarray(arg1) if (rank(s) == 2): # converting from a full array ocsc = csc_matrix(transpose(s)) self.colind = ocsc.rowind self.indptr = ocsc.indptr self.data = ocsc.data self.shape = (ocsc.shape[1], ocsc.shape[0])
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
M, N = arg3 except TypeError: raise TypeError, "argument 3 must be a pair (M, N) of dimensions" else: M = N = None if N is None: try: N = int(amax(self.colind)) + 1 except ValueError: N = 0 if M is None: M = len(self.indptr) - 1 if M == -1: M = 0 self.shape = (M, N) else: raise ValueError, "unrecognized form for csr_matrix constructor"
(s, colind, indptr) = arg1 if copy: self.data = array(s) self.colind = array(colind) self.indptr = array(indptr) else: self.data = asarray(s) self.colind = asarray(colind) self.indptr = asarray(indptr) except: raise ValueError, "unrecognized form for csr_matrix constructor"
def __init__(self, arg1, arg2=None, arg3=None, nzmax=100, dtype='d', copy=False): spmatrix.__init__(self) if isspmatrix(arg1): s = arg1 if isinstance(s, csr_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.colind = s.colind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.colind = s.colind self.indptr = s.indptr elif isinstance(s, csc_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.colind, self.indptr = \ func(s.shape[1], s.data, s.rowind, s.indptr) else: try: temp = s.tocsr() except AttributeError: temp = csr_matrix(s.tocsc()) self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: try: assert len(arg1) == 2 and type(arg1[0]) == int and type(arg1[1]) == int except AssertionError: raise TypeError, "matrix dimensions must be a tuple of two integers" (M, N) = arg1 self.data = zeros((nzmax,), dtype) self.colind = zeros((nzmax,), 'i') self.indptr = zeros((M+1,), 'i') self.shape = (M, N) elif isinstance(arg1, ArrayType) or type(arg1) == list: s = asarray(arg1) if (rank(s) == 2): # converting from a full array ocsc = csc_matrix(transpose(s)) self.colind = ocsc.rowind self.indptr = ocsc.indptr self.data = ocsc.data self.shape = (ocsc.shape[1], ocsc.shape[0])
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
return csr_matrix(c, (colc, ptrc), (M, N))
return csr_matrix((c, colc, ptrc), dims=(M, N))
def __add__(self, other): # First check if argument is a scalar if isscalar(other) or (isdense(other) and rank(other)==0): # Now we would add this scalar to every element. raise NotImplementedError, 'adding a scalar to a sparse matrix ' \ 'is not yet supported' elif isspmatrix(other): ocs = other.tocsr() if (ocs.shape != self.shape): raise ValueError, "inconsistent shapes"
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
return csr_matrix(c, (colc, ptrc), (M, N))
return csr_matrix((c, colc, ptrc), dims=(M, N))
def __pow__(self, other): """ Element-by-element power (unless other is a scalar, in which case return the matrix power.) """ if isscalar(other) or (isdense(other) and rank(other)==0): new = self.copy() new.data = new.data ** other new.dtypechar = new.data.dtypechar new.ftype = _transtabl[new.dtypechar] return new elif isspmatrix(other): ocs = other.tocsr() if (ocs.shape != self.shape): raise ValueError, "inconsistent shapes" dtypechar = _coerce_rules[(self.dtypechar, ocs.dtypechar)] data1, data2 = _convert_data(self.data, ocs.data, dtypechar) func = getattr(sparsetools, _transtabl[dtypechar]+'cscmul') c, colc, ptrc, ierr = func(data1, self.colind, self.indptr, data2, ocs.colind, ocs.indptr) if ierr: raise ValueError, "ran out of space (but shouldn't have happened)" M, N = self.shape return csr_matrix(c, (colc, ptrc), (M, N)) else: raise TypeError, "unsupported type for sparse matrix power"
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
return csr_matrix(c, (rowc, ptrc), (M, N))
return csr_matrix((c, rowc, ptrc), dims=(M, N))
def matmat(self, other): if isspmatrix(other): M, K1 = self.shape K2, N = other.shape a, rowa, ptra = self.data, self.colind, self.indptr if (K1 != K2): raise ValueError, "shape mismatch error" if isinstance(other, csc_matrix): other._check() dtypechar = _coerce_rules[(self.dtypechar, other.dtypechar)] ftype = _transtabl[dtypechar] func = getattr(sparsetools, ftype+'csrmucsc') b = other.data colb = other.rowind ptrb = other.indptr out = 'csc' firstarg = () elif isinstance(other, csr_matrix): other._check() dtypechar = _coerce_rules[(self.dtypechar, other.dtypechar)] ftype = _transtabl[dtypechar] func = getattr(sparsetools, ftype+'cscmucsc') b, colb, ptrb = a, rowa, ptra a, rowa, ptra = other.data, other.colind, other.indptr out = 'csr' firstarg = (N,) else: other = other.tocsc() dtypechar = _coerce_rules[(self.dtypechar, other.dtypechar)] ftype = _transtabl[dtypechar] func = getattr(sparsetools, ftype+'csrmucsc') b = other.data colb = other.rowind ptrb = other.indptr out = 'csc' firstarg = () a, b = _convert_data(a, b, dtypechar) newshape = (M, N) if out == 'csr': ptrc = zeros((M+1,), 'i') else: ptrc = zeros((N+1,), 'i') nnzc = 2*max(ptra[-1], ptrb[-1]) c = zeros((nnzc,), dtypechar) rowc = zeros((nnzc,), 'i') ierr = irow = kcol = 0 while 1: args = firstarg+(a, rowa, ptra, b, colb, ptrb, c, rowc, ptrc, irow, kcol, ierr) c, rowc, ptrc, irow, kcol, ierr = func(*args) if (ierr==0): break # otherwise we were too small and must resize percent_to_go = 1- (1.0*kcol) / N newnnzc = int(ceil((1+percent_to_go)*nnzc)) c = resize1d(c, newnnzc) rowc = resize1d(rowc, newnnzc) nnzc = newnnzc
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
return csr_matrix(data, (colind, row_ptr))
return csr_matrix((data, colind, row_ptr), nzmax=nzmax)
def tocsr(self, nzmax=None): """ Return Compressed Sparse Row format arrays for this matrix """ keys = self.keys() keys.sort() nnz = self.nnz assert nnz == len(keys) nzmax = max(nnz, nzmax) data = [0]*nzmax colind = [0]*nzmax row_ptr = [0]*(self.shape[0]+1) current_row = 0 k = 0 for key in keys: ikey0 = int(key[0]) ikey1 = int(key[1]) if ikey0 != current_row: N = ikey0-current_row row_ptr[current_row+1:ikey0+1] = [k]*N current_row = ikey0 data[k] = self[key] colind[k] = ikey1 k += 1 row_ptr[-1] = nnz data = array(data) colind = array(colind) row_ptr = array(row_ptr) return csr_matrix(data, (colind, row_ptr))
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
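The tocsr conversion above walks the sorted dict-of-keys entries and fills data, colind and row_ptr. A toy version of the same idea, with made-up inputs and without the nzmax handling:

dok = {(0, 0): 1.0, (0, 2): 2.0, (1, 2): 3.0, (2, 0): 4.0, (2, 1): 5.0}
nrows = 3

keys = sorted(dok)                     # row-major order, like keys.sort() above
data   = [dok[k] for k in keys]
colind = [c for (_, c) in keys]
indptr = [0] * (nrows + 1)
for (r, _) in keys:
    indptr[r + 1] += 1                 # nonzeros per row
for i in range(nrows):
    indptr[i + 1] += indptr[i]         # running sum -> row pointers

print(data, colind, indptr)            # [1.0, 2.0, 3.0, 4.0, 5.0] [0, 2, 2, 0, 1] [0, 2, 3, 5]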
return csc_matrix((data, rowind, col_ptr))
return csc_matrix((data, rowind, col_ptr), nzmax=nzmax)
def tocsc(self, nzmax=None): """ Return Compressed Sparse Column format arrays for this matrix """ keys = self.keys() # Sort based on columns keys.sort(csc_cmp) nnz = self.nnz assert nnz == len(keys) nzmax = max(nnz, nzmax) data = [0]*nzmax rowind = [0]*nzmax col_ptr = [0]*(self.shape[1]+1) current_col = 0 k = 0 for key in keys: ikey0 = int(key[0]) ikey1 = int(key[1]) if ikey1 != current_col: N = ikey1-current_col col_ptr[current_col+1:ikey1+1] = [k]*N current_col = ikey1 data[k] = self[key] rowind[k] = ikey0 k += 1 col_ptr[-1] = nnz data = array(data) rowind = array(rowind) col_ptr = array(col_ptr) return csc_matrix((data, rowind, col_ptr))
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
return csr_matrix(a, (cola, ptra), self.shape)
return csr_matrix((a, cola, ptra), dims=self.shape)
def tocsr(self): func = getattr(sparsetools, self.ftype+"cootocsc") data, row, col = self._normalize(rowfirst=True) a, cola, ptra, ierr = func(self.shape[0], data, col, row) if ierr: raise RuntimeError, "error in conversion" return csr_matrix(a, (cola, ptra), self.shape)
a9fa59f2c7be4099d580674761bd725b0c430682 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a9fa59f2c7be4099d580674761bd725b0c430682/sparse.py
config.add_subpackage('cluster')
def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('scipy',parent_package,top_path) config.add_subpackage('cluster') config.add_subpackage('fftpack') config.add_subpackage('integrate') config.add_subpackage('interpolate') config.add_subpackage('io') config.add_subpackage('lib') config.add_subpackage('linalg') config.add_subpackage('linsolve') config.add_subpackage('maxentropy') config.add_subpackage('misc') #config.add_subpackage('montecarlo') config.add_subpackage('optimize') config.add_subpackage('sandbox') config.add_subpackage('signal') config.add_subpackage('sparse') config.add_subpackage('special') config.add_subpackage('stats') config.add_subpackage('ndimage') config.add_subpackage('weave') config.make_svn_version_py() # installs __svn_version__.py config.make_config_py() return config
1e2ea0dabf89fc720f1bdccaa96e682c6b6ecd74 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1e2ea0dabf89fc720f1bdccaa96e682c6b6ecd74/setup.py
row_ptr[-1] = nnz+1
row_ptr[-1] = nnz
def getCSR(self): # Return Compressed Sparse Row format arrays for this matrix keys = self.keys() keys.sort() nnz = len(keys) data = [None]*nnz colind = [None]*nnz row_ptr = [None]*(self.shape[0]+1) current_row = -1 k = 0 for key in keys: ikey0 = int(key[0]) ikey1 = int(key[1]) if ikey0 != current_row: current_row = ikey0 row_ptr[ikey0] = k data[k] = self[key] colind[k] = ikey1 k += 1 row_ptr[-1] = nnz+1 data = array(data) colind = array(colind) row_ptr = array(row_ptr) ftype = data.typecode() if ftype not in ['d','D','f','F']: data = data*1.0 ftype = 'd' return ftype, nnz, data, colind, row_ptr
bf0358bb908d1195acf4409769fcf327fca47b49 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/bf0358bb908d1195acf4409769fcf327fca47b49/Sparse.py
col_ptr[-1] = nnz+1
col_ptr[-1] = nnz
def getCSC(self): # Return Compressed Sparse Column format arrays for this matrix keys = self.keys() keys.sort(csc_cmp) nnz = len(keys) data = [None]*nnz rowind = [None]*nnz col_ptr = [None]*(self.shape[1]+1) current_col = -1 k = 0 for key in keys: ikey0 = int(key[0]) ikey1 = int(key[1]) if ikey1 != current_col: current_col = ikey1 col_ptr[ikey1] = k data[k] = self[key] rowind[k] = ikey0 k += 1 col_ptr[-1] = nnz+1 data = array(data) rowind = array(rowind) col_ptr = array(col_ptr) ftype = data.typecode() if ftype not in ['d','D','f','F']: data = data*1.0 ftype = 'd' return ftype, nnz, data, rowind, col_ptr
bf0358bb908d1195acf4409769fcf327fca47b49 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/bf0358bb908d1195acf4409769fcf327fca47b49/Sparse.py
def sparse_linear_solve(A,b):
def sparse_linear_solve(A,b,permc_spec=0):
def sparse_linear_solve(A,b): if not hasattr(A, 'getCSR') and not hasattr(A, 'getCSC'): raise ValueError, "Sparse matrix must be able to return CSC format--"\ "A.getCSC()--or CSR format--A.getCSR()" if not hasattr(A,'shape'): raise ValueError, "Sparse matrix must be able to return shape (rows,cols) = A.shape" if hasattr(A, 'getCSC'): ftype, lastel, data, index0, index1 = A.getCSC() csc = 1 else: ftype, lastel, data, index0, index1 = A.getCSR() csc = 0 M,N = A.shape gssv = eval('_superlu.' + ftype + 'gssv') return gssv(M,N,lastel,data,index0,index1,b,csc)
bf0358bb908d1195acf4409769fcf327fca47b49 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/bf0358bb908d1195acf4409769fcf327fca47b49/Sparse.py
return gssv(M,N,lastel,data,index0,index1,b,csc)
return gssv(M,N,lastel,data,index0,index1,b,csc,permc_spec)
def sparse_linear_solve(A,b): if not hasattr(A, 'getCSR') and not hasattr(A, 'getCSC'): raise ValueError, "Sparse matrix must be able to return CSC format--"\ "A.getCSC()--or CSR format--A.getCSR()" if not hasattr(A,'shape'): raise ValueError, "Sparse matrix must be able to return shape (rows,cols) = A.shape" if hasattr(A, 'getCSC'): ftype, lastel, data, index0, index1 = A.getCSC() csc = 1 else: ftype, lastel, data, index0, index1 = A.getCSR() csc = 0 M,N = A.shape gssv = eval('_superlu.' + ftype + 'gssv') return gssv(M,N,lastel,data,index0,index1,b,csc)
bf0358bb908d1195acf4409769fcf327fca47b49 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/bf0358bb908d1195acf4409769fcf327fca47b49/Sparse.py
def str_array(arr, precision=5,col_sep=' ',row_sep="\n"):
def str_array(arr, precision=5,col_sep=' ',row_sep="\n",ss=0):
def str_array(arr, precision=5,col_sep=' ',row_sep="\n"): thestr = [] arr = asarray(arr) N,M = arr.shape thistype = arr.typecode() nofloat = (thistype in '1silbwu') or (thistype in 'Oc') cmplx = thistype in 'FD' fmtstr = "%%.%de" % precision for n in xrange(N): theline = [] for m in xrange(M): val = arr[n,m] if nofloat: thisval = str(val) elif cmplx: rval = real(val) ival = imag(val) thisval = eval('fmtstr % rval') istr = eval('fmtstr % ival') if (ival >= 0): thisval = '%s+j%s' % (thisval, istr) else: thisval = '%s-j%s' % (thisval, istr) else: thisval = eval('fmtstr % val') theline.append(thisval) strline = col_sep.join(theline) thestr.append(strline) return row_sep.join(thestr)
40e0fe0c805bb4f35d9d3048b3da501100edec66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/40e0fe0c805bb4f35d9d3048b3da501100edec66/array_import.py
if nofloat:
if ss and abs(val) < cmpnum:
    val = 0*val
if nofloat or val==0:
def str_array(arr, precision=5,col_sep=' ',row_sep="\n"): thestr = [] arr = asarray(arr) N,M = arr.shape thistype = arr.typecode() nofloat = (thistype in '1silbwu') or (thistype in 'Oc') cmplx = thistype in 'FD' fmtstr = "%%.%de" % precision for n in xrange(N): theline = [] for m in xrange(M): val = arr[n,m] if nofloat: thisval = str(val) elif cmplx: rval = real(val) ival = imag(val) thisval = eval('fmtstr % rval') istr = eval('fmtstr % ival') if (ival >= 0): thisval = '%s+j%s' % (thisval, istr) else: thisval = '%s-j%s' % (thisval, istr) else: thisval = eval('fmtstr % val') theline.append(thisval) strline = col_sep.join(theline) thestr.append(strline) return row_sep.join(thestr)
40e0fe0c805bb4f35d9d3048b3da501100edec66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/40e0fe0c805bb4f35d9d3048b3da501100edec66/array_import.py
istr = eval('fmtstr % ival')
if (ival >= 0):
if (ival >= 0): istr = eval('fmtstr % ival')
def str_array(arr, precision=5,col_sep=' ',row_sep="\n"): thestr = [] arr = asarray(arr) N,M = arr.shape thistype = arr.typecode() nofloat = (thistype in '1silbwu') or (thistype in 'Oc') cmplx = thistype in 'FD' fmtstr = "%%.%de" % precision for n in xrange(N): theline = [] for m in xrange(M): val = arr[n,m] if nofloat: thisval = str(val) elif cmplx: rval = real(val) ival = imag(val) thisval = eval('fmtstr % rval') istr = eval('fmtstr % ival') if (ival >= 0): thisval = '%s+j%s' % (thisval, istr) else: thisval = '%s-j%s' % (thisval, istr) else: thisval = eval('fmtstr % val') theline.append(thisval) strline = col_sep.join(theline) thestr.append(strline) return row_sep.join(thestr)
40e0fe0c805bb4f35d9d3048b3da501100edec66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/40e0fe0c805bb4f35d9d3048b3da501100edec66/array_import.py
precision=5, keep_open=0):
precision=5, suppress_small=0, keep_open=0):
def write_array(fileobject, arr, separator=" ", linesep='\n', precision=5, keep_open=0): """Write a rank-2 or less array to file represented by fileobject. Inputs: fileobject -- An open file object or a string to a valid filename. arr -- The array to write. separator -- separator to write between elements of the array. linesep -- separator to write between rows of array precision -- number of digits after the decimal place to write. keep_open = non-zero to return the open file, otherwise, the file is closed. Outputs: file -- The open file (if keep_open is non-zero) """ file = get_open_file(fileobject, mode='wa') rank = Numeric.rank(arr) if rank > 2: raise ValueError, "Can-only write up to 2-D arrays." if rank == 0: h = 1 arr = Numeric.reshape(arr, (1,1)) elif rank == 1: h = Numeric.shape(arr)[0] arr = Numeric.reshape(arr, (h,1)) else: h = Numeric.shape(arr)[0] arr = asarray(arr) for ch in separator: if ch in '0123456789-+FfeEgGjJIi.': raise ValueError, "Bad string for separator" astr = str_array(arr, precision=precision, col_sep=separator, row_sep=linesep) file.write(astr) if keep_open: return file else: file.close() return
40e0fe0c805bb4f35d9d3048b3da501100edec66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/40e0fe0c805bb4f35d9d3048b3da501100edec66/array_import.py
def write_array(fileobject, arr, separator=" ", linesep='\n', precision=5, keep_open=0): """Write a rank-2 or less array to file represented by fileobject. Inputs: fileobject -- An open file object or a string to a valid filename. arr -- The array to write. separator -- separator to write between elements of the array. linesep -- separator to write between rows of array precision -- number of digits after the decimal place to write. keep_open = non-zero to return the open file, otherwise, the file is closed. Outputs: file -- The open file (if keep_open is non-zero) """ file = get_open_file(fileobject, mode='wa') rank = Numeric.rank(arr) if rank > 2: raise ValueError, "Can-only write up to 2-D arrays." if rank == 0: h = 1 arr = Numeric.reshape(arr, (1,1)) elif rank == 1: h = Numeric.shape(arr)[0] arr = Numeric.reshape(arr, (h,1)) else: h = Numeric.shape(arr)[0] arr = asarray(arr) for ch in separator: if ch in '0123456789-+FfeEgGjJIi.': raise ValueError, "Bad string for separator" astr = str_array(arr, precision=precision, col_sep=separator, row_sep=linesep) file.write(astr) if keep_open: return file else: file.close() return
40e0fe0c805bb4f35d9d3048b3da501100edec66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/40e0fe0c805bb4f35d9d3048b3da501100edec66/array_import.py
def write_array(fileobject, arr, separator=" ", linesep='\n', precision=5, keep_open=0): """Write a rank-2 or less array to file represented by fileobject. Inputs: fileobject -- An open file object or a string to a valid filename. arr -- The array to write. separator -- separator to write between elements of the array. linesep -- separator to write between rows of array precision -- number of digits after the decimal place to write. keep_open = non-zero to return the open file, otherwise, the file is closed. Outputs: file -- The open file (if keep_open is non-zero) """ file = get_open_file(fileobject, mode='wa') rank = Numeric.rank(arr) if rank > 2: raise ValueError, "Can-only write up to 2-D arrays." if rank == 0: h = 1 arr = Numeric.reshape(arr, (1,1)) elif rank == 1: h = Numeric.shape(arr)[0] arr = Numeric.reshape(arr, (h,1)) else: h = Numeric.shape(arr)[0] arr = asarray(arr) for ch in separator: if ch in '0123456789-+FfeEgGjJIi.': raise ValueError, "Bad string for separator" astr = str_array(arr, precision=precision, col_sep=separator, row_sep=linesep) file.write(astr) if keep_open: return file else: file.close() return
40e0fe0c805bb4f35d9d3048b3da501100edec66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/40e0fe0c805bb4f35d9d3048b3da501100edec66/array_import.py
col_sep=separator, row_sep=linesep)
col_sep=separator, row_sep=linesep, ss = suppress_small)
def write_array(fileobject, arr, separator=" ", linesep='\n', precision=5, keep_open=0): """Write a rank-2 or less array to file represented by fileobject. Inputs: fileobject -- An open file object or a string to a valid filename. arr -- The array to write. separator -- separator to write between elements of the array. linesep -- separator to write between rows of array precision -- number of digits after the decimal place to write. keep_open = non-zero to return the open file, otherwise, the file is closed. Outputs: file -- The open file (if keep_open is non-zero) """ file = get_open_file(fileobject, mode='wa') rank = Numeric.rank(arr) if rank > 2: raise ValueError, "Can-only write up to 2-D arrays." if rank == 0: h = 1 arr = Numeric.reshape(arr, (1,1)) elif rank == 1: h = Numeric.shape(arr)[0] arr = Numeric.reshape(arr, (h,1)) else: h = Numeric.shape(arr)[0] arr = asarray(arr) for ch in separator: if ch in '0123456789-+FfeEgGjJIi.': raise ValueError, "Bad string for separator" astr = str_array(arr, precision=precision, col_sep=separator, row_sep=linesep) file.write(astr) if keep_open: return file else: file.close() return
40e0fe0c805bb4f35d9d3048b3da501100edec66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/40e0fe0c805bb4f35d9d3048b3da501100edec66/array_import.py
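The suppress_small option threaded through write_array and str_array above zeroes values below some threshold before formatting them. A rough sketch of that behaviour; the threshold name cmpnum comes from the patch, but the value used here (1e-10) is only an assumption, and fmt itself is invented for the example:

def fmt(val, suppress_small=False, cmpnum=1e-10, precision=5):
    # zero out tiny magnitudes before formatting, as the ss branch above does
    if suppress_small and abs(val) < cmpnum:
        val = 0*val
    return "%.*e" % (precision, val)

print(fmt(3.2e-14))                        # 3.20000e-14
print(fmt(3.2e-14, suppress_small=True))   # 0.00000e+00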
class test_trapz(unittest.TestCase):
    def check_basic(self):
        pass

class test_diff(unittest.TestCase):
    pass

class test_corrcoef(unittest.TestCase):
    pass

class test_cov(unittest.TestCase):
    pass

class test_squeeze(unittest.TestCase):
    pass

class test_sinc(unittest.TestCase):
    pass

class test_angle(unittest.TestCase):
    pass
def check_basic(self):
    ba = [1,2,10,11,6,5,4]
    ba2 = [[1,2,3,4],[5,6,7,9],[10,3,4,5]]
    for ctype in ['1','b','s','i','l','f','d','F','D']:
        a = array(ba,ctype)
        a2 = array(ba2,ctype)
        if ctype in ['1', 'b']:
            self.failUnlessRaises(ArithmeticError, cumprod, a)
            self.failUnlessRaises(ArithmeticError, cumprod, a2, 1)
            self.failUnlessRaises(ArithmeticError, cumprod, a)
        else:
            assert_array_equal(cumprod(a),
                               array([1, 2, 20, 220, 1320, 6600, 26400],ctype))
            assert_array_equal(cumprod(a2),
                               array([[ 1, 2, 3, 4],
                                      [ 5, 12, 21, 36],
                                      [50, 36, 84, 180]],ctype))
            assert_array_equal(cumprod(a2,axis=1),
                               array([[ 1, 2, 6, 24],
                                      [ 5, 30, 210, 1890],
                                      [10, 30, 120, 600]],ctype))
29256e3aa20abffd3c81d460919764efd847bb01 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/29256e3aa20abffd3c81d460919764efd847bb01/test_basic.py
argout = (array(0,mtype).itemsize,mtype)
return argout
newarr = array(0,mtype)
return newarr.itemsize, newarr.dtype.char
def getsize_type(mtype): if mtype in ['B','uchar','byte','unsigned char','integer*1', 'int8']: mtype = 'B' elif mtype in ['h','schar', 'signed char']: mtype = 'B' elif mtype in ['h','short','int16','integer*2']: mtype = 'h' elif mtype in ['H','ushort','uint16','unsigned short']: mtype = 'H' elif mtype in ['i','int']: mtype = 'i' elif mtype in ['i','uint','uint32','unsigned int']: mtype = 'I' elif mtype in ['l','long','int32','integer*4']: mtype = 'l' elif mtype in ['f','float','float32','real*4', 'real']: mtype = 'f' elif mtype in ['d','double','float64','real*8', 'double precision']: mtype = 'd' elif mtype in ['F','complex float','complex*8','complex64']: mtype = 'F' elif mtype in ['D','complex*16','complex128','complex','complex double']: mtype = 'D' else: mtype = obj2sctype(mtype) argout = (array(0,mtype).itemsize,mtype) return argout
5449774b4aeff6ae8615db9bc774321818398b25 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/5449774b4aeff6ae8615db9bc774321818398b25/mio.py
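The rewritten getsize_type above reports the element size and single-character dtype code of a zero-dimensional array. A quick check of those two attributes in plain numpy:

import numpy as np

newarr = np.array(0, 'f')
print(newarr.itemsize, newarr.dtype.char)   # 4 f
newarr = np.array(0, 'd')
print(newarr.itemsize, newarr.dtype.char)   # 8 d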
q = arr(where(left*(q==q), 1-q, q))
q = arr(where(left+q-q, 1-q, q))
def weibullppf(q, shape, left=0, loc=0.0, scale=1.0): a, b, loc, q, left = map(arr,(shape, scale, loc, q, left)) cond1 = (a>0) & (b>0) & (0<=q) & (q<=1) q = arr(where(left*(q==q), 1-q, q)) vals = pow(arr(log(1.0/arr(1-q))),1.0/a) return select([1-cond1,left==0], [scipy.nan, b*vals+loc], -b*vals+loc)
960a2ed8f8904a14f20f200b2415ed0b75b0404c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/960a2ed8f8904a14f20f200b2415ed0b75b0404c/distributions.py
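The switch from left*(q==q) to left+q-q above changes how NaN quantiles propagate: both expressions broadcast the scalar left to q's shape, but only the second stays NaN where q is NaN, so an invalid q is not silently treated as a right-tail request. A quick numpy check:

import numpy as np

q = np.array([0.1, np.nan, 0.9])
left = 1

print(left * (q == q))   # [1. 0. 1.]  -> NaN entry collapses to 0
print(left + q - q)      # [1. nan 1.] -> NaN propagates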
if (N%2 != 0):
    raise ValueError, "Length of sequence must be even."
xtilde = 0.0*x
even = (N%2 == 0)
def dct(x,axis=-1): n = len(x.shape) N = x.shape[axis] if (N%2 != 0): raise ValueError, "Length of sequence must be even." xtilde = 0.0*x slices = [None]*4 for k in range(4): slices[k] = [] for j in range(n): slices[k].append(slice(None)) slices[0][axis] = slice(None,N/2) slices[1][axis] = slice(None,None,2) slices[2][axis] = slice(N/2,None) slices[3][axis] = slice(N,None,-2) for k in range(4): slices[k] = tuple(slices[k]) xtilde[slices[0]] = x[slices[1]] xtilde[slices[2]] = x[slices[3]] Xt = scipy.fft(xtilde,axis=axis) pk = exp(-1j*pi*arange(N)/(2*N)) newshape = ones(n) newshape[axis] = N pk.shape = newshape return squeeze(real(Xt*pk))
8a6c0e7c7c6d0ef9ca4896d8af69746dcb51ce58 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/8a6c0e7c7c6d0ef9ca4896d8af69746dcb51ce58/transforms.py
            slices[k].append(slice(None))
    slices[0][axis] = slice(None,N/2)
    slices[1][axis] = slice(None,None,2)
    slices[2][axis] = slice(N/2,None)
    slices[3][axis] = slice(N,None,-2)
            slices[k].append(slice(None))
    if even:
        xtilde = 0.0*x
        slices[0][axis] = slice(None,N/2)
        slices[1][axis] = slice(None,None,2)
        slices[2][axis] = slice(N/2,None)
        slices[3][axis] = slice(N,None,-2)
    else:
        newshape = list(x.shape)
        newshape[axis] = 2*N
        xtilde = sb.empty(newshape,sb.Float)
        slices[0][axis] = slice(None,N)
        slices[2][axis] = slice(N,None)
        slices[3][axis] = slice(None,None,-1)
def dct(x,axis=-1): n = len(x.shape) N = x.shape[axis] if (N%2 != 0): raise ValueError, "Length of sequence must be even." xtilde = 0.0*x slices = [None]*4 for k in range(4): slices[k] = [] for j in range(n): slices[k].append(slice(None)) slices[0][axis] = slice(None,N/2) slices[1][axis] = slice(None,None,2) slices[2][axis] = slice(N/2,None) slices[3][axis] = slice(N,None,-2) for k in range(4): slices[k] = tuple(slices[k]) xtilde[slices[0]] = x[slices[1]] xtilde[slices[2]] = x[slices[3]] Xt = scipy.fft(xtilde,axis=axis) pk = exp(-1j*pi*arange(N)/(2*N)) newshape = ones(n) newshape[axis] = N pk.shape = newshape return squeeze(real(Xt*pk))
8a6c0e7c7c6d0ef9ca4896d8af69746dcb51ce58 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/8a6c0e7c7c6d0ef9ca4896d8af69746dcb51ce58/transforms.py
pk = exp(-1j*pi*arange(N)/(2*N))
newshape = ones(n)
pk = sb.exp(-1j*pi*sb.arange(N)/(2*N))
newshape = sb.ones(n)
def dct(x,axis=-1): n = len(x.shape) N = x.shape[axis] if (N%2 != 0): raise ValueError, "Length of sequence must be even." xtilde = 0.0*x slices = [None]*4 for k in range(4): slices[k] = [] for j in range(n): slices[k].append(slice(None)) slices[0][axis] = slice(None,N/2) slices[1][axis] = slice(None,None,2) slices[2][axis] = slice(N/2,None) slices[3][axis] = slice(N,None,-2) for k in range(4): slices[k] = tuple(slices[k]) xtilde[slices[0]] = x[slices[1]] xtilde[slices[2]] = x[slices[3]] Xt = scipy.fft(xtilde,axis=axis) pk = exp(-1j*pi*arange(N)/(2*N)) newshape = ones(n) newshape[axis] = N pk.shape = newshape return squeeze(real(Xt*pk))
8a6c0e7c7c6d0ef9ca4896d8af69746dcb51ce58 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/8a6c0e7c7c6d0ef9ca4896d8af69746dcb51ce58/transforms.py
return squeeze(real(Xt*pk))
if not even:
    pk /= 2; Xt = Xt[slices[0]]
return sb.real(Xt*pk)
def dct(x,axis=-1): n = len(x.shape) N = x.shape[axis] if (N%2 != 0): raise ValueError, "Length of sequence must be even." xtilde = 0.0*x slices = [None]*4 for k in range(4): slices[k] = [] for j in range(n): slices[k].append(slice(None)) slices[0][axis] = slice(None,N/2) slices[1][axis] = slice(None,None,2) slices[2][axis] = slice(N/2,None) slices[3][axis] = slice(N,None,-2) for k in range(4): slices[k] = tuple(slices[k]) xtilde[slices[0]] = x[slices[1]] xtilde[slices[2]] = x[slices[3]] Xt = scipy.fft(xtilde,axis=axis) pk = exp(-1j*pi*arange(N)/(2*N)) newshape = ones(n) newshape[axis] = N pk.shape = newshape return squeeze(real(Xt*pk))
8a6c0e7c7c6d0ef9ca4896d8af69746dcb51ce58 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/8a6c0e7c7c6d0ef9ca4896d8af69746dcb51ce58/transforms.py
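The dct in this record computes an even-length DCT-II by taking the FFT of a reordered sequence (even-indexed samples first, odd-indexed samples reversed) and applying a half-sample phase shift. A numerical check of that identity against the direct DCT-II sum, in plain numpy (illustrative only, not the record's code path):

import numpy as np

N = 8
x = np.random.rand(N)

# direct DCT-II: X_k = sum_n x_n * cos(pi*k*(2n+1)/(2N))
k = np.arange(N)[:, None]
n = np.arange(N)[None, :]
direct = (x * np.cos(np.pi * k * (2*n + 1) / (2*N))).sum(axis=1)

# FFT of the reordered sequence [x0, x2, x4, x6, x7, x5, x3, x1], then phase fix
xtilde = np.concatenate([x[::2], x[::-2]])
fftdct = np.real(np.fft.fft(xtilde) * np.exp(-1j*np.pi*np.arange(N)/(2*N)))

print(np.allclose(direct, fftdct))   # True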
N = v.shape[axis]
if (N%2 != 0):
    raise ValueError, "Length of sequence must be even."
k = arange(N)
ak = sb.r_[1.0,[2]*(N-1)]*exp(1j*pi*k/(2*N))
newshape = ones(n)
newshape[axis] = N
ak.shape = newshape
xhat = real(scipy.ifft(v*ak,axis=axis))
x = 0.0*v
N = v.shape[axis]
even = (N%2 == 0)
def idct(v,axis=-1): n = len(v.shape) N = v.shape[axis] if (N%2 != 0): raise ValueError, "Length of sequence must be even." k = arange(N) ak = sb.r_[1.0,[2]*(N-1)]*exp(1j*pi*k/(2*N)) newshape = ones(n) newshape[axis] = N ak.shape = newshape xhat = real(scipy.ifft(v*ak,axis=axis)) x = 0.0*v slices = [None]*4 for k in range(4): slices[k] = [] for j in range(n): slices[k].append(slice(None)) slices[0][axis] = slice(None,None,2) slices[1][axis] = slice(None,N/2) slices[2][axis] = slice(N,None,-2) slices[3][axis] = slice(N/2,None) for k in range(4): slices[k] = tuple(slices[k]) x[slices[0]] = xhat[slices[1]] x[slices[2]] = xhat[slices[3]] return x
8a6c0e7c7c6d0ef9ca4896d8af69746dcb51ce58 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/8a6c0e7c7c6d0ef9ca4896d8af69746dcb51ce58/transforms.py
            slices[k].append(slice(None))
    slices[0][axis] = slice(None,None,2)
    slices[1][axis] = slice(None,N/2)
    slices[2][axis] = slice(N,None,-2)
    slices[3][axis] = slice(N/2,None)
    for k in range(4):
        slices[k] = tuple(slices[k])
    x[slices[0]] = xhat[slices[1]]
    x[slices[2]] = xhat[slices[3]]
    return x
            slices[k].append(slice(None))
    k = arange(N)
    if even:
        ak = sb.r_[1.0,[2]*(N-1)]*exp(1j*pi*k/(2*N))
        newshape = ones(n)
        newshape[axis] = N
        ak.shape = newshape
        xhat = real(scipy.ifft(v*ak,axis=axis))
        x = 0.0*v
        slices[0][axis] = slice(None,None,2)
        slices[1][axis] = slice(None,N/2)
        slices[2][axis] = slice(N,None,-2)
        slices[3][axis] = slice(N/2,None)
        for k in range(4):
            slices[k] = tuple(slices[k])
        x[slices[0]] = xhat[slices[1]]
        x[slices[2]] = xhat[slices[3]]
        return x
    else:
        ak = 2*sb.exp(1j*pi*k/(2*N))
        newshape = ones(n)
        newshape[axis] = N
        ak.shape = newshape
        newshape = list(v.shape)
        newshape[axis] = 2*N
        Y = zeros(newshape,sb.Complex)
        slices[0][axis] = slice(None,N)
        slices[1][axis] = slice(None,None)
        slices[2][axis] = slice(N+1,None)
        slices[3][axis] = slice((N-1),0,-1)
        Y[slices[0]] = ak*v
        Y[slices[2]] = conj(Y[slices[3]])
        x = real(scipy.ifft(Y,axis=axis))[slices[0]]
        return x
def idct(v,axis=-1): n = len(v.shape) N = v.shape[axis] if (N%2 != 0): raise ValueError, "Length of sequence must be even." k = arange(N) ak = sb.r_[1.0,[2]*(N-1)]*exp(1j*pi*k/(2*N)) newshape = ones(n) newshape[axis] = N ak.shape = newshape xhat = real(scipy.ifft(v*ak,axis=axis)) x = 0.0*v slices = [None]*4 for k in range(4): slices[k] = [] for j in range(n): slices[k].append(slice(None)) slices[0][axis] = slice(None,None,2) slices[1][axis] = slice(None,N/2) slices[2][axis] = slice(N,None,-2) slices[3][axis] = slice(N/2,None) for k in range(4): slices[k] = tuple(slices[k]) x[slices[0]] = xhat[slices[1]] x[slices[2]] = xhat[slices[3]] return x
8a6c0e7c7c6d0ef9ca4896d8af69746dcb51ce58 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/8a6c0e7c7c6d0ef9ca4896d8af69746dcb51ce58/transforms.py
def yield():
def Yield():
def yield(): if not threaded(): # forces the event handlers to finish their work. # this also forces deletion of windows. wxYield() else: time.sleep(0.05) # sync threads
243de5d394f54b6172aeb50925b882a9dcdb8125 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/243de5d394f54b6172aeb50925b882a9dcdb8125/test_gui_thread.py
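The yield -> Yield rename in this record is forced by the language: once generators made yield a reserved word, def yield(): no longer parses. A quick demonstration:

try:
    compile("def yield(): pass", "<test>", "exec")
except SyntaxError as e:
    print("SyntaxError:", e.msg)

compile("def Yield(): pass", "<test>", "exec")   # parses fine
print("Yield() compiles")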
yield()
Yield()
def check_wx_class(self): "Checking a wxFrame proxied class" for i in range(5): f = gui_thread.register(TestFrame) a = f(None) p = weakref.ref(a) a.Close(1) del a yield() # this checks for memory leaks self.assertEqual(is_alive(p), 0)
243de5d394f54b6172aeb50925b882a9dcdb8125 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/243de5d394f54b6172aeb50925b882a9dcdb8125/test_gui_thread.py
yield()
Yield()
def check_exception(self): "Checking exception handling" f = gui_thread.register(TestFrame) a = f(None) p = weakref.ref(a) self.assertRaises(TypeError, a.Close, 1, 2, 3) a.Close() del a yield() # this checks for memory leaks self.assertEqual(is_alive(p), 0)
243de5d394f54b6172aeb50925b882a9dcdb8125 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/243de5d394f54b6172aeb50925b882a9dcdb8125/test_gui_thread.py
y = x
y = scipy.squeeze(x)
def plot(x,*args,**keywds): """Plot curves. Description: Plot one or more curves on the same graph. Inputs: There can be a variable number of inputs which consist of pairs or triples. The second variable is plotted against the first using the linetype specified by the optional third variable in the triple. If only two plots are being compared, the x-axis does not have to be repeated. """ try: override = 1 savesys = gist.plsys(2) gist.plsys(savesys) except: override = 0 global _hold try: _hold=keywds['hold'] except KeyError: pass try: linewidth=float(keywds['width']) except KeyError: linewidth=1.0 if _hold or override: pass else: gist.fma() gist.animate(0) savesys = gist.plsys() winnum = gist.window() if winnum < 0: gist.window(0) gist.plsys(savesys) nargs = len(args) if nargs == 0: y = x x = Numeric.arange(0,len(y)) if scipy.iscomplexobj(y): print "Warning: complex data plotting real part." y = y.real y = where(scipy.isfinite(y),y,0) gist.plg(y,x,type='solid',color='blue',marks=0,width=linewidth) return y = args[0] argpos = 1 nowplotting = 0 clear_global_linetype() while 1: try: thearg = args[argpos] except IndexError: thearg = 0 thetype,thecolor,themarker,tomark = _parse_type_arg(thearg,nowplotting) if themarker == 'Z': # args[argpos] was data or non-existent. pass append_global_linetype(_rtypes[thetype]+_rcolors[thecolor]) else: # args[argpos] was a string argpos = argpos + 1 if tomark: append_global_linetype(_rtypes[thetype]+_rcolors[thecolor]+_rmarkers[themarker]) else: append_global_linetype(_rtypes[thetype]+_rcolors[thecolor]) if scipy.iscomplexobj(x) or scipy.iscomplexobj(y): print "Warning: complex data provided, using only real part." x = scipy.real(x) y = scipy.real(y) y = where(scipy.isfinite(y),y,0) y = scipy.squeeze(y) x = scipy.squeeze(x) gist.plg(y,x,type=thetype,color=thecolor,marker=themarker,marks=tomark,width=linewidth) nowplotting = nowplotting + 1 ## Argpos is pointing to the next potential triple of data. ## Now one of four things can happen: ## ## 1: argpos points to data, argpos+1 is a string ## 2: argpos points to data, end ## 3: argpos points to data, argpos+1 is data ## 4: argpos points to data, argpos+1 is data, argpos+2 is a string if argpos >= nargs: break # no more data if argpos == nargs-1: # this is a single data value. x = x y = args[argpos] argpos = argpos+1 elif type(args[argpos+1]) is types.StringType: x = x y = args[argpos] argpos = argpos+1 else: # 3 x = args[argpos] y = args[argpos+1] argpos = argpos+2 return
4e8fab37088be4d995495a2bf42d3431f99b98a5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/4e8fab37088be4d995495a2bf42d3431f99b98a5/Mplot.py
z = norm(size=self._size)
z = norm.rvs(size=self._size)
def _rvs(self, c): z = norm(size=self._size) U = random(size=self._size) fac = 2 + c*c*z*z det = sqrt(fac*fac - 4) t1 = fac + det t2 = fac - det return t1*(U>0.5) + t2*(U<0.5)
946f237342bf50f49644a9813ca1ee8188f2aa5a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/946f237342bf50f49644a9813ca1ee8188f2aa5a/distributions.py
return abs(norm(mu=c,size=self._size))
return abs(norm.rvs(mu=c,size=self._size))
def _rvs(self, c): return abs(norm(mu=c,size=self._size))
946f237342bf50f49644a9813ca1ee8188f2aa5a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/946f237342bf50f49644a9813ca1ee8188f2aa5a/distributions.py
return abs(norm(size=self._size))
return abs(norm.rvs(size=self._size))
def _rvs(self): return abs(norm(size=self._size))
946f237342bf50f49644a9813ca1ee8188f2aa5a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/946f237342bf50f49644a9813ca1ee8188f2aa5a/distributions.py
return exp(s * norm(size=self._size))
return exp(s * norm.rvs(size=self._size))
def _rvs(self, s): return exp(s * norm(size=self._size))
946f237342bf50f49644a9813ca1ee8188f2aa5a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/946f237342bf50f49644a9813ca1ee8188f2aa5a/distributions.py
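These records all move from calling the distribution object directly to calling its .rvs() method. The same pattern on a current scipy.stats installation (assumed available) looks like:

import numpy as np
from scipy.stats import norm

z = norm.rvs(size=5)                 # standard normal draws
x = np.exp(0.5 * norm.rvs(size=5))   # lognormal via exp(s*Z), as in the _rvs above
print(z.shape, x.shape)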
if val not in [0,1,2]:
if val not in [0,1,2] :
def _bvalfromboundary(boundary): try: val = _boundarydict[boundary] << 2 except KeyError: if val not in [0,1,2]: raise ValueError, "Acceptable boundary flags are 'fill', 'wrap' (or 'circular'), \n and 'symm' (or 'symmetric')." val = boundary << 2 return val
b41f545fde26161086b1e64fa57c883a55c8e511 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b41f545fde26161086b1e64fa57c883a55c8e511/signaltools.py
M,rest = divmod(header[0],1000)
M,rest = divmod(int(header[0]),1000)
def loadmat(name, dict=None, appendmat=1, basename='raw'): """Load the MATLAB(tm) mat file. If name is a full path name load it in. Otherwise search for the file on the sys.path list and load the first one found (the current directory is searched first). Both v4 (Level 1.0) and v6 matfiles are supported. Version 7.0 files are not yet supported. Inputs: name -- name of the mat file (don't need .mat extension if appendmat=1) dict -- the dictionary to insert into. If none the variables will be returned in a dictionary. appendmat -- non-zero to append the .mat extension to the end of the given filename. basename -- for MATLAB(tm) v5 matfiles raw data will have this basename. Outputs: If dict is None, then a dictionary of names and objects representing the stored arrays is returned. """ if appendmat and name[-4:] == ".mat": name = name[:-4] if os.sep in name: full_name = name if appendmat: full_name = name + ".mat" else: full_name = None junk,name = os.path.split(name) for path in sys.path: test_name = os.path.join(path,name) if appendmat: test_name += ".mat" try: fid = open(test_name,'rb') fid.close() full_name = test_name break except IOError: pass if full_name is None: raise IOError, "%s not found on the path." % name fid = fopen(full_name,'rb') test_vals = fid.fread(4,'byte') if not (0 in test_vals): # MATLAB version 5 format fid.rewind() thisdict = _loadv5(fid,basename) if dict is not None: dict.update(thisdict) return else: return thisdict testtype = struct.unpack('i',test_vals.tostring()) # Check to see if the number is positive and less than 5000. if testtype[0] < 0 or testtype[0] > 4999: # wrong byte-order if LittleEndian: format = 'ieee-be' else: format = 'ieee-le' else: # otherwise we are O.K. if LittleEndian: format = 'ieee-le' else: format = 'ieee-be' fid.setformat(format) length = fid.size() fid.rewind() # back to the begining defnames = [] thisdict = {} while 1: if (fid.tell() == length): break header = fid.fread(5,'int') if len(header) != 5: fid.close() print "Warning: Read error in file." break M,rest = divmod(header[0],1000) O,rest = divmod(rest,100) P,rest = divmod(rest,10) T = rest if (M > 1): fid.close() raise ValueError, "Unsupported binary format." if (O != 0): fid.close() raise ValuError, "Hundreds digit of first integer should be zero." if (T not in [0,1]): fid.close() raise ValueError, "Cannot handle sparse matrices, yet." storage = {0:'d',1:'f',2:'i',3:'h',4:'H',5:'B'}[P] varname = fid.fread(header[-1],'char')[:-1] varname = varname.tostring() defnames.append(varname) numels = header[1]*header[2] if T == 0: # Text data data = atleast_1d(fid.fread(numels,storage)) if header[3]: # imaginary data data2 = fid.fread(numels,storage) new = zeros(data.shape,data.dtype.char.capitalize()) new.real = data new.imag = data2 data = new del(new) del(data2) if len(data) > 1: data=data.reshape((header[2], header[1]) ) thisdict[varname] = transpose(squeeze(data)) else: thisdict[varname] = data else: data = atleast_1d(fid.fread(numels,storage,'char')) if len(data) > 1: data=data.reshape((header[2], header[1])) thisdict[varname] = transpose(squeeze(data)) else: thisdict[varname] = data fid.close() if dict is not None: print "Names defined = ", defnames dict.update(thisdict) else: return thisdict
e9bada72665b0b94737a77ce17abec482d2cbc42 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/e9bada72665b0b94737a77ce17abec482d2cbc42/mio.py
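The int() cast added above feeds divmod, which peels apart the digits of the v4 header word: roughly, type = M*1000 + O*100 + P*10 + T, where M selects the numeric format/byte order (values above 1 are rejected by the loader), P indexes the storage table {0:'d',1:'f',2:'i',...} shown in the context, and T is the matrix type. A tiny worked example:

header0 = 10.0            # as read from the file this may arrive as a float

M, rest = divmod(int(header0), 1000)
O, rest = divmod(rest, 100)
P, T = divmod(rest, 10)
print(M, O, P, T)         # 0 0 1 0  -> storage code 1 ('f' in the table above)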
if cfcn = None:
if cfcn is None:
def collapse (a,keepcols,collapsecols,stderr=0,ns=0,cfcn=None): """Averages data in collapsecol, keeping all unique items in keepcols (using unique, which keeps unique LISTS of column numbers), retaining the unique sets of values in keepcols, the mean for each. If the sterr or N of the mean are desired, set either or both parameters to 1. Returns: unique 'conditions' specified by the contents of columns specified by keepcols, abutted with the mean(s) of column(s) specified by collapsecols """ if cfcn = None: cfcn = stats.mean a = asarray(a) if keepcols == []: avgcol = colex(a,collapsecols) means = cfcn(avgcol) return means else: if type(keepcols) not in [ListType,TupleType,N.ArrayType]: keepcols = [keepcols] values = colex(a,keepcols) # so that "item" can be appended (below) uniques = unique(values) # get a LIST, so .sort keeps rows intact uniques.sort() newlist = [] for item in uniques: if type(item) not in [ListType,TupleType,N.ArrayType]: item =[item] tmprows = linexand(a,keepcols,item) for col in collapsecols: avgcol = colex(tmprows,col) item.append(cfcn(avgcol)) if sterr: if len(avgcol)>1: item.append(stats.sterr(avgcol)) else: item.append('N/A') if ns: item.append(len(avgcol)) newlist.append(item) try: new_a = N.array(newlist) except TypeError: new_a = N.array(newlist,'O') return new_a
052f2466349cfd89593eb1fb66adb0be65a5b10c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/052f2466349cfd89593eb1fb66adb0be65a5b10c/pstat.py
def figure(n=None, style='/tmp/currstyle.gs', color=-2, frame=0, labelsize=14, labelfont='helvetica',aspect=1.618,dpi=75):
def setdpi(num):
    """ Set the dpi for new windows """
    if num in [75,100]:
        _dpi = num
        gist.set_default_dpi(_dpi)
    else:
        raise ValueError, "DPI must be 75 or 100"

def figure(n=None,style='/tmp/currstyle.gs', color=-2, frame=0, labelsize=14, labelfont='helvetica',aspect=1.618):
    global _figures
def figure(n=None, style='/tmp/currstyle.gs', color=-2, frame=0, labelsize=14, labelfont='helvetica',aspect=1.618,dpi=75): if (aspect < 0.1) or (aspect > 10): aspect = 1.618 if isinstance(color, types.StringType): color = _colornum[color] fid = open(style,'w') syst = write_style.getsys(color=color,frame=frame, labelsize=labelsize,font=labelfont) cntr = (5.5*inches,4.25*inches) # horizontal, vertical height = 4.25*inches width = aspect*height syst['viewport'] = [cntr[0]-width/2.0,cntr[0]+width/2.0,cntr[1]-height/2.0,cntr[1]+height/2.0] fid.write(write_style.style2string(syst,landscape=1)) fid.close() if n is None: gist.window(style=style,width=int(width*1.25/inches*dpi),height=int(height*1.4/inches*dpi),dpi=dpi) else: gist.window(n,style=style,width=int(width*1.25/inches*dpi),height=int(height*1.4/inches*dpi),dpi=dpi) _current_style = style return
52cc5149afc80e6ec63ff8f313429740b6845b1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/52cc5149afc80e6ec63ff8f313429740b6845b1a/Mplot.py
    gist.window(style=style,width=int(width*1.25/inches*dpi),height=int(height*1.4/inches*dpi),dpi=dpi)
else:
    gist.window(n,style=style,width=int(width*1.25/inches*dpi),height=int(height*1.4/inches*dpi),dpi=dpi)
_current_style = style
    winnum = gist.window(style=style,width=int(width*1.25/inches*_dpi),height=int(height*1.4/inches*_dpi))
    if winnum < 0:
        gist.window(style=style,width=int(width*1.25/inches*_dpi),height=int(height*1.4/inches*_dpi))
else:
    gist.window(n,style=style,width=int(width*1.25/inches*_dpi),height=int(height*1.4/inches*_dpi))
_current_style = style
def figure(n=None, style='/tmp/currstyle.gs', color=-2, frame=0, labelsize=14, labelfont='helvetica',aspect=1.618,dpi=75): if (aspect < 0.1) or (aspect > 10): aspect = 1.618 if isinstance(color, types.StringType): color = _colornum[color] fid = open(style,'w') syst = write_style.getsys(color=color,frame=frame, labelsize=labelsize,font=labelfont) cntr = (5.5*inches,4.25*inches) # horizontal, vertical height = 4.25*inches width = aspect*height syst['viewport'] = [cntr[0]-width/2.0,cntr[0]+width/2.0,cntr[1]-height/2.0,cntr[1]+height/2.0] fid.write(write_style.style2string(syst,landscape=1)) fid.close() if n is None: gist.window(style=style,width=int(width*1.25/inches*dpi),height=int(height*1.4/inches*dpi),dpi=dpi) else: gist.window(n,style=style,width=int(width*1.25/inches*dpi),height=int(height*1.4/inches*dpi),dpi=dpi) _current_style = style return
52cc5149afc80e6ec63ff8f313429740b6845b1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/52cc5149afc80e6ec63ff8f313429740b6845b1a/Mplot.py
def full_page(win,dpi=75): gist.window(win,style=_current_style,width=int(dpi*8.5),height=dpi*11,dpi=dpi)
def full_page(win): gist.window(win,style=_current_style,width=int(_dpi*8.5),height=_dpi*11)
def full_page(win,dpi=75): gist.window(win,style=_current_style,width=int(dpi*8.5),height=dpi*11,dpi=dpi)
52cc5149afc80e6ec63ff8f313429740b6845b1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/52cc5149afc80e6ec63ff8f313429740b6845b1a/Mplot.py
def subplot(Numy,Numx,win=0,pw=None,ph=None,hsep=100,vsep=100,color='black',frame=0,fontsize=8,font=None,dpi=100,ticks=1):
def subplot(Numy,Numx,win=0,pw=None,ph=None,hsep=100,vsep=100,color='black',frame=0,fontsize=8,font=None,ticks=1):
def subplot(Numy,Numx,win=0,pw=None,ph=None,hsep=100,vsep=100,color='black',frame=0,fontsize=8,font=None,dpi=100,ticks=1): # Use gist.plsys to change coordinate systems # all inputs (except fontsize) given as pixels, gist wants # things in normalized device # coordinate. Window is brought up with center of window at # center of 8.5 x 11 inch page: in landscape mode (5.25, 4.25) # or at position (4.25,6.75) for portrait mode msg = 1 if pw is None: pw = Numx*300 msg = 0 if ph is None: ph = Numy*300 msg = 0 maxwidth=int(os.environ.get('XPLT_MAXWIDTH')) maxheight=int(os.environ.get('XPLT_MAXHEIGHT')) printit = 0 if ph > maxheight: ph = maxheight printit = 1 if pw > maxwidth: pw = maxwidth printit = 1 if dpi != 100: dpi = 75 fontsize = 12 conv = inches *1.0 / dpi # multiply by this factor to convert pixels to # NDC # Use landscape mode unless requested height is large land = 1 maxw = 11*dpi maxh = 8.5*dpi if ph > (8.5*dpi) and pw < (8.5*dpi): land = 0 maxh = 11*dpi maxw = 8.5*dpi if ph > maxh: ph = maxh printit=1 if pw > maxw: pw = maxw printit=1 if printit and msg: message = "Warning: Requested height and width too large.\n" message +="Changing to %d x %d" % (pw,ph) print message # Now we've got a suitable height and width if land: cntr = array([5.5,4.25])*dpi # landscape else: cntr = array([4.25,6.75])*dpi # portrait Yspace = ph/float(Numy)*conv Xspace = pw/float(Numx)*conv hsep = hsep * conv vsep = vsep * conv ytop = (cntr[1]+ph/2.0)*conv xleft = (cntr[0]-pw/2.0)*conv if type(color) is types.StringType: color = _colornum[color] systems=[] ind = -1 for nY in range(Numy): ystart = ytop - (nY+1)*Yspace for nX in range(Numx): xstart = xleft + nX*Xspace systems.append({}) systems[-1]['viewport'] = [xstart+hsep/2.0,xstart+Xspace-hsep/2.0,ystart+vsep/2.0,ystart+Yspace-vsep/2.0] if font is not None or fontsize is not None: _chng_font(systems[-1],font,fontsize) if color != -3 or frame != 0: _add_color(systems[-1],color,frame=frame) if ticks != 1: _remove_ticks(systems[-1]) _current_style='/tmp/subplot%s.gs' % win fid = open(_current_style,'w') fid.write(write_style.style2string(systems,landscape=land)) fid.close() gist.winkill(win) gist.window(win,style=_current_style,width=int(pw),height=int(ph),dpi=100)
52cc5149afc80e6ec63ff8f313429740b6845b1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/52cc5149afc80e6ec63ff8f313429740b6845b1a/Mplot.py
if dpi != 100: dpi = 75
if _dpi != 100:
def subplot(Numy,Numx,win=0,pw=None,ph=None,hsep=100,vsep=100,color='black',frame=0,fontsize=8,font=None,dpi=100,ticks=1): # Use gist.plsys to change coordinate systems # all inputs (except fontsize) given as pixels, gist wants # things in normalized device # coordinate. Window is brought up with center of window at # center of 8.5 x 11 inch page: in landscape mode (5.25, 4.25) # or at position (4.25,6.75) for portrait mode msg = 1 if pw is None: pw = Numx*300 msg = 0 if ph is None: ph = Numy*300 msg = 0 maxwidth=int(os.environ.get('XPLT_MAXWIDTH')) maxheight=int(os.environ.get('XPLT_MAXHEIGHT')) printit = 0 if ph > maxheight: ph = maxheight printit = 1 if pw > maxwidth: pw = maxwidth printit = 1 if dpi != 100: dpi = 75 fontsize = 12 conv = inches *1.0 / dpi # multiply by this factor to convert pixels to # NDC # Use landscape mode unless requested height is large land = 1 maxw = 11*dpi maxh = 8.5*dpi if ph > (8.5*dpi) and pw < (8.5*dpi): land = 0 maxh = 11*dpi maxw = 8.5*dpi if ph > maxh: ph = maxh printit=1 if pw > maxw: pw = maxw printit=1 if printit and msg: message = "Warning: Requested height and width too large.\n" message +="Changing to %d x %d" % (pw,ph) print message # Now we've got a suitable height and width if land: cntr = array([5.5,4.25])*dpi # landscape else: cntr = array([4.25,6.75])*dpi # portrait Yspace = ph/float(Numy)*conv Xspace = pw/float(Numx)*conv hsep = hsep * conv vsep = vsep * conv ytop = (cntr[1]+ph/2.0)*conv xleft = (cntr[0]-pw/2.0)*conv if type(color) is types.StringType: color = _colornum[color] systems=[] ind = -1 for nY in range(Numy): ystart = ytop - (nY+1)*Yspace for nX in range(Numx): xstart = xleft + nX*Xspace systems.append({}) systems[-1]['viewport'] = [xstart+hsep/2.0,xstart+Xspace-hsep/2.0,ystart+vsep/2.0,ystart+Yspace-vsep/2.0] if font is not None or fontsize is not None: _chng_font(systems[-1],font,fontsize) if color != -3 or frame != 0: _add_color(systems[-1],color,frame=frame) if ticks != 1: _remove_ticks(systems[-1]) _current_style='/tmp/subplot%s.gs' % win fid = open(_current_style,'w') fid.write(write_style.style2string(systems,landscape=land)) fid.close() gist.winkill(win) gist.window(win,style=_current_style,width=int(pw),height=int(ph),dpi=100)
52cc5149afc80e6ec63ff8f313429740b6845b1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/52cc5149afc80e6ec63ff8f313429740b6845b1a/Mplot.py
conv = inches *1.0 / dpi
conv = inches *1.0 / _dpi
def subplot(Numy,Numx,win=0,pw=None,ph=None,hsep=100,vsep=100,color='black',frame=0,fontsize=8,font=None,dpi=100,ticks=1): # Use gist.plsys to change coordinate systems # all inputs (except fontsize) given as pixels, gist wants # things in normalized device # coordinate. Window is brought up with center of window at # center of 8.5 x 11 inch page: in landscape mode (5.25, 4.25) # or at position (4.25,6.75) for portrait mode msg = 1 if pw is None: pw = Numx*300 msg = 0 if ph is None: ph = Numy*300 msg = 0 maxwidth=int(os.environ.get('XPLT_MAXWIDTH')) maxheight=int(os.environ.get('XPLT_MAXHEIGHT')) printit = 0 if ph > maxheight: ph = maxheight printit = 1 if pw > maxwidth: pw = maxwidth printit = 1 if dpi != 100: dpi = 75 fontsize = 12 conv = inches *1.0 / dpi # multiply by this factor to convert pixels to # NDC # Use landscape mode unless requested height is large land = 1 maxw = 11*dpi maxh = 8.5*dpi if ph > (8.5*dpi) and pw < (8.5*dpi): land = 0 maxh = 11*dpi maxw = 8.5*dpi if ph > maxh: ph = maxh printit=1 if pw > maxw: pw = maxw printit=1 if printit and msg: message = "Warning: Requested height and width too large.\n" message +="Changing to %d x %d" % (pw,ph) print message # Now we've got a suitable height and width if land: cntr = array([5.5,4.25])*dpi # landscape else: cntr = array([4.25,6.75])*dpi # portrait Yspace = ph/float(Numy)*conv Xspace = pw/float(Numx)*conv hsep = hsep * conv vsep = vsep * conv ytop = (cntr[1]+ph/2.0)*conv xleft = (cntr[0]-pw/2.0)*conv if type(color) is types.StringType: color = _colornum[color] systems=[] ind = -1 for nY in range(Numy): ystart = ytop - (nY+1)*Yspace for nX in range(Numx): xstart = xleft + nX*Xspace systems.append({}) systems[-1]['viewport'] = [xstart+hsep/2.0,xstart+Xspace-hsep/2.0,ystart+vsep/2.0,ystart+Yspace-vsep/2.0] if font is not None or fontsize is not None: _chng_font(systems[-1],font,fontsize) if color != -3 or frame != 0: _add_color(systems[-1],color,frame=frame) if ticks != 1: _remove_ticks(systems[-1]) _current_style='/tmp/subplot%s.gs' % win fid = open(_current_style,'w') fid.write(write_style.style2string(systems,landscape=land)) fid.close() gist.winkill(win) gist.window(win,style=_current_style,width=int(pw),height=int(ph),dpi=100)
52cc5149afc80e6ec63ff8f313429740b6845b1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/52cc5149afc80e6ec63ff8f313429740b6845b1a/Mplot.py
    maxw = 11*dpi
    maxh = 8.5*dpi
    if ph > (8.5*dpi) and pw < (8.5*dpi):
    maxw = 11*_dpi
    maxh = 8.5*_dpi
    if ph > (8.5*_dpi) and pw < (8.5*_dpi):
def subplot(Numy,Numx,win=0,pw=None,ph=None,hsep=100,vsep=100,color='black',frame=0,fontsize=8,font=None,dpi=100,ticks=1): # Use gist.plsys to change coordinate systems # all inputs (except fontsize) given as pixels, gist wants # things in normalized device # coordinate. Window is brought up with center of window at # center of 8.5 x 11 inch page: in landscape mode (5.25, 4.25) # or at position (4.25,6.75) for portrait mode msg = 1 if pw is None: pw = Numx*300 msg = 0 if ph is None: ph = Numy*300 msg = 0 maxwidth=int(os.environ.get('XPLT_MAXWIDTH')) maxheight=int(os.environ.get('XPLT_MAXHEIGHT')) printit = 0 if ph > maxheight: ph = maxheight printit = 1 if pw > maxwidth: pw = maxwidth printit = 1 if dpi != 100: dpi = 75 fontsize = 12 conv = inches *1.0 / dpi # multiply by this factor to convert pixels to # NDC # Use landscape mode unless requested height is large land = 1 maxw = 11*dpi maxh = 8.5*dpi if ph > (8.5*dpi) and pw < (8.5*dpi): land = 0 maxh = 11*dpi maxw = 8.5*dpi if ph > maxh: ph = maxh printit=1 if pw > maxw: pw = maxw printit=1 if printit and msg: message = "Warning: Requested height and width too large.\n" message +="Changing to %d x %d" % (pw,ph) print message # Now we've got a suitable height and width if land: cntr = array([5.5,4.25])*dpi # landscape else: cntr = array([4.25,6.75])*dpi # portrait Yspace = ph/float(Numy)*conv Xspace = pw/float(Numx)*conv hsep = hsep * conv vsep = vsep * conv ytop = (cntr[1]+ph/2.0)*conv xleft = (cntr[0]-pw/2.0)*conv if type(color) is types.StringType: color = _colornum[color] systems=[] ind = -1 for nY in range(Numy): ystart = ytop - (nY+1)*Yspace for nX in range(Numx): xstart = xleft + nX*Xspace systems.append({}) systems[-1]['viewport'] = [xstart+hsep/2.0,xstart+Xspace-hsep/2.0,ystart+vsep/2.0,ystart+Yspace-vsep/2.0] if font is not None or fontsize is not None: _chng_font(systems[-1],font,fontsize) if color != -3 or frame != 0: _add_color(systems[-1],color,frame=frame) if ticks != 1: _remove_ticks(systems[-1]) _current_style='/tmp/subplot%s.gs' % win fid = open(_current_style,'w') fid.write(write_style.style2string(systems,landscape=land)) fid.close() gist.winkill(win) gist.window(win,style=_current_style,width=int(pw),height=int(ph),dpi=100)
52cc5149afc80e6ec63ff8f313429740b6845b1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/52cc5149afc80e6ec63ff8f313429740b6845b1a/Mplot.py
        maxh = 11*dpi
        maxw = 8.5*dpi
        maxh = 11*_dpi
        maxw = 8.5*_dpi
def subplot(Numy,Numx,win=0,pw=None,ph=None,hsep=100,vsep=100,color='black',frame=0,fontsize=8,font=None,dpi=100,ticks=1): # Use gist.plsys to change coordinate systems # all inputs (except fontsize) given as pixels, gist wants # things in normalized device # coordinate. Window is brought up with center of window at # center of 8.5 x 11 inch page: in landscape mode (5.25, 4.25) # or at position (4.25,6.75) for portrait mode msg = 1 if pw is None: pw = Numx*300 msg = 0 if ph is None: ph = Numy*300 msg = 0 maxwidth=int(os.environ.get('XPLT_MAXWIDTH')) maxheight=int(os.environ.get('XPLT_MAXHEIGHT')) printit = 0 if ph > maxheight: ph = maxheight printit = 1 if pw > maxwidth: pw = maxwidth printit = 1 if dpi != 100: dpi = 75 fontsize = 12 conv = inches *1.0 / dpi # multiply by this factor to convert pixels to # NDC # Use landscape mode unless requested height is large land = 1 maxw = 11*dpi maxh = 8.5*dpi if ph > (8.5*dpi) and pw < (8.5*dpi): land = 0 maxh = 11*dpi maxw = 8.5*dpi if ph > maxh: ph = maxh printit=1 if pw > maxw: pw = maxw printit=1 if printit and msg: message = "Warning: Requested height and width too large.\n" message +="Changing to %d x %d" % (pw,ph) print message # Now we've got a suitable height and width if land: cntr = array([5.5,4.25])*dpi # landscape else: cntr = array([4.25,6.75])*dpi # portrait Yspace = ph/float(Numy)*conv Xspace = pw/float(Numx)*conv hsep = hsep * conv vsep = vsep * conv ytop = (cntr[1]+ph/2.0)*conv xleft = (cntr[0]-pw/2.0)*conv if type(color) is types.StringType: color = _colornum[color] systems=[] ind = -1 for nY in range(Numy): ystart = ytop - (nY+1)*Yspace for nX in range(Numx): xstart = xleft + nX*Xspace systems.append({}) systems[-1]['viewport'] = [xstart+hsep/2.0,xstart+Xspace-hsep/2.0,ystart+vsep/2.0,ystart+Yspace-vsep/2.0] if font is not None or fontsize is not None: _chng_font(systems[-1],font,fontsize) if color != -3 or frame != 0: _add_color(systems[-1],color,frame=frame) if ticks != 1: _remove_ticks(systems[-1]) _current_style='/tmp/subplot%s.gs' % win fid = open(_current_style,'w') fid.write(write_style.style2string(systems,landscape=land)) fid.close() gist.winkill(win) gist.window(win,style=_current_style,width=int(pw),height=int(ph),dpi=100)
52cc5149afc80e6ec63ff8f313429740b6845b1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/52cc5149afc80e6ec63ff8f313429740b6845b1a/Mplot.py
        cntr = array([5.5,4.25])*dpi
    else:
        cntr = array([4.25,6.75])*dpi
        cntr = array([5.5,4.25])*_dpi
    else:
        cntr = array([4.25,6.75])*_dpi
def subplot(Numy,Numx,win=0,pw=None,ph=None,hsep=100,vsep=100,color='black',frame=0,fontsize=8,font=None,dpi=100,ticks=1): # Use gist.plsys to change coordinate systems # all inputs (except fontsize) given as pixels, gist wants # things in normalized device # coordinate. Window is brought up with center of window at # center of 8.5 x 11 inch page: in landscape mode (5.25, 4.25) # or at position (4.25,6.75) for portrait mode msg = 1 if pw is None: pw = Numx*300 msg = 0 if ph is None: ph = Numy*300 msg = 0 maxwidth=int(os.environ.get('XPLT_MAXWIDTH')) maxheight=int(os.environ.get('XPLT_MAXHEIGHT')) printit = 0 if ph > maxheight: ph = maxheight printit = 1 if pw > maxwidth: pw = maxwidth printit = 1 if dpi != 100: dpi = 75 fontsize = 12 conv = inches *1.0 / dpi # multiply by this factor to convert pixels to # NDC # Use landscape mode unless requested height is large land = 1 maxw = 11*dpi maxh = 8.5*dpi if ph > (8.5*dpi) and pw < (8.5*dpi): land = 0 maxh = 11*dpi maxw = 8.5*dpi if ph > maxh: ph = maxh printit=1 if pw > maxw: pw = maxw printit=1 if printit and msg: message = "Warning: Requested height and width too large.\n" message +="Changing to %d x %d" % (pw,ph) print message # Now we've got a suitable height and width if land: cntr = array([5.5,4.25])*dpi # landscape else: cntr = array([4.25,6.75])*dpi # portrait Yspace = ph/float(Numy)*conv Xspace = pw/float(Numx)*conv hsep = hsep * conv vsep = vsep * conv ytop = (cntr[1]+ph/2.0)*conv xleft = (cntr[0]-pw/2.0)*conv if type(color) is types.StringType: color = _colornum[color] systems=[] ind = -1 for nY in range(Numy): ystart = ytop - (nY+1)*Yspace for nX in range(Numx): xstart = xleft + nX*Xspace systems.append({}) systems[-1]['viewport'] = [xstart+hsep/2.0,xstart+Xspace-hsep/2.0,ystart+vsep/2.0,ystart+Yspace-vsep/2.0] if font is not None or fontsize is not None: _chng_font(systems[-1],font,fontsize) if color != -3 or frame != 0: _add_color(systems[-1],color,frame=frame) if ticks != 1: _remove_ticks(systems[-1]) _current_style='/tmp/subplot%s.gs' % win fid = open(_current_style,'w') fid.write(write_style.style2string(systems,landscape=land)) fid.close() gist.winkill(win) gist.window(win,style=_current_style,width=int(pw),height=int(ph),dpi=100)
52cc5149afc80e6ec63ff8f313429740b6845b1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/52cc5149afc80e6ec63ff8f313429740b6845b1a/Mplot.py
gist.window(win,style=_current_style,width=int(pw),height=int(ph),dpi=100)
gist.window(win,style=_current_style,width=int(pw),height=int(ph))
def subplot(Numy,Numx,win=0,pw=None,ph=None,hsep=100,vsep=100,color='black',frame=0,fontsize=8,font=None,dpi=100,ticks=1): # Use gist.plsys to change coordinate systems # all inputs (except fontsize) given as pixels, gist wants # things in normalized device # coordinate. Window is brought up with center of window at # center of 8.5 x 11 inch page: in landscape mode (5.25, 4.25) # or at position (4.25,6.75) for portrait mode msg = 1 if pw is None: pw = Numx*300 msg = 0 if ph is None: ph = Numy*300 msg = 0 maxwidth=int(os.environ.get('XPLT_MAXWIDTH')) maxheight=int(os.environ.get('XPLT_MAXHEIGHT')) printit = 0 if ph > maxheight: ph = maxheight printit = 1 if pw > maxwidth: pw = maxwidth printit = 1 if dpi != 100: dpi = 75 fontsize = 12 conv = inches *1.0 / dpi # multiply by this factor to convert pixels to # NDC # Use landscape mode unless requested height is large land = 1 maxw = 11*dpi maxh = 8.5*dpi if ph > (8.5*dpi) and pw < (8.5*dpi): land = 0 maxh = 11*dpi maxw = 8.5*dpi if ph > maxh: ph = maxh printit=1 if pw > maxw: pw = maxw printit=1 if printit and msg: message = "Warning: Requested height and width too large.\n" message +="Changing to %d x %d" % (pw,ph) print message # Now we've got a suitable height and width if land: cntr = array([5.5,4.25])*dpi # landscape else: cntr = array([4.25,6.75])*dpi # portrait Yspace = ph/float(Numy)*conv Xspace = pw/float(Numx)*conv hsep = hsep * conv vsep = vsep * conv ytop = (cntr[1]+ph/2.0)*conv xleft = (cntr[0]-pw/2.0)*conv if type(color) is types.StringType: color = _colornum[color] systems=[] ind = -1 for nY in range(Numy): ystart = ytop - (nY+1)*Yspace for nX in range(Numx): xstart = xleft + nX*Xspace systems.append({}) systems[-1]['viewport'] = [xstart+hsep/2.0,xstart+Xspace-hsep/2.0,ystart+vsep/2.0,ystart+Yspace-vsep/2.0] if font is not None or fontsize is not None: _chng_font(systems[-1],font,fontsize) if color != -3 or frame != 0: _add_color(systems[-1],color,frame=frame) if ticks != 1: _remove_ticks(systems[-1]) _current_style='/tmp/subplot%s.gs' % win fid = open(_current_style,'w') fid.write(write_style.style2string(systems,landscape=land)) fid.close() gist.winkill(win) gist.window(win,style=_current_style,width=int(pw),height=int(ph),dpi=100)
52cc5149afc80e6ec63ff8f313429740b6845b1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/52cc5149afc80e6ec63ff8f313429740b6845b1a/Mplot.py
        q=scipy.split(a,len(self.workers))
        herd.cluster.loop_code(name+'=_q_','_q_',inputs={'_q_':q},returns=(),global_vars=(name,))

    def row_rather(self,name):
        import scipy
        q=scipy.split(sequence,len(self.workers))
        self.loop_code(name+'=_q_','_q_',inputs={'_q_':q},returns=(),global_vars=(name,))

    def row_gather(self,name):
    def row_split(self,name,sequence):
        """experimental"""
        q=scipy.split(a,len(self.workers))
        herd.cluster.loop_code(name+'=_q_','_q_',inputs={'_q_':q},returns=(),global_vars=(name,))
c58c5bf3d37c071c867568af46cd55d7fbba918a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c58c5bf3d37c071c867568af46cd55d7fbba918a/cow.py
concatenate(herd.cluster.[name])
        from Numeric import concatenate
        return concatenate(self[name])
    def row_rather(self,name):
        """experimental"""
        concatenate(herd.cluster.[name])
c58c5bf3d37c071c867568af46cd55d7fbba918a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c58c5bf3d37c071c867568af46cd55d7fbba918a/cow.py
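The two cow.py records above replace a misspelled, broken prototype (row_rather, the undefined name a, hard-wired herd.cluster globals) with working row_split/row_gather methods. A minimal sketch of the same split/gather pattern, assuming NumPy's array_split and concatenate, with plain dictionaries standing in for cow's remote worker namespaces:

import numpy as np

class FakeCluster:
    def __init__(self, nworkers):
        # one namespace per worker; the real class talks to remote interpreters
        self.workers = [dict() for _ in range(nworkers)]

    def row_split(self, name, sequence):
        # scatter roughly equal row blocks of `sequence` to the workers
        chunks = np.array_split(np.asarray(sequence), len(self.workers))
        for namespace, chunk in zip(self.workers, chunks):
            namespace[name] = chunk

    def row_gather(self, name):
        # concatenate the per-worker blocks back into one array
        return np.concatenate([namespace[name] for namespace in self.workers])

cluster = FakeCluster(3)
cluster.row_split('a', range(10))
print(cluster.row_gather('a'))   # [0 1 2 3 4 5 6 7 8 9]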
elif p<81:
elif p<35:
def daub(p): """The coefficients for the FIR low-pass filter producing Daubechies wavelets. p>=1 gives the order of the zero at f=1/2. There are 2p filter coefficients. """ sqrt = sb.sqrt assert(p>=1) if p==1: c = 1/sqrt(2) return sb.array([c,c]) elif p==2: f = sqrt(2)/8 c = sqrt(3) return f*sb.array([1+c,3+c,3-c,1-c]) elif p==3: tmp = 12*sqrt(10) z1 = 1.5 + sqrt(15+tmp)/6 - 1j*(sqrt(15)+sqrt(tmp-15))/6 z1c = sb.conj(z1) f = sqrt(2)/8 d0 = sb.real((1-z1)*(1-z1c)) a0 = sb.real(z1*z1c) a1 = 2*sb.real(z1) return f/d0*sb.array([a0, 3*a0-a1, 3*a0-3*a1+1, a0-3*a1+3, 3-a1, 1]) elif p<81: # construct polynomial and factor it if p<35: P = [s.comb(p-1+k,k,exact=1) for k in range(p)][::-1] yj = sb.roots(P) else: raise ValueError, "Cannot factor such large polynomial well." k = sb.r_[0:p] P = s.comb(p-1+k,k)/4.0**k yj = sb.roots(P) / 4 # for each root, compute two z roots, select the one with |z|>1 # Build up final polynomial c = sb.poly1d([1,1])**p q = sb.poly1d([1]) for k in range(p-1): yval = yj[k] part = 2*sqrt(yval*(yval-1)) const = 1-2*yval z1 = const + part if (abs(z1)) < 1: z1 = const - part q = q * [1,-z1] q = sb.real(q) * c # Normalize result q = q / sb.sum(q) * sqrt(2) return q.c[::-1] else: raise ValueError, "Polynomial factorization does not work "\ "well for p too large."
211104491912e864f4dc4ee796e188ee8dd120f7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/211104491912e864f4dc4ee796e188ee8dd120f7/wavelets.py
        else:
            raise ValueError, "Cannot factor such large polynomial well."
            k = sb.r_[0:p]
            P = s.comb(p-1+k,k)/4.0**k
        else:
            P = [s.comb(p-1+k,k,exact=1)/4.0**k for k in range(p)][::-1]
def daub(p): """The coefficients for the FIR low-pass filter producing Daubechies wavelets. p>=1 gives the order of the zero at f=1/2. There are 2p filter coefficients. """ sqrt = sb.sqrt assert(p>=1) if p==1: c = 1/sqrt(2) return sb.array([c,c]) elif p==2: f = sqrt(2)/8 c = sqrt(3) return f*sb.array([1+c,3+c,3-c,1-c]) elif p==3: tmp = 12*sqrt(10) z1 = 1.5 + sqrt(15+tmp)/6 - 1j*(sqrt(15)+sqrt(tmp-15))/6 z1c = sb.conj(z1) f = sqrt(2)/8 d0 = sb.real((1-z1)*(1-z1c)) a0 = sb.real(z1*z1c) a1 = 2*sb.real(z1) return f/d0*sb.array([a0, 3*a0-a1, 3*a0-3*a1+1, a0-3*a1+3, 3-a1, 1]) elif p<81: # construct polynomial and factor it if p<35: P = [s.comb(p-1+k,k,exact=1) for k in range(p)][::-1] yj = sb.roots(P) else: raise ValueError, "Cannot factor such large polynomial well." k = sb.r_[0:p] P = s.comb(p-1+k,k)/4.0**k yj = sb.roots(P) / 4 # for each root, compute two z roots, select the one with |z|>1 # Build up final polynomial c = sb.poly1d([1,1])**p q = sb.poly1d([1]) for k in range(p-1): yval = yj[k] part = 2*sqrt(yval*(yval-1)) const = 1-2*yval z1 = const + part if (abs(z1)) < 1: z1 = const - part q = q * [1,-z1] q = sb.real(q) * c # Normalize result q = q / sb.sum(q) * sqrt(2) return q.c[::-1] else: raise ValueError, "Polynomial factorization does not work "\ "well for p too large."
211104491912e864f4dc4ee796e188ee8dd120f7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/211104491912e864f4dc4ee796e188ee8dd120f7/wavelets.py
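The daub() records above switch to the scaled polynomial P(y/4), whose coefficients comb(p-1+k,k)/4**k stay small enough to factor reliably for 35 <= p < 81; because the roots of the scaled polynomial are exactly 4 times the originals, dividing them by 4 recovers the values the filter construction needs. A small check of that identity, using Python's math.comb in place of scipy's comb and a modest p purely for illustration:

import numpy as np
from math import comb            # stands in for scipy's comb(..., exact=1)

p = 12
# coefficients of P(y) = sum_k C(p-1+k,k) * y**k, highest power first for np.roots
P = [comb(p-1+k, k) for k in range(p)][::-1]
# coefficients of Q(y) = P(y/4): the k-th coefficient is divided by 4**k
Q = [comb(p-1+k, k) / 4.0**k for k in range(p)][::-1]

r_direct = np.sort_complex(np.roots(P))
r_scaled = np.sort_complex(np.roots(Q) / 4.0)
print(np.allclose(r_direct, r_scaled))   # True: same roots, smaller coefficients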
ext_args['macros'] = [('ATLAS_INFO',atlas_version)]
ext_args['define_macros'] = [('ATLAS_INFO',atlas_version)]
def configuration(parent_package=''): from scipy_distutils.core import Extension from scipy_distutils.misc_util import fortran_library_item, dot_join,\ SourceGenerator, get_path, default_config_dict, get_build_temp from scipy_distutils.system_info import get_info,dict_append,\ AtlasNotFoundError,LapackNotFoundError,BlasNotFoundError,\ LapackSrcNotFoundError,BlasSrcNotFoundError package = 'linalg' from interface_gen import generate_interface config = default_config_dict(package,parent_package) local_path = get_path(__name__) atlas_info = get_info('atlas') #atlas_info = {} # uncomment if ATLAS is available but want to use # Fortran LAPACK/BLAS; useful for testing f_libs = [] atlas_version = None temp_path = os.path.join(get_build_temp(),'linalg','atlas_version') dir_util.mkpath(temp_path,verbose=1) atlas_version_file = os.path.join(temp_path,'atlas_version') if atlas_info: if os.path.isfile(atlas_version_file): atlas_version = open(atlas_version_file).read() print 'ATLAS version',atlas_version if atlas_info and atlas_version is None: # Try to determine ATLAS version shutil.copy(os.path.join(local_path,'atlas_version.c'),temp_path) cur_dir = os.getcwd() os.chdir(temp_path) cmd = '%s %s build_ext --inplace --force'%\ (sys.executable, os.path.join(local_path,'setup_atlas_version.py')) print cmd s,o=run_command(cmd) if not s: cmd = sys.executable+' -c "import atlas_version"' print cmd s,o=run_command(cmd) if not s: m = re.match(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)',o) if m: atlas_version = m.group('version') print 'ATLAS version',atlas_version if atlas_version is None: if re.search(r'undefined symbol: ATL_buildinfo',o,re.M): atlas_version = '3.2.1_pre3.3.6' print 'ATLAS version',atlas_version else: print o else: print o os.chdir(cur_dir) if atlas_version is None: print 'Failed to determine ATLAS version' else: f = open(atlas_version_file,'w') f.write(atlas_version) f.close() if atlas_info: if ('ATLAS_WITHOUT_LAPACK',None) in atlas_info.get('define_macros',[]): lapack_info = get_info('lapack') if not lapack_info: warnings.warn(LapackNotFoundError.__doc__) lapack_src_info = get_info('lapack_src') if not lapack_src_info: raise LapackSrcNotFoundError,LapackSrcNotFoundError.__doc__ dict_append(lapack_info,libraries=['lapack_src']) f_libs.append(fortran_library_item(\ 'lapack_src',lapack_src_info['sources'], )) dict_append(lapack_info,**atlas_info) atlas_info = lapack_info blas_info,lapack_info = {},{} if not atlas_info: warnings.warn(AtlasNotFoundError.__doc__) blas_info = get_info('blas') #blas_info = {} # test building BLAS from sources. if not blas_info: warnings.warn(BlasNotFoundError.__doc__) blas_src_info = get_info('blas_src') if not blas_src_info: raise BlasSrcNotFoundError,BlasSrcNotFoundError.__doc__ dict_append(blas_info,libraries=['blas_src']) f_libs.append(fortran_library_item(\ 'blas_src',blas_src_info['sources'] + \ [os.path.join(local_path,'src','fblaswrap.f')], )) lapack_info = get_info('lapack') #lapack_info = {} # test building LAPACK from sources. 
if not lapack_info: warnings.warn(LapackNotFoundError.__doc__) lapack_src_info = get_info('lapack_src') if not lapack_src_info: raise LapackSrcNotFoundError,LapackSrcNotFoundError.__doc__ dict_append(lapack_info,libraries=['lapack_src']) f_libs.append(fortran_library_item(\ 'lapack_src',lapack_src_info['sources'], )) dict_append(atlas_info,**lapack_info) dict_append(atlas_info,**blas_info) target_dir = '' skip_names = {'clapack':[],'flapack':[],'cblas':[],'fblas':[]} if skip_single_routines: target_dir = 'dbl' skip_names['clapack'].extend(\ 'sgesv cgesv sgetrf cgetrf sgetrs cgetrs sgetri cgetri'\ ' sposv cposv spotrf cpotrf spotrs cpotrs spotri cpotri'\ ' slauum clauum strtri ctrtri'.split()) skip_names['flapack'].extend(skip_names['clapack']) skip_names['flapack'].extend(\ 'sgesdd cgesdd sgelss cgelss sgeqrf cgeqrf sgeev cgeev'\ ' sgegv cgegv ssyev cheev slaswp claswp sgees cgees' ' sggev cggev'.split()) skip_names['cblas'].extend('saxpy caxpy'.split()) skip_names['fblas'].extend(skip_names['cblas']) skip_names['fblas'].extend(\ 'srotg crotg srotmg srot csrot srotm sswap cswap sscal cscal'\ ' csscal scopy ccopy sdot cdotu cdotc snrm2 scnrm2 sasum scasum'\ ' isamax icamax sgemv cgemv chemv ssymv strmv ctrmv'\ ' sgemm cgemm'.split()) if using_lapack_blas: target_dir = os.path.join(target_dir,'blas') skip_names['fblas'].extend(\ 'drotmg srotmg drotm srotm'.split()) if atlas_version=='3.2.1_pre3.3.6': target_dir = os.path.join(target_dir,'atlas321') skip_names['clapack'].extend(\ 'sgetri dgetri cgetri zgetri spotri dpotri cpotri zpotri'\ ' slauum dlauum clauum zlauum strtri dtrtri ctrtri ztrtri'.split()) # atlas_version: ext_args = {'name':dot_join(parent_package,package,'atlas_version'), 'sources':[os.path.join(local_path,'atlas_version.c')]} if atlas_info: ext_args['libraries'] = [atlas_info['libraries'][-1]] ext_args['library_dirs'] = atlas_info['library_dirs'][:] ext_args['macros'] = [('ATLAS_INFO',atlas_version)] else: ext_args['macros'] = [('NO_ATLAS_INFO',1)] ext = Extension(**ext_args) config['ext_modules'].append(ext) # In case any of atlas|lapack|blas libraries are not available def generate_empty_pyf(target,sources,generator,skips): name = os.path.basename(target)[:-4] f = open(target,'w') f.write('python module '+name+'\n') f.write('usercode void empty_module(void) {}\n') f.write('interface\n') f.write('subroutine empty_module()\n') f.write('intent(c) empty_module\n') f.write('end subroutine empty_module\n') f.write('end interface\nend python module'+name+'\n') f.close() # fblas: def generate_fblas_pyf(target,sources,generator,skips): generator('fblas',sources[0],target,skips) if not (blas_info or atlas_info): generate_fblas_pyf = generate_empty_pyf sources = ['generic_fblas.pyf', 'generic_fblas1.pyf', 'generic_fblas2.pyf', 'generic_fblas3.pyf', os.path.join('src','fblaswrap.f')] sources = [os.path.join(local_path,s) for s in sources] fblas_pyf = SourceGenerator(generate_fblas_pyf, os.path.join(target_dir,'fblas.pyf'), sources,generate_interface, skip_names['fblas']) ext_args = {'name':dot_join(parent_package,package,'fblas'), 'sources':[fblas_pyf,sources[-1]]} dict_append(ext_args,**atlas_info) ext = Extension(**ext_args) ext.need_fcompiler_opts = 1 config['ext_modules'].append(ext) # cblas: def generate_cblas_pyf(target,sources,generator,skips): generator('cblas',sources[0],target,skips) if not atlas_info: generate_cblas_pyf = generate_empty_pyf sources = ['generic_cblas.pyf', 'generic_cblas1.pyf'] sources = [os.path.join(local_path,s) for s in sources] cblas_pyf = 
SourceGenerator(generate_cblas_pyf, os.path.join(target_dir,'cblas.pyf'), sources,generate_interface, skip_names['cblas']) ext_args = {'name':dot_join(parent_package,package,'cblas'), 'sources':[cblas_pyf]} dict_append(ext_args,**atlas_info) ext = Extension(**ext_args) ext.need_fcompiler_opts = 1 config['ext_modules'].append(ext) # flapack: def generate_flapack_pyf(target,sources,generator,skips): generator('flapack',sources[0],target,skips) if not (lapack_info or atlas_info): generate_flapack_pyf = generate_empty_pyf sources = ['generic_flapack.pyf','flapack_user_routines.pyf'] sources = [os.path.join(local_path,s) for s in sources] flapack_pyf = SourceGenerator(generate_flapack_pyf, os.path.join(target_dir,'flapack.pyf'), sources,generate_interface, skip_names['flapack']) ext_args = {'name':dot_join(parent_package,package,'flapack'), 'sources':[flapack_pyf]} dict_append(ext_args,**atlas_info) ext = Extension(**ext_args) ext.need_fcompiler_opts = 1 config['ext_modules'].append(ext) # clapack: def generate_clapack_pyf(target,sources,generator,skips): generator('clapack',sources[0],target,skips) if not atlas_info: generate_cblas_pyf = generate_empty_pyf sources = ['generic_clapack.pyf'] sources = [os.path.join(local_path,s) for s in sources] clapack_pyf = SourceGenerator(generate_clapack_pyf, os.path.join(target_dir,'clapack.pyf'), sources,generate_interface, skip_names['clapack']) ext_args = {'name':dot_join(parent_package,package,'clapack'), 'sources':[clapack_pyf]} dict_append(ext_args,**atlas_info) ext = Extension(**ext_args) ext.need_fcompiler_opts = 1 config['ext_modules'].append(ext) # _flinalg: flinalg = [] for f in ['det.f','lu.f', #'wrappers.c','inv.f', ]: flinalg.append(os.path.join(local_path,'src',f)) ext_args = {'name':dot_join(parent_package,package,'_flinalg'), 'sources':flinalg} dict_append(ext_args,**atlas_info) config['ext_modules'].append(Extension(**ext_args)) # calc_lwork: ext_args = {'name':dot_join(parent_package,package,'calc_lwork'), 'sources':[os.path.join(local_path,'src','calc_lwork.f')], } dict_append(ext_args,**atlas_info) config['ext_modules'].append(Extension(**ext_args)) config['fortran_libraries'].extend(f_libs) return config
303e897a5aa28fc412316a62bc8f381a81cca8a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/303e897a5aa28fc412316a62bc8f381a81cca8a7/setup_linalg.py
ext_args['macros'] = [('NO_ATLAS_INFO',1)]
ext_args['define_macros'] = [('NO_ATLAS_INFO',1)]
def configuration(parent_package=''): from scipy_distutils.core import Extension from scipy_distutils.misc_util import fortran_library_item, dot_join,\ SourceGenerator, get_path, default_config_dict, get_build_temp from scipy_distutils.system_info import get_info,dict_append,\ AtlasNotFoundError,LapackNotFoundError,BlasNotFoundError,\ LapackSrcNotFoundError,BlasSrcNotFoundError package = 'linalg' from interface_gen import generate_interface config = default_config_dict(package,parent_package) local_path = get_path(__name__) atlas_info = get_info('atlas') #atlas_info = {} # uncomment if ATLAS is available but want to use # Fortran LAPACK/BLAS; useful for testing f_libs = [] atlas_version = None temp_path = os.path.join(get_build_temp(),'linalg','atlas_version') dir_util.mkpath(temp_path,verbose=1) atlas_version_file = os.path.join(temp_path,'atlas_version') if atlas_info: if os.path.isfile(atlas_version_file): atlas_version = open(atlas_version_file).read() print 'ATLAS version',atlas_version if atlas_info and atlas_version is None: # Try to determine ATLAS version shutil.copy(os.path.join(local_path,'atlas_version.c'),temp_path) cur_dir = os.getcwd() os.chdir(temp_path) cmd = '%s %s build_ext --inplace --force'%\ (sys.executable, os.path.join(local_path,'setup_atlas_version.py')) print cmd s,o=run_command(cmd) if not s: cmd = sys.executable+' -c "import atlas_version"' print cmd s,o=run_command(cmd) if not s: m = re.match(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)',o) if m: atlas_version = m.group('version') print 'ATLAS version',atlas_version if atlas_version is None: if re.search(r'undefined symbol: ATL_buildinfo',o,re.M): atlas_version = '3.2.1_pre3.3.6' print 'ATLAS version',atlas_version else: print o else: print o os.chdir(cur_dir) if atlas_version is None: print 'Failed to determine ATLAS version' else: f = open(atlas_version_file,'w') f.write(atlas_version) f.close() if atlas_info: if ('ATLAS_WITHOUT_LAPACK',None) in atlas_info.get('define_macros',[]): lapack_info = get_info('lapack') if not lapack_info: warnings.warn(LapackNotFoundError.__doc__) lapack_src_info = get_info('lapack_src') if not lapack_src_info: raise LapackSrcNotFoundError,LapackSrcNotFoundError.__doc__ dict_append(lapack_info,libraries=['lapack_src']) f_libs.append(fortran_library_item(\ 'lapack_src',lapack_src_info['sources'], )) dict_append(lapack_info,**atlas_info) atlas_info = lapack_info blas_info,lapack_info = {},{} if not atlas_info: warnings.warn(AtlasNotFoundError.__doc__) blas_info = get_info('blas') #blas_info = {} # test building BLAS from sources. if not blas_info: warnings.warn(BlasNotFoundError.__doc__) blas_src_info = get_info('blas_src') if not blas_src_info: raise BlasSrcNotFoundError,BlasSrcNotFoundError.__doc__ dict_append(blas_info,libraries=['blas_src']) f_libs.append(fortran_library_item(\ 'blas_src',blas_src_info['sources'] + \ [os.path.join(local_path,'src','fblaswrap.f')], )) lapack_info = get_info('lapack') #lapack_info = {} # test building LAPACK from sources. 
if not lapack_info: warnings.warn(LapackNotFoundError.__doc__) lapack_src_info = get_info('lapack_src') if not lapack_src_info: raise LapackSrcNotFoundError,LapackSrcNotFoundError.__doc__ dict_append(lapack_info,libraries=['lapack_src']) f_libs.append(fortran_library_item(\ 'lapack_src',lapack_src_info['sources'], )) dict_append(atlas_info,**lapack_info) dict_append(atlas_info,**blas_info) target_dir = '' skip_names = {'clapack':[],'flapack':[],'cblas':[],'fblas':[]} if skip_single_routines: target_dir = 'dbl' skip_names['clapack'].extend(\ 'sgesv cgesv sgetrf cgetrf sgetrs cgetrs sgetri cgetri'\ ' sposv cposv spotrf cpotrf spotrs cpotrs spotri cpotri'\ ' slauum clauum strtri ctrtri'.split()) skip_names['flapack'].extend(skip_names['clapack']) skip_names['flapack'].extend(\ 'sgesdd cgesdd sgelss cgelss sgeqrf cgeqrf sgeev cgeev'\ ' sgegv cgegv ssyev cheev slaswp claswp sgees cgees' ' sggev cggev'.split()) skip_names['cblas'].extend('saxpy caxpy'.split()) skip_names['fblas'].extend(skip_names['cblas']) skip_names['fblas'].extend(\ 'srotg crotg srotmg srot csrot srotm sswap cswap sscal cscal'\ ' csscal scopy ccopy sdot cdotu cdotc snrm2 scnrm2 sasum scasum'\ ' isamax icamax sgemv cgemv chemv ssymv strmv ctrmv'\ ' sgemm cgemm'.split()) if using_lapack_blas: target_dir = os.path.join(target_dir,'blas') skip_names['fblas'].extend(\ 'drotmg srotmg drotm srotm'.split()) if atlas_version=='3.2.1_pre3.3.6': target_dir = os.path.join(target_dir,'atlas321') skip_names['clapack'].extend(\ 'sgetri dgetri cgetri zgetri spotri dpotri cpotri zpotri'\ ' slauum dlauum clauum zlauum strtri dtrtri ctrtri ztrtri'.split()) # atlas_version: ext_args = {'name':dot_join(parent_package,package,'atlas_version'), 'sources':[os.path.join(local_path,'atlas_version.c')]} if atlas_info: ext_args['libraries'] = [atlas_info['libraries'][-1]] ext_args['library_dirs'] = atlas_info['library_dirs'][:] ext_args['macros'] = [('ATLAS_INFO',atlas_version)] else: ext_args['macros'] = [('NO_ATLAS_INFO',1)] ext = Extension(**ext_args) config['ext_modules'].append(ext) # In case any of atlas|lapack|blas libraries are not available def generate_empty_pyf(target,sources,generator,skips): name = os.path.basename(target)[:-4] f = open(target,'w') f.write('python module '+name+'\n') f.write('usercode void empty_module(void) {}\n') f.write('interface\n') f.write('subroutine empty_module()\n') f.write('intent(c) empty_module\n') f.write('end subroutine empty_module\n') f.write('end interface\nend python module'+name+'\n') f.close() # fblas: def generate_fblas_pyf(target,sources,generator,skips): generator('fblas',sources[0],target,skips) if not (blas_info or atlas_info): generate_fblas_pyf = generate_empty_pyf sources = ['generic_fblas.pyf', 'generic_fblas1.pyf', 'generic_fblas2.pyf', 'generic_fblas3.pyf', os.path.join('src','fblaswrap.f')] sources = [os.path.join(local_path,s) for s in sources] fblas_pyf = SourceGenerator(generate_fblas_pyf, os.path.join(target_dir,'fblas.pyf'), sources,generate_interface, skip_names['fblas']) ext_args = {'name':dot_join(parent_package,package,'fblas'), 'sources':[fblas_pyf,sources[-1]]} dict_append(ext_args,**atlas_info) ext = Extension(**ext_args) ext.need_fcompiler_opts = 1 config['ext_modules'].append(ext) # cblas: def generate_cblas_pyf(target,sources,generator,skips): generator('cblas',sources[0],target,skips) if not atlas_info: generate_cblas_pyf = generate_empty_pyf sources = ['generic_cblas.pyf', 'generic_cblas1.pyf'] sources = [os.path.join(local_path,s) for s in sources] cblas_pyf = 
SourceGenerator(generate_cblas_pyf, os.path.join(target_dir,'cblas.pyf'), sources,generate_interface, skip_names['cblas']) ext_args = {'name':dot_join(parent_package,package,'cblas'), 'sources':[cblas_pyf]} dict_append(ext_args,**atlas_info) ext = Extension(**ext_args) ext.need_fcompiler_opts = 1 config['ext_modules'].append(ext) # flapack: def generate_flapack_pyf(target,sources,generator,skips): generator('flapack',sources[0],target,skips) if not (lapack_info or atlas_info): generate_flapack_pyf = generate_empty_pyf sources = ['generic_flapack.pyf','flapack_user_routines.pyf'] sources = [os.path.join(local_path,s) for s in sources] flapack_pyf = SourceGenerator(generate_flapack_pyf, os.path.join(target_dir,'flapack.pyf'), sources,generate_interface, skip_names['flapack']) ext_args = {'name':dot_join(parent_package,package,'flapack'), 'sources':[flapack_pyf]} dict_append(ext_args,**atlas_info) ext = Extension(**ext_args) ext.need_fcompiler_opts = 1 config['ext_modules'].append(ext) # clapack: def generate_clapack_pyf(target,sources,generator,skips): generator('clapack',sources[0],target,skips) if not atlas_info: generate_cblas_pyf = generate_empty_pyf sources = ['generic_clapack.pyf'] sources = [os.path.join(local_path,s) for s in sources] clapack_pyf = SourceGenerator(generate_clapack_pyf, os.path.join(target_dir,'clapack.pyf'), sources,generate_interface, skip_names['clapack']) ext_args = {'name':dot_join(parent_package,package,'clapack'), 'sources':[clapack_pyf]} dict_append(ext_args,**atlas_info) ext = Extension(**ext_args) ext.need_fcompiler_opts = 1 config['ext_modules'].append(ext) # _flinalg: flinalg = [] for f in ['det.f','lu.f', #'wrappers.c','inv.f', ]: flinalg.append(os.path.join(local_path,'src',f)) ext_args = {'name':dot_join(parent_package,package,'_flinalg'), 'sources':flinalg} dict_append(ext_args,**atlas_info) config['ext_modules'].append(Extension(**ext_args)) # calc_lwork: ext_args = {'name':dot_join(parent_package,package,'calc_lwork'), 'sources':[os.path.join(local_path,'src','calc_lwork.f')], } dict_append(ext_args,**atlas_info) config['ext_modules'].append(Extension(**ext_args)) config['fortran_libraries'].extend(f_libs) return config
303e897a5aa28fc412316a62bc8f381a81cca8a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/303e897a5aa28fc412316a62bc8f381a81cca8a7/setup_linalg.py
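The two setup_linalg.py records above fix a keyword: distutils' Extension expects preprocessor definitions under define_macros, a list of (name, value) pairs, so a 'macros' entry never reaches the compiler command line. A hedged sketch of the corrected call; the module name and version string are illustrative only:

from distutils.core import Extension   # setuptools exposes the same class

atlas_version = '3.6.0'                # illustrative value
ext = Extension('atlas_version',
                sources=['atlas_version.c'],
                define_macros=[('ATLAS_INFO', '"%s"' % atlas_version)])
print(ext.define_macros)               # [('ATLAS_INFO', '"3.6.0"')]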
f.write('\ndef get_info(name): return globals().get(name,{})\n')
f.write('\ndef get_info(name): g=globals(); return g.get(name,g.get(name+"_info",{}))\n')
f.write('\ndef get_info(name): return globals().get(name,{})\n')
191555b758309b741a5195ede9fc617164eeac60 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/191555b758309b741a5195ede9fc617164eeac60/setup.py
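The record above widens the generated get_info() so that asking for name also matches a name_info global. A standalone sketch of that two-step dictionary lookup, with a made-up *_info variable for illustration:

atlas_info = {'libraries': ['atlas', 'lapack']}   # illustrative module global

def get_info(name):
    # try the bare name first, then the conventional <name>_info spelling
    g = globals()
    return g.get(name, g.get(name + "_info", {}))

print(get_info('atlas'))   # found through the *_info fallback
print(get_info('fftw'))    # {} when neither spelling is defined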
self.obj = scipy.ppresolve(obj)
self.obj = obj
    def __init__(self, parent, obj, **kw):
        self.parent = parent
        self.obj = scipy.ppresolve(obj)
        self.name = kw.pop('name',None)
        if self.name is None:
            self.name = self.obj.__name__
        self.canedit = kw.pop('canedit',1)
        rend.Page.__init__(self, **kw)
0947fb4fe2320f84496f7e32f9af6abd89e11e8e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0947fb4fe2320f84496f7e32f9af6abd89e11e8e/livedocs.py
if name not in self.all:
if name not in self.all and not hasattr(self.obj,name):
    def childFactory(self, context, name):
        if name not in self.all:
            print "Err 1: ", name, self.all
            return None
        child = getattr(self.obj,name,None)
0947fb4fe2320f84496f7e32f9af6abd89e11e8e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0947fb4fe2320f84496f7e32f9af6abd89e11e8e/livedocs.py
basic_inv = linalg.inverse
basic_inv = linalg.inv
    def bench_random(self,level=5):
        import numpy.linalg as linalg
        basic_inv = linalg.inverse
        print
        print ' Finding matrix inverse'
        print ' =================================='
        print ' | contiguous | non-contiguous '
        print '----------------------------------------------'
        print ' size | scipy | basic | scipy | basic'
6d9ca1f8115ef767febee897444a5bce02675b41 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/6d9ca1f8115ef767febee897444a5bce02675b41/test_basic.py
        if 'X11BASE' in os.environ:
            X11BASE=os.environ['X11BASE']
        else:
            X11BASE="/no/suggested/x11dir"
X11BASE=os.environ.get('X11BASE','/no/suggested/x11dir')
    def config_x11(self):
        print
        print " ============= begin play/x11 configuration =============="
        print
        from string import replace
        self.fatality=0
3e57eacb3bdd4984e988078fd79531a50bce71e4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/3e57eacb3bdd4984e988078fd79531a50bce71e4/setup_xplt.py
libraries = ['X11']
libraries = x11_info.get('libraries','X11')
def getallparams(gistpath,local): if windows: extra_compile_args = ['-DGISTPATH="\\"' + gistpath + '\\""' ] else: extra_compile_args = ['-DGISTPATH="' + gistpath + '"' ] extra_link_args = [] if windows or cygwin: extra_compile_args.append("-DWINDOWS") extra_compile_args.append("-mwindows") extra_link_args.append("-mwindows") if cygwin: extra_compile_args.append("-DCYGWIN") include_dirs = [ 'src/gist', 'src/play', 'src/play/unix' ] if windows or cygwin: libraries = [] else: libraries = ['X11'] library_dirs = [os.path.join(local,x) for x in ['.','src']] library_dirs.extend(get_special_dirs(sys.platform)) include_dirs = [os.path.join(local,x) for x in include_dirs] if not run_config: inputfile = open(os.path.join(local,"pygist","Make.cfg")) lines = inputfile.readlines() inputfile.close() for line in lines: if line[:8]=="MATHLIB=": mathlib = line[8:-1] #removing the \n # remove the -l mathlib = mathlib[2:] libraries.append(mathlib) if line[:9]=="NO_EXP10=": no_exp10 = line[9:-1] # removing \n if no_exp10: extra_compile_args.append(no_exp10) if line[:5]=="XINC=": xinc = line[5:-1] # removing \n if xinc and not (windows or cygwin): # remove the -I xinc = xinc[2:] if xinc: include_dirs.append(xinc) if line[:5]=="XLIB=": xlib = line[5:-1] # removing \n if xlib and not (windows or cygwin): # remove the -L xlib = xlib[2:] library_dirs.append(xlib) return include_dirs, library_dirs, libraries, \ extra_compile_args, extra_link_args
3e57eacb3bdd4984e988078fd79531a50bce71e4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/3e57eacb3bdd4984e988078fd79531a50bce71e4/setup_xplt.py
try:
    import scipy_distutils
except ImportError:
    extra_packages.append('scipy_distutils')
    sys.argv.insert(0,'scipy_core')
sys.path.insert(0,'scipy_core')
def get_package_config(name):
    sys.path.insert(0,os.path.join('scipy_core',name))
    try:
        mod = __import__('setup_'+name)
        config = mod.configuration()
    finally:
        del sys.path[0]
    return config
533aec567096933861fdc0c4ec174c8b5be9dee6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/533aec567096933861fdc0c4ec174c8b5be9dee6/setup_linalg.py
def ramp(x, y):
def xramp(x, y):
def ramp(x, y): return x
da3a8b0668083ce48b49a2e6838c25e9c6f741b1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/da3a8b0668083ce48b49a2e6838c25e9c6f741b1/testfuncs.py
ramp.title = 'Ramp'
xramp.title = 'X Ramp'

def yramp(x, y):
    return y
yramp.title = 'Y Ramp'
def ramp(x, y): return x
da3a8b0668083ce48b49a2e6838c25e9c6f741b1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/da3a8b0668083ce48b49a2e6838c25e9c6f741b1/testfuncs.py