diff --git a/.pylintrc b/.pylintrc index 57867dc..c499fc1 100644 --- a/.pylintrc +++ b/.pylintrc @@ -60,7 +60,7 @@ confidence= # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" #disable=import-star-module-level,old-octal-literal,oct-method,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating -disable=locally-disabled +disable=locally-disabled,superfluous-parens,wrong-import-position, fixme, duplicate-code, maybe-no-member [REPORTS] @@ -108,64 +108,34 @@ name-group= include-naming-hint=no # Regular expression matching correct function names -function-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for function names -function-name-hint=[a-z_][a-z0-9_]{2,30}$ +function-rgx=[A-Za-z_][a-z0-9_]{2,30}$ # Regular expression matching correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for variable names -variable-name-hint=[a-z_][a-z0-9_]{2,30}$ +variable-rgx=[A-Za-z_][A-Za-z0-9_]{0,30}$ # Regular expression matching correct constant names const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ -# Naming hint for constant names -const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - # Regular expression matching correct attribute names -attr-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for attribute names -attr-name-hint=[a-z_][a-z0-9_]{2,30}$ +attr-rgx=[A-Za-z_][a-z0-9_]{0,30}$ # Regular expression matching correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for argument names -argument-name-hint=[a-z_][a-z0-9_]{2,30}$ +argument-rgx=[A-Za-z_][a-z0-9_]{0,30}$ # Regular expression matching correct class attribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Naming hint for class attribute names -class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ +class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{0,30}|(__.*__))$ # Regular expression matching correct inline iteration names inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ -# Naming hint for inline iteration names -inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ - # Regular expression matching correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Naming hint for class names -class-name-hint=[A-Z_][a-zA-Z0-9]+$ +class-rgx=[t_]?|[A-Z_][a-zA-Z0-9]+$ # Regular expression matching correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Naming hint for module names -module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ +module-rgx=[t_]?|(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ # Regular expression matching correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for method names -method-name-hint=[a-z_][a-z0-9_]{2,30}$ +method-rgx=[test_]?[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match function or class names that do # not require a docstring. 
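A quick sanity check on the relaxed naming patterns above: the bracketed prefixes ([test_]? and [t_]?) are single-character classes rather than literal prefixes, and the empty-able first alternative in class-rgx and module-rgx appears to accept essentially any name. The sketch below is illustrative only, assuming pylint's usual anchored re.match semantics; it just confirms that the test-style names used later in this diff match the new patterns.

import re

# Patterns copied from the new method-rgx and class-rgx above.
method_rgx = re.compile(r"[test_]?[a-z_][a-z0-9_]{2,30}$")
class_rgx = re.compile(r"[t_]?|[A-Z_][a-zA-Z0-9]+$")

# Test-style names from this diff now match, so no invalid-name warning is expected.
print(bool(method_rgx.match("test_rms_error")))   # True
print(bool(class_rgx.match("t_softmax_affine")))  # True
print(bool(class_rgx.match("TestEx63ISMA")))      # True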
diff --git a/docs/source/examples/hoburgabbeel_ex6_1.py b/docs/source/examples/hoburgabbeel_ex6_1.py index 060fdca..4e6fd4c 100644 --- a/docs/source/examples/hoburgabbeel_ex6_1.py +++ b/docs/source/examples/hoburgabbeel_ex6_1.py @@ -1,5 +1,6 @@ -from gpfit.fit import fit +"Fits an example function" from numpy import logspace, log, log10, random +from gpfit.fit import fit # fixed initial guess for fitting random.seed(33404) diff --git a/gpfit/ba_init.py b/gpfit/ba_init.py new file mode 100644 index 0000000..305d9b9 --- /dev/null +++ b/gpfit/ba_init.py @@ -0,0 +1,81 @@ +"Implements ba_init" +from numpy import ones, hstack, zeros, tile, argmin +from numpy.linalg import lstsq, matrix_rank +from numpy.random import permutation as randperm + + +# pylint: disable=too-many-locals +def ba_init(x, y, K): + """ + Initializes max-affine fit to data (y, x) + ensures that initialization has at least K+1 points per partition (i.e. + per affine function) + + INPUTS: + x: Independent variable data + 2D column vector [nPoints x nDims] + + y: Dependent variable data + 2D column vector [nPoints x 1] + + OUTPUTS: + ba: Initial b and a parameters + 2D array [(dimx+1) x k] + + """ + defaults = {} + defaults['bverbose'] = False + options = defaults + + npt, dimx = x.shape + + X = hstack((ones((npt, 1)), x)) + b = zeros((dimx+1, K)) + + if K*(dimx+1) > npt: + raise ValueError('Not enough data points') + + # Choose K unique indices + randinds = randperm(npt)[0:K] + + # partition based on distances + sqdists = zeros((npt, K)) + for k in range(K): + sqdists[:, k] = ((x - tile(x[randinds[k], :], (npt, 1))) ** 2).sum(1) + + # index to closest k for each data pt + mindistind = argmin(sqdists, axis=1) + + # loop through each partition, making local fits + # note we expand partitions that result in singular least squares problems + # why this way? some points will be shared by multiple partitions, but + # resulting max-affine fit will tend to be good. (as opposed to solving least-norm version) + for k in range(K): + inds = mindistind == k + + # before fitting, check rank and increase partition size if necessary + # (this does create overlaps) + if matrix_rank(X[inds, :]) < dimx + 1: + sortdistind = sqdists[:, k].argsort() + + i = sum(inds) # number of points in partition + iinit = i + + if i < dimx+1: + # obviously, at least need dimx+1 points. fill these in before + # checking any ranks + inds[sortdistind[i+1:dimx+1]] = 1 # TODO: check index + i = dimx+1 # TODO: check index + + # now add points until rank condition satisfied + while matrix_rank(X[inds, :]) < dimx+1: + i = i+1 + inds[sortdistind[i]] = 1 + + if options['bverbose']: + print("ba_init: Added %s points to partition %s to maintain" + "full rank for local fitting." 
% (i-iinit, k)) + # now create the local fit + b[:, k] = lstsq(X[inds.nonzero()], y[inds.nonzero()])[0][:, 0] + + return b diff --git a/gpfit/evaluate_fit.py b/gpfit/evaluate_fit.py deleted file mode 100644 index 0842ecd..0000000 --- a/gpfit/evaluate_fit.py +++ /dev/null @@ -1,75 +0,0 @@ -" evaluate a fitted constraint " -import numpy as np -from max_affine import max_affine -from softmax_affine import softmax_affine -from implicit_softmax_affine import implicit_softmax_affine - -def evaluate_fit(cnstr, x, fittype): - """ - given a constraint and x data, return y - - Inputs - ------ - cnstr: Constraint - (MonomialInequality, MonomialEquality, - PosynomialInequality) - x: 1D or 2D array - array of input values in log space - fittype: string - "MA", "SMA", or "ISMA" - - Output - ------ - y: 1D array - array of output for the given x inputs in log space - - """ - - y = 0 - - if x.ndim == 1: - x = x.reshape(x.size, 1) - else: - x = x.T - - if fittype == "MA": - if not hasattr(cnstr, "__len__"): - cnstr = [cnstr] - vkn = range(1, len(cnstr[0].varkeys)) - expos = np.array( - [cn.left.exp[list(cn.varkeys["u_%d" % n])[0]] for cn in cnstr - for n in vkn]).reshape(len(cnstr), len(vkn)) - params = np.hstack([np.hstack([np.log(cn.left.c), ex]) - for cn, ex in zip(cnstr, expos)]) - if np.inf in params or 0.0 in params or -np.inf in params: - pass - else: - y, _ = max_affine(x, params) - - elif fittype == "SMA": - wvk = [vk for vk in cnstr.varkeys if vk.name == "w"][0] - alpha = [1/ex[wvk] for ex in cnstr.left.exps][0] - vkn = range(1, len(cnstr.varkeys)) - expos = np.array( - [e[list(cnstr.varkeys["u_%d" % n])[0]] for e in cnstr.right.exps - for n in vkn]).reshape(len(cnstr.right.cs), len(vkn)) - params = np.hstack([np.hstack([np.log(c**(alpha))] + [ex*alpha]) - for c, ex in zip(cnstr.right.cs, expos)]) - params = np.append(params, alpha) - if np.inf in params or 0.0 in params or -np.inf in params: - pass - else: - y, _ = softmax_affine(x, params) - - elif fittype == "ISMA": - wvk = [vk for vk in cnstr.varkeys if vk.name == "w"][0] - alphas = [-1/ex[wvk] for ex in cnstr.left.exps] - vkn = range(1, len(cnstr.varkeys)) - expos = np.array( - [e[list(cnstr.varkeys["u_%d" % n])[0]] for e in cnstr.left.exps - for n in vkn]).reshape(len(cnstr.left.cs), len(vkn)) - params = np.hstack([np.hstack([np.log(c**a)] + [e*a]) for c, e, a in - zip(cnstr.left.cs, expos, alphas)]) - params = np.append(params, alphas) - if np.inf in params or 0.0 in params or -np.inf in params: - pass - else: - y, _ = implicit_softmax_affine(x, params) - - return y diff --git a/gpfit/fit.py b/gpfit/fit.py index bf4445c..844cd98 100644 --- a/gpfit/fit.py +++ b/gpfit/fit.py @@ -1,17 +1,44 @@ -from numpy import append, ones, exp, sqrt, mean, square -from gpkit.nomials import (Posynomial, Monomial, PosynomialInequality, - MonomialEquality) -from implicit_softmax_affine import implicit_softmax_affine -from softmax_affine import softmax_affine -from max_affine import max_affine -from LM import LM -from generic_resid_fun import generic_resid_fun -from max_affine_init import max_affine_init -from print_fit import print_ISMA, print_SMA, print_MA - -def fit(xdata, ydata, K, ftype="ISMA", varNames=None): - ''' - Fits a log-convex function to multivariate data and returns a GP-compatible constraint +"Implements the all-important 'fit' function." 
+from numpy import ones, exp, sqrt, mean, square, hstack +from gpkit import NamedVariables, VectorVariable, Variable, NomialArray +from .implicit_softmax_affine import implicit_softmax_affine +from .softmax_affine import softmax_affine +from .max_affine import max_affine +from .levenberg_marquardt import levenberg_marquardt +from .ba_init import ba_init +from .print_fit import print_ISMA, print_SMA, print_MA + + +ALPHA_INIT = 10 +RFUN = {"ISMA": implicit_softmax_affine, + "SMA": softmax_affine, + "MA": max_affine} + + +def get_params(ftype, K, xdata, ydata): + "Perform least-squares fitting optimization." + def rfun(params): + "A specific residual function." + [yhat, drdp] = RFUN[ftype](xdata, params) + r = yhat - ydata + return r, drdp + + ba = ba_init(xdata, ydata.reshape(ydata.size, 1), K).flatten('F') + + if ftype == "ISMA": + params, _ = levenberg_marquardt(rfun, hstack((ba, ALPHA_INIT*ones(K)))) + elif ftype == "SMA": + params, _ = levenberg_marquardt(rfun, hstack((ba, ALPHA_INIT))) + else: + params, _ = levenberg_marquardt(rfun, ba) + + return params + + +# pylint: disable=too-many-locals +def fit(xdata, ydata, K, ftype="ISMA"): + """ + Fit a log-convex function to multivariate data, returning a GP constraint INPUTS xdata: Independent variable data @@ -23,183 +50,77 @@ def fit(xdata, ydata, K, ftype="ISMA", varNames=None): 1D numpy array [nPoints,] [<---------- y ------------->] - K: Number of terms in the fit - integer > 0 - - ftype: Fit type - one of the following strings: "ISMA" (default), "SMA" or "MA" - - varNames: List of variable names strings - Independent variables first, with dependent variable at the end - Default value: ['u_1', 'u_2', ...., 'u_d', 'w'] - - OUTPUTS - cstrt: GPkit PosynomialInequality object - For K > 1, this will be a posynomial inequality constraint - If K = 1, this is automatically made into an equality constraint - If MA fit and K > 1, this is a list of constraint objects - - rmsErr: RMS error - Root mean square error between original (not log transformed) - data and function fit. - ''' + rms_error: float + Root mean square error between original (not log transformed) + data and function fit. 
+ """ # Check data is in correct form if ydata.ndim > 1: raise ValueError('Dependent data should be in the form of a 1D numpy array') # Transform to column vector - ydata = ydata.reshape(ydata.size,1) - - if xdata.ndim == 1: - xdata = xdata.reshape(xdata.size,1) - else: - xdata = xdata.T + xdata = xdata.reshape(xdata.size, 1) if xdata.ndim == 1 else xdata.T # Dimension of function (number of independent variables) - d = xdata.shape[1] - - # Create varNames if None - if varNames == None: - varNames = [] - for i in range(d): - varNames.append('u_{0:d}'.format(i+1)) - varNames.append('w') - - # Initialize fitting variables - alphainit = 10 - bainit = max_affine_init(xdata, ydata, K) - - if ftype == "ISMA": - - def rfun (p): - return generic_resid_fun(implicit_softmax_affine, xdata, ydata, p) - [params, RMStraj] = LM(rfun, append(bainit.flatten('F'), alphainit*ones((K,1)))) - - # Approximated data - y_ISMA, _ = implicit_softmax_affine(xdata, params) - w_ISMA = exp(y_ISMA) - - # RMS error - w = (exp(ydata)).T[0] - #rmsErr = sqrt(mean(square(w_ISMA-w))) - rmsErr = sqrt(mean(square(y_ISMA-ydata.T[0]))) + d = int(xdata.shape[1]) - alpha = 1./params[range(-K,0)] + with NamedVariables("fit"): + u = VectorVariable(d, "u") + w = Variable("w") - # A: exponent parameters, B: coefficient parameters - A = params[[i for i in range(K*(d+1)) if i % (d + 1) != 0]] - B = params[[i for i in range(K*(d+1)) if i % (d + 1) == 0]] + params = get_params(ftype, K, xdata, ydata) - print_str = print_ISMA(A, B, alpha, d, K) - - # Calculate c's and exp's for creating GPkit objects - cs = [] - exps = [] - for k in range(K): - cs.append(exp(alpha[k] * B[k])) - expdict = {} - for i in range(d): - expdict[varNames[i]] = alpha[k] * A[k*d + i] - expdict[varNames[-1]] = -alpha[k] - exps.append(expdict) - - cs = tuple(cs) - exps = tuple(exps) - - # Create gpkit objects - # ISMA returns a constraint of the form 1 >= c1*u1^exp1*u2^exp2*w^(-alpha) + .... - posy = Posynomial(exps, cs) - cstrt = (posy <= 1) - - # # If only one term, automatically make an equality constraint - if K == 1: - cstrt = MonomialEquality(posy, "=", 1) + # A: exponent parameters, B: coefficient parameters + A = params[[i for i in range(K*(d+1)) if i % (d + 1) != 0]] + B = params[[i for i in range(K*(d+1)) if i % (d + 1) == 0]] + if ftype == "ISMA": + alpha = 1./params[range(-K, 0)] elif ftype == "SMA": - - def rfun (p): - return generic_resid_fun(softmax_affine, xdata, ydata, p) - [params, RMStraj] = LM(rfun, append(bainit.flatten('F'), alphainit)) - - # Approximated data - y_SMA, _ = softmax_affine(xdata, params) - w_SMA = exp(y_SMA) - - # RMS error - w = (exp(ydata)).T[0] - #rmsErr = sqrt(mean(square(w_SMA-w))) - rmsErr = sqrt(mean(square(y_SMA-ydata.T[0]))) - alpha = 1./params[-1] - - A = params[[i for i in range(K*(d+1)) if i % (d + 1) != 0]] - B = params[[i for i in range(K*(d+1)) if i % (d + 1) == 0]] - - print_str = print_SMA(A, B, alpha, d, K) - - # Calculate c's and exp's for creating GPkit objects - cs = [] - exps = [] - for k in range(K): - cs.append(exp(alpha * B[k])) - expdict = {} - for i in range(d): - expdict[varNames[i]] = alpha * A[k*d + i] - exps.append(expdict) - - cs = tuple(cs) - exps = tuple(exps) - - # Creates dictionary for the monomial side of the constraint - w_exp = {varNames[-1]: alpha} - - # Create gpkit objects - # SMA returns a constraint of the form w^alpha >= c1*u1^exp1 + c2*u2^exp2 +.... 
- posy = Posynomial(exps, cs) - mono = Monomial(w_exp, 1) - cstrt = (mono >= posy) - - # # If only one term, automatically make an equality constraint - if K == 1: - cstrt = MonomialEquality(mono, "=", posy) - - elif ftype == "MA": + alpha = 1 - def rfun(p): - return generic_resid_fun(max_affine, xdata, ydata, p) - [params, RMStraj] = LM(rfun, bainit.flatten('F')) + monos = exp(B*alpha) * NomialArray([(u**A[k*d:(k+1)*d]).prod() + for k in range(K)])**alpha - # Approximated data - y_MA, _ = max_affine(xdata, params) - w_MA = exp(y_MA) - - # RMS error - w = (exp(ydata)).T[0] - #rmsErr = sqrt(mean(square(w_MA-w))) - rmsErr = sqrt(mean(square(y_MA-ydata.T[0]))) - - A = params[[i for i in range(K*(d+1)) if i % (d + 1) != 0]] - B = params[[i for i in range(K*(d+1)) if i % (d + 1) == 0]] + if ftype == "ISMA": + # constraint of the form 1 >= c1*u1^exp1*u2^exp2*w^(-alpha) + .... + lhs, rhs = 1, (monos/w**alpha).sum() + print_ISMA(A, B, alpha, d, K) + elif ftype == "SMA": + # constraint of the form w^alpha >= c1*u1^exp1 + c2*u2^exp2 +.... + lhs, rhs = w**alpha, monos.sum() + print_SMA(A, B, alpha, d, K) + elif ftype == "MA": + # constraint of the form w >= c1*u1^exp1, w >= c2*u2^exp2, .... + lhs, rhs = w, monos + print_MA(A, B, d, K) - print_str = print_MA(A, B, d, K) + if K == 1: + # when possible, return an equality constraint + cstrt = (lhs == rhs) + else: + cstrt = (lhs >= rhs) - w_exp = {varNames[-1]: 1} - mono1 = Monomial(w_exp,1) + def evaluate(xdata): + """ + Evaluate the y of this fit over a range of xdata. - cstrt = [] - for k in range(K): - cs = exp(B[k]) + INPUTS + xdata: Independent variable data + 2D numpy array [nDim, nPoints] + [[<--------- x1 ------------->] + [<--------- x2 ------------->]] - exps = {} - for i in range(d): - exps[varNames[i]] = A[k*d + i] - mono2 = Monomial(exps, cs) - cstrt1 = PosynomialInequality(mono2, "<=", mono1) - cstrt.append(cstrt1) + OUTPUT + ydata: Dependent variable data in 1D numpy array + """ + xdata = xdata.reshape(xdata.size, 1) if xdata.ndim == 1 else xdata.T + return RFUN[ftype](xdata, params)[0] - if K == 1: - cstrt = MonomialEquality(mono2, "=", mono1) + cstrt.evaluate = evaluate + rms_error = sqrt(mean(square(evaluate(xdata.T)-ydata))) - return cstrt, rmsErr + return cstrt, rms_error diff --git a/gpfit/generic_resid_fun.py b/gpfit/generic_resid_fun.py deleted file mode 100644 index aaf483d..0000000 --- a/gpfit/generic_resid_fun.py +++ /dev/null @@ -1,33 +0,0 @@ -def generic_resid_fun(yfun, xdata, ydata, params): - ''' - generic residual function -- converts yfun(xdata,params) - to a residfun [r, drdp] = generic_resid_fun(yfun, xdata, ydata, params) - used by nonlinear least squares fitting algorithms - to get a residual function [r,drdp] = residfun(params), - use rfun = @(p) generic_resid_fun(@yfun, xdata, ydata, p) - - note this function defines resids as + when yhat > ydata - (opposite of typical conventions, but eliminates need for sign change) - - INPUTS: - yfun: Function (e.g. softmax_affine) - - xdata: X data from original problem - [n x 1 2D column vector] n = number of data points - - ydata: Y data from original problem - [n x 1 2D column vector] n = number of data points - - params: Fit parameters - [m-element 1D array] m = m(k) (e.g. 
For max-affine, m = 2*k) - - OUTPUTS: - r: residual [n-element 1D array] n = number of data points - - drdp: Jacobian [n x m matrix] - ''' - - [yhat, drdp] = yfun(xdata, params) - r = yhat - ydata.T[0] - - return r, drdp \ No newline at end of file diff --git a/gpfit/implicit_softmax_affine.py b/gpfit/implicit_softmax_affine.py index 7b934d8..e845949 100644 --- a/gpfit/implicit_softmax_affine.py +++ b/gpfit/implicit_softmax_affine.py @@ -1,8 +1,11 @@ -from numpy import ones, reshape, nan, inf, hstack, dot, tile -from lse_implicit import lse_implicit +"Implements ISMA residual function" +from numpy import ones, nan, inf, hstack, dot, tile +from .lse_implicit import lse_implicit + +# pylint: disable=too-many-locals def implicit_softmax_affine(x, params): - ''' + """ Evaluates implicit softmax affine function at values of x, given a set of ISMA fit parameters. @@ -21,28 +24,18 @@ def implicit_softmax_affine(x, params): dydp: Jacobian matrix - ''' + """ npt, dimx = x.shape - K = params.size/(dimx+2) ba = params[0:-K] alpha = params[-K:] - - #reshape ba to matrix - ba = ba.reshape(dimx+1, K, order='F') - if any(alpha <= 0): - y = inf*ones((npt,1)) - dydp = nan - return y, dydp - - #augment data with column of ones - X = hstack((ones((npt,1)), x)) - - #compute affine functions - z = dot(X,ba) + return inf*ones((npt, 1)), nan + ba = ba.reshape(dimx+1, K, order='F') # reshape ba to matrix + X = hstack((ones((npt, 1)), x)) # augment data with column of ones + z = dot(X, ba) # compute affine functions y, dydz, dydalpha = lse_implicit(z, alpha) @@ -51,4 +44,4 @@ def implicit_softmax_affine(x, params): dydba = repmat * tile(X, (1, K)) dydp = hstack((dydba, dydalpha)) - return y, dydp \ No newline at end of file + return y, dydp diff --git a/gpfit/LM.py b/gpfit/levenberg_marquardt.py similarity index 93% rename from gpfit/LM.py rename to gpfit/levenberg_marquardt.py index 8c5d176..4fd0fb4 100644 --- a/gpfit/LM.py +++ b/gpfit/levenberg_marquardt.py @@ -1,17 +1,19 @@ +"Implements LM" +from time import time +from sys import float_info import numpy as np from numpy.linalg import norm from scipy.sparse import spdiags, issparse -from time import time -from sys import float_info -def LM(residfun, initparams, - verbose=False, - lambdainit=0.02, - maxiter=5000, - maxtime=5., - tolgrad=np.sqrt(float_info.epsilon), - tolrms=1e-7): +# pylint: disable=too-many-locals,too-many-arguments,too-many-branches,too-many-statements +def levenberg_marquardt(residfun, initparams, + verbose=False, + lambdainit=0.02, + maxiter=5000, + maxtime=5., + tolgrad=np.sqrt(float_info.epsilon), + tolrms=1e-7): """ Levenberg-Marquardt alogrithm Minimizes sum of squared error of residual function diff --git a/gpfit/lse_implicit.py b/gpfit/lse_implicit.py index ceca481..e5a14b3 100644 --- a/gpfit/lse_implicit.py +++ b/gpfit/lse_implicit.py @@ -1,7 +1,10 @@ -from numpy import zeros, ones, spacing, exp, log, tile +"Implements lse_implicit" +from numpy import zeros, spacing, exp, log, tile + +# pylint: disable=too-many-locals def lse_implicit(x, alpha): - ''' + """ Implicit Log-sum-exponential function with derivatives - sums across the second dimension of x - returns one y for every row of x @@ -24,14 +27,14 @@ def lse_implicit(x, alpha): dydalpha: 2D array [nPoints x nDim] - ''' + """ bverbose = False tol = 10*spacing(1) npt, nx = x.shape - if not alpha.size == nx: + if nx != alpha.size: raise ValueError('alpha size mismatch') alphamat = tile(alpha, (npt, 1)) @@ -50,7 +53,7 @@ def lse_implicit(x, alpha): # initial eval expo = 
exp(alphamat*(h-Lmat)) alphaexpo = alphamat*expo - sumexpo = expo.sum(axis=1) + sumexpo = expo.sum(axis=1) sumalphaexpo = alphaexpo.sum(axis=1) f = log(sumexpo) dfdL = -alphaexpo.sum(axis=1)/sumexpo @@ -81,5 +84,4 @@ def lse_implicit(x, alpha): dydx = alphaexpo/(tile(sumalphaexpo, (nx, 1))).T dydalpha = (h - Lmat)*expo/(tile(sumalphaexpo, (nx, 1))).T - return y, dydx, dydalpha diff --git a/gpfit/lse_scaled.py b/gpfit/lse_scaled.py index a85d6b5..5ea05e2 100644 --- a/gpfit/lse_scaled.py +++ b/gpfit/lse_scaled.py @@ -1,7 +1,9 @@ -from numpy import array, tile, exp, log +"Implements lse_scaled" +from numpy import tile, exp, log + def lse_scaled(x, alpha): - ''' + """ Log-sum-exponential function with derivatives - sums across the second dimension of x - note that lse_scaled is a mapping R^n --> R @@ -22,7 +24,7 @@ def lse_scaled(x, alpha): dydalpha: [n-element 1D array], n is number of data points - ''' + """ _, n = x.shape diff --git a/gpfit/max_affine.py b/gpfit/max_affine.py index 39ca4c9..9a3ec0a 100644 --- a/gpfit/max_affine.py +++ b/gpfit/max_affine.py @@ -1,3 +1,4 @@ +"Implements MA residual function" import numpy as np diff --git a/gpfit/max_affine_init.py b/gpfit/max_affine_init.py deleted file mode 100644 index fec283d..0000000 --- a/gpfit/max_affine_init.py +++ /dev/null @@ -1,81 +0,0 @@ -from numpy import array, ones, hstack, zeros, tile, argmin, nonzero -from numpy.linalg import lstsq, matrix_rank -from numpy.random import permutation as randperm - -def max_affine_init(x, y, K): - ''' - initializes max-affine fit to data (y, x) - ensures that initialization has at least K+1 points per partition (i.e. - per affine function) - - INPUTS: - x: Independent variable data - 2D column vector [nPoints x nDims] - - y: Dependent variable data - 2D column vector [nPoints x 1] - - OUTPUTS: - ba: Initial b and a parameters - 2D array [(dimx+1) x k] - - ''' - defaults = {} - defaults['bverbose'] = False - options = defaults - - npt,dimx = x.shape - - X = hstack((ones((npt, 1)), x)) - ba = zeros((dimx+1,K)) - - if K*(dimx+1) > npt: - raise ValueError('Not enough data points') - - # Choose K unique indices - randinds = randperm(npt)[0:K] - - # partition based on distances - sqdists = zeros((npt, K)) - for k in range(K): - sqdists[:,k] = ((x - tile(x[randinds[k],:],(npt, 1))) ** 2).sum(1) - - #index to closest k for each data pt - mindistind = argmin(sqdists,axis=1) - - ''' - loop through each partition, making local fits - note we expand partitions that result in singular least squares problems - why this way? some points will be shared by multiple partitions, but - resulting max-affine fit will tend to be good. (as opposed to solving least-norm version) - ''' - for k in range(K): - - inds = mindistind == k - - #before fitting, check rank and increase partition size if necessary - #(this does create overlaps) - if matrix_rank(X[inds, :]) < dimx + 1: - sortdistind = sqdists[:,k].argsort() - - i = sum(inds) #i is number of points in partition - iinit = i - - if i < dimx+1: - #obviously, at least need dimx+1 points. 
fill these in before - #checking any ranks - inds[sortdistind[i+1:dimx+1]] = 1 #todo: check index - i = dimx+1 #todo: check index - - #now add points until rank condition satisfied - while matrix_rank(X[inds, :]) < dimx+1: - i = i+1 - inds[sortdistind[i]] = 1 - - if options['bverbose']: - print('max_affine_init: Added ' + repr(i-iinit) + ' points to partition ' + repr(k) + ' to maintain full rank for local fitting.') - - #now create the local fit - ba[:,k] = lstsq(X[inds.nonzero()], y[inds.nonzero()])[0][:,0] - - return ba diff --git a/gpfit/print_fit.py b/gpfit/print_fit.py index 3b37672..5415358 100644 --- a/gpfit/print_fit.py +++ b/gpfit/print_fit.py @@ -1,7 +1,10 @@ -from numpy import arange, exp, array +"Implements functions for raw fit printing from params" +from numpy import exp -def print_ISMA(A, B, alpha, d, K): +# pylint: disable=invalid-name +def print_ISMA(A, B, alpha, d, K): + "print ISMA fit from params" stringList = [None]*K printString = '1 = ' @@ -21,8 +24,9 @@ def print_ISMA(A, B, alpha, d, K): return stringList +# pylint: disable=invalid-name def print_SMA(A, B, alpha, d, K): - + "print SMA fit from params" stringList = [None]*K printString = 'w**{0:.6g} = '.format(alpha) @@ -42,11 +46,9 @@ def print_SMA(A, B, alpha, d, K): return stringList +# pylint: disable=invalid-name def print_MA(A, B, d, K): - ''' - Print set of K monomial inequality constraints - ''' - + "print MA fit from params" stringList = [None]*K for k in range(K): diff --git a/gpfit/softmax_affine.py b/gpfit/softmax_affine.py index b311067..e77f667 100644 --- a/gpfit/softmax_affine.py +++ b/gpfit/softmax_affine.py @@ -1,8 +1,11 @@ -from numpy import size, inf, nan, ones, hstack, reshape, dot, tile -from lse_scaled import lse_scaled +"Implements SMA residual function" +from numpy import size, inf, nan, ones, hstack, dot, tile +from .lse_scaled import lse_scaled + +# pylint: disable=too-many-locals def softmax_affine(x, params): - ''' + """ Evaluates softmax affine function at values of x, given a set of SMA fit parameters. 
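For reference while reading the SMA changes below, here is a minimal usage sketch of softmax_affine on toy data, mirroring the shapes exercised in gpfit/tests/t_softmax_affine.py later in this diff. K is inferred from the parameter count, and the last parameter is the softness (1/alpha).

from numpy import arange, newaxis
from gpfit.softmax_affine import softmax_affine

x = arange(0., 16.)[:, newaxis]   # [nPoints x 1] independent data, log space
params = arange(1., 6.)           # K = 2 here: [b1, a1, b2, a2, softness]
y, dydp = softmax_affine(x, params)
# y has one entry per data point; dydp is the [nPoints x nParams] Jacobian.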
@@ -20,37 +23,28 @@ def softmax_affine(x, params): 1D numpy array [nPoints] dydp: Jacobian matrix - ''' + """ + npt, dimx = x.shape ba = params[0:-1] - softness = params[-1] #equivalent of end - + softness = params[-1] alpha = 1/softness - - npt, dimx = x.shape + if alpha <= 0: + return inf*ones((npt, 1)), nan K = size(ba)/(dimx+1) ba = ba.reshape(dimx+1, K, order='F') - if alpha <= 0: - y = inf*ones((npt,1)) - dydp = nan - return y, dydp - - #augment data with column of ones - X = hstack((ones((npt,1)), x)) - - #compute affine functions - z = dot(X,ba) + X = hstack((ones((npt, 1)), x)) # augment data with column of ones + z = dot(X, ba) # compute affine functions y, dydz, dydsoftness = lse_scaled(z, alpha) - dydsoftness = - dydsoftness*(alpha**2) + dydsoftness = -dydsoftness*(alpha**2) nrow, ncol = dydz.shape repmat = tile(dydz, (dimx+1, 1)).reshape(nrow, ncol*(dimx+1), order='F') dydba = repmat * tile(X, (1, K)) - - dydsoftness.shape = (dydsoftness.size,1) + dydsoftness.shape = (dydsoftness.size, 1) dydp = hstack((dydba, dydsoftness)) return y, dydp diff --git a/gpfit/tests/aardvark.py b/gpfit/tests/aardvark.py deleted file mode 100644 index 18b212b..0000000 --- a/gpfit/tests/aardvark.py +++ /dev/null @@ -1,106 +0,0 @@ -from numpy import arange, meshgrid, hstack, linspace, log, exp, maximum, sqrt, mean, square -from gpfit.compare_fits import compare_fits -from gpfit.implicit_softmax_affine import implicit_softmax_affine - -def albatross(): - [X, Y] = meshgrid(arange(1.,6.),arange(1.,6.)) - Z = X**2+Y**2 - K = 2 - compute_fit(X,Y,Z,K) - -def alligator(): - [X, Y] = meshgrid(arange(1.,6.),arange(1.,6.)) - Z = (X**2+Y**2)**0.5 - K = 2 - compute_fit(X,Y,Z,K) - -def angelfish(): - [X, Y] = meshgrid(arange(1.,6.),arange(1.,6.)) - Z = X/Y + Y/X - K = 2 - compute_fit(X,Y,Z,K) - -def anteater(): - [X, Y] = meshgrid(arange(1.,6.),arange(1.,6.)) - Z = (X/Y + Y/X)**0.5 - K = 2 - compute_fit(X,Y,Z,K) - -def antelope(): - [X, Y] = meshgrid(linspace(1.,2.,30),linspace(0.2,0.4,30)) - Z = (1.09*X**4.27*Y**0.112 + (7.79e-5)*X**4.75/Y**6.44 + (1.36e-7)*X**8.94/Y**8.89)**(1/2.14) - K = 2 - compute_fit(X,Y,Z,K) - -def appenzeller(): - [X, Y] = meshgrid(linspace(1.,2.,30),linspace(0.2,0.4,30)) - Z = X**2 + 30*X*exp(-(Y-0.06*X)/0.039) - K = 2 - compute_fit(X,Y,Z,K) - -def armadillo(): - [X, Y] = meshgrid(linspace(1.,2.,30),linspace(0.2,0.4,30)) - Z = X**2 + 30*X*exp(-(Y-0.06*X)/0.039) - K = 3 - compute_fit(X,Y,Z,K) - -def compute_fit(X,Y,Z,K): - u1, u2 = X, Y - - w = Z.reshape(Z.size,1) - X = X.reshape(X.size,1) - Y = Y.reshape(Y.size,1) - u = hstack((X,Y)) - - x = log(u) - y = log(w) - - s = compare_fits(x,y, K,1) - - PAR_MA = s['max_affine']['params'][0][0] - PAR_SMA = s['softmax_optMAinit']['params'][0][0] - PAR_ISMA = s['implicit_originit']['params'][0][0] - - if K == 2: - # Max Affine Fitting - A = PAR_MA[[1,2,4,5]] - B = PAR_MA[[0,3]] - w_MA_1 = exp(B[0]) * u1**A[0] * u2**A[1] - w_MA_2 = exp(B[1]) * u1**A[2] * u2**A[3] - w_MA = maximum(w_MA_1, w_MA_2) - - # Softmax Affine Fitting - A = PAR_SMA[[1,2,4,5]] - B = PAR_SMA[[0,3]] - alpha = 1.0/PAR_SMA[-1] - w_SMA = (exp(alpha*B[0]) * u1**(alpha*A[0]) * u2**(alpha*A[1]) + - exp(alpha*B[1]) * u1**(alpha*A[2]) * u2**(alpha*A[3]) - )**(1.0/alpha) - - elif K ==3: - A = PAR_MA[[1,2,4,5,7,8]] - B = PAR_MA[[0,3,6]] - w_MA_1 = exp(B[0]) * u1**A[0] * u2**A[1] - w_MA_2 = exp(B[1]) * u1**A[2] * u2**A[3] - w_MA_3 = exp(B[2]) * u1**A[3] * u2**A[4] - w_MA = maximum(w_MA_1, w_MA_2, w_MA_3) - - A = PAR_SMA[[1,2,4,5,7,8]] - B = PAR_SMA[[0,3,6]] - alpha = 1.0/PAR_SMA[-1] - 
w_SMA = (exp(alpha*B[0]) * u1**(alpha*A[0]) * u2**(alpha*A[1]) + - exp(alpha*B[1]) * u1**(alpha*A[2]) * u2**(alpha*A[3]) + - exp(alpha*B[2]) * u1**(alpha*A[4]) * u2**(alpha*A[5]) - )**(1.0/alpha) - - MA_rms_pct_error = sqrt(mean(square((w_MA - Z)/Z))) - print MA_rms_pct_error - SMA_rms_pct_error = sqrt(mean(square((w_SMA - Z)/Z))) - print SMA_rms_pct_error - - # Implicit Softmax Affine Fitting - w_ISMA, _ = implicit_softmax_affine(x,PAR_ISMA) - w_ISMA = exp(w_ISMA) - ISMA_rms_pct_error = sqrt(mean(square((w_ISMA - w.T[0])/w.T[0]))) - print ISMA_rms_pct_error - diff --git a/gpfit/tests/run_tests.py b/gpfit/tests/run_tests.py index 08fd73c..e3d7fe7 100644 --- a/gpfit/tests/run_tests.py +++ b/gpfit/tests/run_tests.py @@ -1,36 +1,33 @@ -import unittest +"Runs all tests" from gpkit.tests.helpers import run_tests TESTS = [] from gpfit.tests import t_LM -TESTS += t_LM.tests - -from gpfit.tests import t_generic_resid_fun -TESTS += t_generic_resid_fun.tests +TESTS += t_LM.TESTS from gpfit.tests import t_lse_implicit -TESTS += t_lse_implicit.tests +TESTS += t_lse_implicit.TESTS from gpfit.tests import t_lse_scaled -TESTS += t_lse_scaled.tests +TESTS += t_lse_scaled.TESTS -from gpfit.tests import t_max_affine_init -TESTS += t_max_affine_init.tests +from gpfit.tests import t_ba_init +TESTS += t_ba_init.TESTS from gpfit.tests import t_max_affine -TESTS += t_max_affine.tests +TESTS += t_max_affine.TESTS from gpfit.tests import t_softmax_affine -TESTS += t_softmax_affine.tests +TESTS += t_softmax_affine.TESTS from gpfit.tests import t_implicit_softmax_affine -TESTS += t_implicit_softmax_affine.tests +TESTS += t_implicit_softmax_affine.TESTS from gpfit.tests import t_print_fit TESTS += t_print_fit.TESTS from gpfit.tests import t_ex6_3 -TESTS += t_ex6_3.tests +TESTS += t_ex6_3.TESTS from gpfit.tests import t_examples TESTS += t_examples.TESTS diff --git a/gpfit/tests/seed.py b/gpfit/tests/seed.py deleted file mode 100644 index 976e5fd..0000000 --- a/gpfit/tests/seed.py +++ /dev/null @@ -1,2 +0,0 @@ -"Default seed for random number generation" -SEED = 33404 diff --git a/gpfit/tests/t_LM.py b/gpfit/tests/t_LM.py index 61eb40b..e0afe91 100644 --- a/gpfit/tests/t_LM.py +++ b/gpfit/tests/t_LM.py @@ -1,19 +1,21 @@ +"Tests levenberg_marquardt" import unittest -from gpfit.LM import LM -from gpfit.max_affine import max_affine -from gpfit.generic_resid_fun import generic_resid_fun from numpy import arange, newaxis +from gpfit.levenberg_marquardt import levenberg_marquardt +from gpfit.max_affine import max_affine + -class t_LM(unittest.TestCase): - - def rfun (p): return generic_resid_fun(max_affine, - arange(0.,16.)[:,newaxis], - arange(0.,16.)[:,newaxis], - p) - residfun = rfun - initparams = arange(1.,5.) +def rfun(params): + "A specific residual function." + [yhat, drdp] = max_affine(arange(0., 16.)[:, newaxis], params) + r = yhat - arange(0., 16.)[:, newaxis].T[0] + return r, drdp - params, RMStraj = LM(residfun, initparams) + +class t_levenberg_marquardt(unittest.TestCase): + "Tests levenberg_marquardt" + initparams = arange(1., 5.) 
+ params, RMStraj = levenberg_marquardt(rfun, initparams) def test_params_size(self): self.assertEqual(self.params.size, self.initparams.size) @@ -21,20 +23,21 @@ def test_params_size(self): def test_params_ndim(self): self.assertEqual(self.params.ndim, 1) - def test_RMStraj_shape(self): + def test_rmstraj_shape(self): # self.assertEqual(self.RMStraj.shape, (self.x.size, self.ba.size)) pass - def test_RMStraj_ndim(self): + def test_rmstraj_ndim(self): + self.assertEqual(self.RMStraj.ndim, 1) -tests = [t_LM] +TESTS = [t_levenberg_marquardt] if __name__ == '__main__': - suite = unittest.TestSuite() - loader = unittest.TestLoader() + SUITE = unittest.TestSuite() + LOADER = unittest.TestLoader() - for t in tests: - suite.addTests(loader.loadTestsFromTestCase(t)) + for t in TESTS: + SUITE.addTests(LOADER.loadTestsFromTestCase(t)) - unittest.TextTestRunner(verbosity=2).run(suite) + unittest.TextTestRunner(verbosity=2).run(SUITE) diff --git a/gpfit/tests/t_max_affine_init.py b/gpfit/tests/t_ba_init.py similarity index 67% rename from gpfit/tests/t_max_affine_init.py rename to gpfit/tests/t_ba_init.py index 77c1fb5..4803384 100644 --- a/gpfit/tests/t_max_affine_init.py +++ b/gpfit/tests/t_ba_init.py @@ -1,17 +1,22 @@ -"unit tests for max_affine_init function" +"unit tests for ba_init function" import unittest import numpy as np from numpy import arange, newaxis, vstack, log, exp from numpy.random import random_sample -from gpfit.max_affine_init import max_affine_init -from .seed import SEED +from gpfit.ba_init import ba_init + +SEED = 33404 -class TestMaxAffineInitK2(unittest.TestCase): +class TestMaxAffineInitK2(unittest.TestCase): + """ + This unit test ensures that max affine init produces an array + of the expected shape and size + """ x = arange(0., 16.)[:, newaxis] y = arange(0., 16.)[:, newaxis] K = 2 - ba = max_affine_init(x, y, K) + ba = ba_init(x, y, K) def test_ba_ndim_k2(self): self.assertEqual(self.ba.ndim, 2) @@ -21,11 +26,12 @@ def test_ba_shape_k2(self): _, dimx = self.x.shape self.assertEqual(self.ba.shape, (dimx+1, self.K)) + class TestMaxAffineInitK4(unittest.TestCase): - ''' + """ This unit test ensures that max affine init produces an array of the expected shape and size - ''' + """ np.random.seed(SEED) Vdd = random_sample(1000,) + 1 Vth = 0.2*random_sample(1000,) + 0.2 @@ -37,21 +43,19 @@ class TestMaxAffineInitK4(unittest.TestCase): y = y.reshape(y.size, 1) K = 4 - ba = max_affine_init(x,y,K) + ba = ba_init(x, y, K) def test_ba_shape_k4(self): self.assertEqual(self.ba.shape, (3, 4)) - - -tests = [TestMaxAffineInitK2, +TESTS = [TestMaxAffineInitK2, TestMaxAffineInitK4] if __name__ == '__main__': - suite = unittest.TestSuite() - loader = unittest.TestLoader() + SUITE = unittest.TestSuite() + LOADER = unittest.TestLoader() - for t in tests: - suite.addTests(loader.loadTestsFromTestCase(t)) + for t in TESTS: + SUITE.addTests(LOADER.loadTestsFromTestCase(t)) - unittest.TextTestRunner(verbosity=2).run(suite) + unittest.TextTestRunner(verbosity=2).run(SUITE) diff --git a/gpfit/tests/t_ex6_3.py b/gpfit/tests/t_ex6_3.py index c64e324..8f0dbf1 100644 --- a/gpfit/tests/t_ex6_3.py +++ b/gpfit/tests/t_ex6_3.py @@ -3,73 +3,74 @@ import numpy as np from numpy import log, exp, vstack from numpy.random import random_sample -from .seed import SEED from gpfit.fit import fit +SEED = 33404 + + class TestEx63ISMA(unittest.TestCase): - ''' - ISMA unit tests based on example 6.3 from GPfit paper - ''' - np.random.seed(SEED) - Vdd = random_sample(1000,) + 1 - Vth = 0.2*random_sample(1000,) + 0.2 - P 
= Vdd**2 + 30*Vdd*exp(-(Vth-0.06*Vdd)/0.039) - u = vstack((Vdd,Vth)) - x = log(u) - y = log(P) - K = 4 - - cstrt, rms_error = fit(x, y, K, "ISMA") + "ISMA unit tests based on example 6.3 from GPfit paper" def test_rms_error(self): - self.assertTrue(self.rms_error < 5e-4) + np.random.seed(SEED) + Vdd = random_sample(1000,) + 1 + Vth = 0.2*random_sample(1000,) + 0.2 + P = Vdd**2 + 30*Vdd*exp(-(Vth-0.06*Vdd)/0.039) + u = vstack((Vdd, Vth)) + x = log(u) + y = log(P) + K = 4 + + _, rms_error = fit(x, y, K, "ISMA") + + self.assertTrue(rms_error < 5e-4) + class TestEx63SMA(unittest.TestCase): - ''' - SMA unit tests based on example 6.3 from GPfit paper - ''' - np.random.seed(SEED) - Vdd = random_sample(1000,) + 1 - Vth = 0.2*random_sample(1000,) + 0.2 - P = Vdd**2 + 30*Vdd*exp(-(Vth-0.06*Vdd)/0.039) - u = vstack((Vdd,Vth)) - x = log(u) - y = log(P) - K = 4 - - cstrt, rms_error = fit(x, y, K, "SMA") + "SMA unit tests based on example 6.3 from GPfit paper" def test_rms_error(self): - self.assertTrue(self.rms_error < 5e-4) + np.random.seed(SEED) + Vdd = random_sample(1000,) + 1 + Vth = 0.2*random_sample(1000,) + 0.2 + P = Vdd**2 + 30*Vdd*exp(-(Vth-0.06*Vdd)/0.039) + u = vstack((Vdd, Vth)) + x = log(u) + y = log(P) + K = 4 + + _, rms_error = fit(x, y, K, "SMA") + + self.assertTrue(rms_error < 5e-4) + class TestEx63MA(unittest.TestCase): - ''' - MA unit tests based on example 6.3 from GPfit paper - ''' - np.random.seed(SEED) - Vdd = random_sample(1000,) + 1 - Vth = 0.2*random_sample(1000,) + 0.2 - P = Vdd**2 + 30*Vdd*exp(-(Vth-0.06*Vdd)/0.039) - u = vstack((Vdd,Vth)) - x = log(u) - y = log(P) - K = 4 - - cstrt, rms_error = fit(x, y, K, "MA") + "MA unit tests based on example 6.3 from GPfit paper" def test_rms_error(self): - self.assertTrue(self.rms_error < 1e-2) + np.random.seed(SEED) + Vdd = random_sample(1000,) + 1 + Vth = 0.2*random_sample(1000,) + 0.2 + P = Vdd**2 + 30*Vdd*exp(-(Vth-0.06*Vdd)/0.039) + u = vstack((Vdd, Vth)) + x = log(u) + y = log(P) + K = 4 + + _, rms_error = fit(x, y, K, "MA") + + self.assertTrue(rms_error < 1e-2) -tests = [TestEx63ISMA, +TESTS = [TestEx63ISMA, TestEx63SMA, TestEx63MA] if __name__ == '__main__': - suite = unittest.TestSuite() - loader = unittest.TestLoader() + SUITE = unittest.TestSuite() + LOADER = unittest.TestLoader() - for t in tests: - suite.addTests(loader.loadTestsFromTestCase(t)) + for t in TESTS: + SUITE.addTests(LOADER.loadTestsFromTestCase(t)) - unittest.TextTestRunner(verbosity=2).run(suite) + unittest.TextTestRunner(verbosity=2).run(SUITE) diff --git a/gpfit/tests/t_examples.py b/gpfit/tests/t_examples.py index 342f417..bf9ad72 100644 --- a/gpfit/tests/t_examples.py +++ b/gpfit/tests/t_examples.py @@ -1,7 +1,6 @@ -"""Unit testing of tests in docs/source/examples""" +"Unit testing of tests in docs/source/examples" import unittest import os -import numpy as np from gpkit.tests.helpers import generate_example_tests diff --git a/gpfit/tests/t_generic_resid_fun.py b/gpfit/tests/t_generic_resid_fun.py deleted file mode 100644 index 53c2fbe..0000000 --- a/gpfit/tests/t_generic_resid_fun.py +++ /dev/null @@ -1,36 +0,0 @@ -import unittest -from numpy import arange, newaxis -from gpfit.max_affine import max_affine -from gpfit.generic_resid_fun import generic_resid_fun - -class t_generic_resid_fun(unittest.TestCase): - - yfun = max_affine - xdata = arange(1.,11.)[:,newaxis] - ydata = arange(1.,11.)[:,newaxis] - params = arange(1.,5.) 
- r, drdp = generic_resid_fun(yfun, xdata, ydata, params) - - def test_size_of_r(self): - self.assertTrue(self.r.size == self.xdata.size) - - def test_dimension_of_r(self): - self.assertTrue(self.r.ndim == 1) - - def test_size_of_drdp(self): - self.assertTrue(self.drdp.shape == (self.xdata.size, self.params.size)) - - def test_dimension_of_drdp(self): - self.assertTrue(self.drdp.ndim == 2) - - -tests = [t_generic_resid_fun] - -if __name__ == '__main__': - suite = unittest.TestSuite() - loader = unittest.TestLoader() - - for t in tests: - suite.addTests(loader.loadTestsFromTestCase(t)) - - unittest.TextTestRunner(verbosity=2).run(suite) diff --git a/gpfit/tests/t_implicit_softmax_affine.py b/gpfit/tests/t_implicit_softmax_affine.py index 49e10a5..90b1fe7 100644 --- a/gpfit/tests/t_implicit_softmax_affine.py +++ b/gpfit/tests/t_implicit_softmax_affine.py @@ -1,13 +1,16 @@ +"Tests implicit_softmax_affine" import unittest -from gpfit.implicit_softmax_affine import implicit_softmax_affine from numpy import arange, newaxis +from gpfit.implicit_softmax_affine import implicit_softmax_affine + class t_implicit_softmax_affine(unittest.TestCase): + "Tests implicit_softmax_affine" - x = arange(0.,16.)[:,newaxis] - params = arange(1.,7.) + x = arange(0., 16.)[:, newaxis] + params = arange(1., 7.) - y, dydp = implicit_softmax_affine(x,params) + y, dydp = implicit_softmax_affine(x, params) def test_y_size(self): self.assertEqual(self.y.size, self.x.size) @@ -21,13 +24,13 @@ def test_dydp_shape(self): def test_dydp_ndim(self): self.assertEqual(self.dydp.ndim, 2) -tests = [t_implicit_softmax_affine] +TESTS = [t_implicit_softmax_affine] if __name__ == '__main__': - suite = unittest.TestSuite() - loader = unittest.TestLoader() + SUITE = unittest.TestSuite() + LOADER = unittest.TestLoader() - for t in tests: - suite.addTests(loader.loadTestsFromTestCase(t)) + for t in TESTS: + SUITE.addTests(LOADER.loadTestsFromTestCase(t)) - unittest.TextTestRunner(verbosity=2).run(suite) + unittest.TextTestRunner(verbosity=2).run(SUITE) diff --git a/gpfit/tests/t_lse_implicit.py b/gpfit/tests/t_lse_implicit.py index cdb5a83..2f778cd 100644 --- a/gpfit/tests/t_lse_implicit.py +++ b/gpfit/tests/t_lse_implicit.py @@ -3,14 +3,16 @@ import numpy as np from numpy import array, arange from gpfit.lse_implicit import lse_implicit -from .seed import SEED + +SEED = 33404 + class TestLSEimplicit1D(unittest.TestCase): "tests with one-dimensional input" x = arange(1., 31.).reshape(15, 2) alpha = arange(1., 3.) 
- y, dydx, dydalpha = lse_implicit(x,alpha) + y, dydx, dydalpha = lse_implicit(x, alpha) def test_y_ndim(self): self.assertEqual(self.y.ndim, 1) @@ -36,7 +38,7 @@ class TestLSEimplicit2D(unittest.TestCase): np.random.seed(SEED) K = 4 - x = np.random.rand(1000,K) + x = np.random.rand(1000, K) alpha = array([1.499, 13.703, 3.219, 4.148]) y, dydx, dydalpha = lse_implicit(x, alpha) @@ -47,14 +49,14 @@ def test_dydx_shape(self): def test_dydalpha_shape(self): self.assertEqual(self.dydalpha.shape, self.x.shape) -tests = [TestLSEimplicit1D, +TESTS = [TestLSEimplicit1D, TestLSEimplicit2D] if __name__ == '__main__': - suite = unittest.TestSuite() - loader = unittest.TestLoader() + SUITE = unittest.TestSuite() + LOADER = unittest.TestLoader() - for t in tests: - suite.addTests(loader.loadTestsFromTestCase(t)) + for t in TESTS: + SUITE.addTests(LOADER.loadTestsFromTestCase(t)) - unittest.TextTestRunner(verbosity=2).run(suite) + unittest.TextTestRunner(verbosity=2).run(SUITE) diff --git a/gpfit/tests/t_lse_scaled.py b/gpfit/tests/t_lse_scaled.py index e629c3b..abd3333 100644 --- a/gpfit/tests/t_lse_scaled.py +++ b/gpfit/tests/t_lse_scaled.py @@ -1,12 +1,15 @@ +"Test lse_scaled" import unittest +from numpy import arange from gpfit.lse_scaled import lse_scaled -from numpy import arange, newaxis + class t_lse_scaled(unittest.TestCase): + "Test lse_implicit" - x = arange(1.,31.).reshape(15,2) + x = arange(1., 31.).reshape(15, 2) alpha = 1. - y, dydx, dydalpha = lse_scaled(x,alpha) + y, dydx, dydalpha = lse_scaled(x, alpha) def test_y_ndim(self): self.assertEqual(self.y.ndim, 1) @@ -33,13 +36,13 @@ def test_dydalpha_size(self): # test alpha is integer? negative? 0? array? -tests = [t_lse_scaled] +TESTS = [t_lse_scaled] if __name__ == '__main__': - suite = unittest.TestSuite() - loader = unittest.TestLoader() + SUITE = unittest.TestSuite() + LOADER = unittest.TestLoader() - for t in tests: - suite.addTests(loader.loadTestsFromTestCase(t)) + for t in TESTS: + SUITE.addTests(LOADER.loadTestsFromTestCase(t)) - unittest.TextTestRunner(verbosity=2).run(suite) + unittest.TextTestRunner(verbosity=2).run(SUITE) diff --git a/gpfit/tests/t_max_affine.py b/gpfit/tests/t_max_affine.py index 800be6c..2ba0457 100644 --- a/gpfit/tests/t_max_affine.py +++ b/gpfit/tests/t_max_affine.py @@ -1,13 +1,16 @@ +"Test max_affine" import unittest -from gpfit.max_affine import max_affine from numpy import arange, newaxis +from gpfit.max_affine import max_affine + class t_max_affine(unittest.TestCase): + "Test max_affine" - x = arange(0.,16.)[:,newaxis] - ba = arange(1.,7.).reshape(2,3) + x = arange(0., 16.)[:, newaxis] + ba = arange(1., 7.).reshape(2, 3) - y, dydba = max_affine(x,ba) + y, dydba = max_affine(x, ba) def test_y_size(self): self.assertEqual(self.y.size, self.x.size) @@ -21,13 +24,13 @@ def test_dydba_shape(self): def test_dydba_ndim(self): self.assertEqual(self.dydba.ndim, 2) -tests = [t_max_affine] +TESTS = [t_max_affine] if __name__ == '__main__': - suite = unittest.TestSuite() - loader = unittest.TestLoader() + SUITE = unittest.TestSuite() + LOADER = unittest.TestLoader() - for t in tests: - suite.addTests(loader.loadTestsFromTestCase(t)) + for t in TESTS: + SUITE.addTests(LOADER.loadTestsFromTestCase(t)) - unittest.TextTestRunner(verbosity=2).run(suite) + unittest.TextTestRunner(verbosity=2).run(SUITE) diff --git a/gpfit/tests/t_print_fit.py b/gpfit/tests/t_print_fit.py index 2241769..0f9d1fb 100644 --- a/gpfit/tests/t_print_fit.py +++ b/gpfit/tests/t_print_fit.py @@ -1,7 +1,7 @@ "unit tests for gpfit.print_fit 
module" import unittest -from gpfit.print_fit import print_MA, print_SMA, print_ISMA from numpy import array +from gpfit.print_fit import print_MA, print_SMA, print_ISMA A = array([2, 3, 4, 6, 7, 8, 10, 11, 12]) B = array([1, 5, 9]) diff --git a/gpfit/tests/t_softmax_affine.py b/gpfit/tests/t_softmax_affine.py index 6b78a3d..adfd117 100644 --- a/gpfit/tests/t_softmax_affine.py +++ b/gpfit/tests/t_softmax_affine.py @@ -1,13 +1,16 @@ +"Tests softmax_affine" import unittest -from gpfit.softmax_affine import softmax_affine from numpy import arange, newaxis +from gpfit.softmax_affine import softmax_affine + class t_softmax_affine(unittest.TestCase): + "Tests softmax_affine" - x = arange(0.,16.)[:,newaxis] - params = arange(1.,6.) + x = arange(0., 16.)[:, newaxis] + params = arange(1., 6.) - y, dydp = softmax_affine(x,params) + y, dydp = softmax_affine(x, params) def test_y_size(self): self.assertEqual(self.y.size, self.x.size) @@ -21,13 +24,13 @@ def test_dydp_shape(self): def test_dydp_ndim(self): self.assertEqual(self.dydp.ndim, 2) -tests = [t_softmax_affine] +TESTS = [t_softmax_affine] if __name__ == '__main__': - suite = unittest.TestSuite() - loader = unittest.TestLoader() + SUITE = unittest.TestSuite() + LOADER = unittest.TestLoader() - for t in tests: - suite.addTests(loader.loadTestsFromTestCase(t)) + for t in TESTS: + SUITE.addTests(LOADER.loadTestsFromTestCase(t)) - unittest.TextTestRunner(verbosity=2).run(suite) + unittest.TextTestRunner(verbosity=2).run(SUITE)