"""
+Created on Sun May 10 08:23:48 2015
+
+Author: Josef Perktold
+License: BSD-3
+"""++importnumpyasnp+from._penaltiesimportSCADSmoothed++classPenalizedMixin(object):+"""Mixin class for Maximum Penalized Likelihood
+
+
+ TODO: missing **kwds or explicit keywords
+
+ TODO: do we really need `pen_weight` keyword in likelihood methods?
+
+ """++def__init__(self,*args,**kwds):+super(PenalizedMixin,self).__init__(*args,**kwds)++penal=kwds.pop('penal',None)+# I keep the following instead of adding default in pop for future changes+ifpenalisNone:+# TODO: switch to unpenalized by default+self.penal=SCADSmoothed(0.1,c0=0.0001)+else:+self.penal=penal++# TODO: define pen_weight as average pen_weight? i.e. per observation+# I would have prefered len(self.endog) * kwds.get('pen_weight', 1)+# or use pen_weight_factor in signature+self.pen_weight=kwds.get('pen_weight',len(self.endog))++self._init_keys.extend(['penal','pen_weight'])++++defloglike(self,params,pen_weight=None):+ifpen_weightisNone:+pen_weight=self.pen_weight++llf=super(PenalizedMixin,self).loglike(params)+ifpen_weight!=0:+llf-=pen_weight*self.penal.func(params)++returnllf+++defloglikeobs(self,params,pen_weight=None):+ifpen_weightisNone:+pen_weight=self.pen_weight++llf=super(PenalizedMixin,self).loglikeobs(params)+nobs_llf=float(llf.shape[0])++ifpen_weight!=0:+llf-=pen_weight/nobs_llf*self.penal.func(params)++returnllf+++defscore(self,params,pen_weight=None):+ifpen_weightisNone:+pen_weight=self.pen_weight++sc=super(PenalizedMixin,self).score(params)+ifpen_weight!=0:+sc-=pen_weight*self.penal.grad(params)++returnsc+++defscoreobs(self,params,pen_weight=None):+ifpen_weightisNone:+pen_weight=self.pen_weight++sc=super(PenalizedMixin,self).scoreobs(params)+nobs_sc=float(sc.shape[0])+ifpen_weight!=0:+sc-=pen_weight/nobs_sc*self.penal.grad(params)++returnsc+++defhessian_(self,params,pen_weight=None):+ifpen_weightisNone:+pen_weight=self.pen_weight+loglike=self.loglike+else:+loglike=lambdap:self.loglike(p,pen_weight=pen_weight)++fromstatsmodels.tools.numdiffimportapprox_hess+returnapprox_hess(params,loglike)+++defhessian(self,params,pen_weight=None):+ifpen_weightisNone:+pen_weight=self.pen_weight++hess=super(PenalizedMixin,self).hessian(params)+ifpen_weight!=0:+h=self.penal.deriv2(params)+ifh.ndim==1:+hess-=np.diag(pen_weight*h)+else:+hess-=pen_weight*h++returnhess+++deffit(self,method=None,trim=None,**kwds):+# If method is None, then we choose a default method ourselves++# TODO: temporary hack, need extra fit kwds+# we need to rule out fit methods in a model that will not work with+# penalization+ifhasattr(self,'family'):# assume this identifies GLM+kwds.update({'max_start_irls':0})++# currently we use `bfgs` by default+ifmethodisNone:+method='bfgs'++iftrimisNone:+trim=False# see below infinite recursion in `fit_constrained++res=super(PenalizedMixin,self).fit(method=method,**kwds)++iftrimisFalse:+# note boolean check for "is False" not evaluates to False+returnres+else:+# TODO: make it penal function dependent+# temporary standin, only works for Poisson and GLM,+# and is computationally inefficient+drop_index=np.nonzero(np.abs(res.params)<1e-4)[0]+keep_index=np.nonzero(np.abs(res.params)>1e-4)[0]+rmat=np.eye(len(res.params))[drop_index]++# calling fit_constrained raise+# "RuntimeError: maximum recursion depth exceeded in __instancecheck__"+# fit_constrained is calling fit, recursive endless loop+ifdrop_index.any():+# todo : trim kwyword doesn't work, why not?+#res_aux = self.fit_constrained(rmat, trim=False)+res_aux=self._fit_zeros(keep_index,**kwds)+returnres_aux+else:+returnres++