The meaning of super in Python: what the super keyword means for parent classes

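A good way to see what `super` actually means in Python is to read real code that relies on it. The listing below comes from a statsmodels patch (Josef Perktold, 2015) that adds maximum penalized likelihood estimation. Notice that `PenalizedMixin` never names a concrete parent class: every overridden method (`__init__`, `loglike`, `score`, `hessian`, `fit`) calls `super(PenalizedMixin, self).<method>(...)`, which delegates to whichever class comes after `PenalizedMixin` in the method resolution order (MRO) of the model class it is eventually mixed into.
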
"""

+Created on Sun May 10 08:23:48 2015

+

+Author: Josef Perktold

+License: BSD-3

+"""++importnumpyasnp+from._penaltiesimportSCADSmoothed++classPenalizedMixin(object):+"""Mixin class for Maximum Penalized Likelihood

+

+

+ TODO: missing **kwds or explicit keywords

+

+ TODO: do we really need `pen_weight` keyword in likelihood methods?

+

+ """++def__init__(self,*args,**kwds):+super(PenalizedMixin,self).__init__(*args,**kwds)++penal=kwds.pop('penal',None)+# I keep the following instead of adding default in pop for future changes+ifpenalisNone:+# TODO: switch to unpenalized by default+self.penal=SCADSmoothed(0.1,c0=0.0001)+else:+self.penal=penal++# TODO: define pen_weight as average pen_weight? i.e. per observation+# I would have prefered len(self.endog) * kwds.get('pen_weight', 1)+# or use pen_weight_factor in signature+self.pen_weight=kwds.get('pen_weight',len(self.endog))++self._init_keys.extend(['penal','pen_weight'])++++defloglike(self,params,pen_weight=None):+ifpen_weightisNone:+pen_weight=self.pen_weight++llf=super(PenalizedMixin,self).loglike(params)+ifpen_weight!=0:+llf-=pen_weight*self.penal.func(params)++returnllf+++defloglikeobs(self,params,pen_weight=None):+ifpen_weightisNone:+pen_weight=self.pen_weight++llf=super(PenalizedMixin,self).loglikeobs(params)+nobs_llf=float(llf.shape[0])++ifpen_weight!=0:+llf-=pen_weight/nobs_llf*self.penal.func(params)++returnllf+++defscore(self,params,pen_weight=None):+ifpen_weightisNone:+pen_weight=self.pen_weight++sc=super(PenalizedMixin,self).score(params)+ifpen_weight!=0:+sc-=pen_weight*self.penal.grad(params)++returnsc+++defscoreobs(self,params,pen_weight=None):+ifpen_weightisNone:+pen_weight=self.pen_weight++sc=super(PenalizedMixin,self).scoreobs(params)+nobs_sc=float(sc.shape[0])+ifpen_weight!=0:+sc-=pen_weight/nobs_sc*self.penal.grad(params)++returnsc+++defhessian_(self,params,pen_weight=None):+ifpen_weightisNone:+pen_weight=self.pen_weight+loglike=self.loglike+else:+loglike=lambdap:self.loglike(p,pen_weight=pen_weight)++fromstatsmodels.tools.numdiffimportapprox_hess+returnapprox_hess(params,loglike)+++defhessian(self,params,pen_weight=None):+ifpen_weightisNone:+pen_weight=self.pen_weight++hess=super(PenalizedMixin,self).hessian(params)+ifpen_weight!=0:+h=self.penal.deriv2(params)+ifh.ndim==1:+hess-=np.diag(pen_weight*h)+else:+hess-=pen_weight*h++returnhess+++deffit(self,method=None,trim=None,**kwds):+# If method is None, then we choose a default method ourselves++# TODO: temporary hack, need extra fit kwds+# we need to rule out fit methods in a model that will not work with+# penalization+ifhasattr(self,'family'):# assume this identifies GLM+kwds.update({'max_start_irls':0})++# currently we use `bfgs` by default+ifmethodisNone:+method='bfgs'++iftrimisNone:+trim=False# see below infinite recursion in `fit_constrained++res=super(PenalizedMixin,self).fit(method=method,**kwds)++iftrimisFalse:+# note boolean check for "is False" not evaluates to False+returnres+else:+# TODO: make it penal function dependent+# temporary standin, only works for Poisson and GLM,+# and is computationally inefficient+drop_index=np.nonzero(np.abs(res.params)<1e-4)[0]+keep_index=np.nonzero(np.abs(res.params)>1e-4)[0]+rmat=np.eye(len(res.params))[drop_index]++# calling fit_constrained raise+# "RuntimeError: maximum recursion depth exceeded in __instancecheck__"+# fit_constrained is calling fit, recursive endless loop+ifdrop_index.any():+# todo : trim kwyword doesn't work, why not?+#res_aux = self.fit_constrained(rmat, trim=False)+res_aux=self._fit_zeros(keep_index,**kwds)+returnres_aux+else:+returnres++
