The following explains the whole simulation program and gives the code.
Load the required packages and generate the data: the target distribution is a Weibull distribution and the truncation distribution is an exponential distribution.
install.packages("rootSolve")
install.packages("Rsolnp")
library(Rsolnp)
library(rootSolve)
#################################################################
##### define a new log function log_n ###
log_n <- function(z,n){
an=n^2
ind=(z<1/an)
y1 = log(1/an)-1.5+2*an*z-(an*z)^2/2
y1[!ind]=0
z[ind] = 1
y2=log(z)
y=y1+y2
return(y)
}
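As a quick sanity check of log_n (a minimal sketch, assuming a working sample size of n = 100, so the switch point is 1/n^2 = 1e-4): above the switch point log_n must coincide with log, while below it the function follows the smooth quadratic extension and stays finite even at 0.
n <- 100
z <- c(0, 1e-6, 1e-3, 0.5, 2)        # values below and above 1/n^2
cbind(z, log_n = log_n(z, n), log = log(z))
## the last two columns agree for z >= 1/n^2; for smaller z, log_n remains finite while log(0) is -Inf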
################################################################################
#### Function used to calculate log( gamma(N+1)/gamma(N-n+1) )
digam<-function(n_big, n_small)
{ out=0
if(n_small>0 & n_big>=n_small) out=sum(log(n_big+1-c(1:n_small)))
out
}
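Because digam(N, n) is simply log( gamma(N+1)/gamma(N-n+1) ) computed as a sum of logarithms, it can be checked against R's lgamma; the argument values below are purely illustrative.
digam(300, 150)                      # sum of log(300), ..., log(151)
lgamma(301) - lgamma(151)            # same value up to floating-point error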
###########################################################
################ Function used to generate data
gen_obs <- function(wshape,wscale,erate,n_big){
obs.x <- rweibull(n_big,shape = wshape,scale = wscale)
obs.y <- rexp(n_big,rate = erate)
ind =(obs.x > obs.y)
obs <- data.frame(x=obs.x[ind],y=obs.y[ind])
return(obs)
}
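A minimal usage sketch for gen_obs (the parameter values and seed are illustrative): only pairs with x > y are retained, so the number of rows is roughly alpha * n_big.
set.seed(123)
obs <- gen_obs(wshape = 5, wscale = 2, erate = 2, n_big = 200)
nrow(obs)                            # number of retained observations
head(obs)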
1. Direct maximization of the full likelihood
The function max.like finds the maximizer of the full log-likelihood:
#########################################################
###### full log-likelihood function
###
### Function max.like calculates the estimator of [N,alpha,theta]; N is n_big
### para = [N,alpha,theta] in max.like; the corresponding estimate is par_hat
###
### Before calling the function log_likelihood, a data frame obs must be defined
###
##################################################################
max.like<-function(obs){
n = nrow(obs)
x = obs[, 1]
y = obs[, 2]
log_likelihood <- function(para){ #negative likelihood
fn <- function(lambda){
ter_log <- log_n(dexp(y, para[3]), n) - log_n(1+lambda*(pexp(x,para[3])-para[2]),n)
t <- sum(ter_log)+(para[1]-n)*log(1-para[2])+digam(para[1],n)
return(t)
}
z <- (-1)*optimize(fn, interval=c(-1000,1000), tol=0.0001)$objective
return(z)
}
par_hat <- nlminb( c(n,0.5, 1), log_likelihood, lower=c(n, 0.0001, 0.001), upper=c(100*n, 0.999, 100))$par
return(par_hat)
}
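A usage sketch for max.like on a single simulated data set (illustrative parameters and seed; not part of the simulation study itself):
set.seed(1)
obs <- gen_obs(wshape = 5, wscale = 2, erate = 2, n_big = 200)
max.like(obs)                        # returns c(N_hat, alpha_hat, theta_hat)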
The function max.like2 computes the maximum likelihood estimates of α and θ with N fixed at its true value N0, which is passed to the function as an argument. These estimates are used to evaluate the likelihood-ratio function of N and, from that, the coverage probability of the full-likelihood method. N0 must always equal the true N of the current simulation setting; otherwise the results are severely biased.
###################################################################################################
######## calculate coverage of the full-likelihood confidence interval for N
###
### max.like2 calculates the estimator of [alpha,theta] when the true n_big (N0) is given
### para = [alpha,theta] in max.like2; the corresponding estimate is par_Nhat
###
### con_int returns 1 if the true N lies in the (1-a) likelihood-ratio interval, 0 otherwise; para_star is a parameter vector
### para_star = [par_hat, par_Nhat, N0, 1-a]
###
###
####################################################################
max.like2<-function(obs, N0){ # N0 is the true n_big for the current simulation setting
n = nrow(obs)
x = obs[, 1]
y = obs[, 2]
log_likelihood.N0 <- function(para){ # negative log-likelihood with N fixed at its true value N0
fn <- function(lambda){
ter_log <- log_n(dexp(y, para[2]), n) - log_n(1+lambda*(pexp(x,para[2])-para[1]),n)
t <- sum(ter_log)+digam(N0,n)+(N0-n)*log(1-para[1])
return(t)
}
z <- (-1)*optimize(fn, interval=c(-500,500), tol=0.0001)$objective
return(z)
}
par_Nhat <- nlminb(c(0.5, 1),log_likelihood.N0,lower=c(0.0001, 0.001), upper=c(0.999, 100))$par
return(par_Nhat)
}
Compute the coverage probability of the full-likelihood method:
###########################################################################################
##### Indicator that the true N lies in the (1-a) full-likelihood confidence interval
###
con_int <- function(obs,para_star,N_true){
n = nrow(obs)
x = obs[, 1]
y = obs[, 2]
log_likelihood <- function(para){ ## profile log-likelihood at para (lambda profiled out by the inner minimization)
fn <- function(lambda){
ter_log <- log_n(dexp(y, para[3]), n) - log_n(1+lambda*(pexp(x,para[3])-para[2]),n)
t <- sum(ter_log)+(para[1]-n)*log(1-para[2])+digam(para[1],n)
return(t)
}
z <- optimize(fn, interval=c(-500,500), tol=0.001)$objective
return(z)
}
clog_likelihood <- function(N){ # N is scalar
fn <- function(lambda){
ter_log <- log_n(dexp(y,para_star[5]),n) - log_n(1+lambda*(pexp(x,para_star[5])-para_star[4]),n)
t <- sum(ter_log)+(N-n)*log(1-para_star[4])+digam(N,n)
return(t)
}
z <- optimize(fn, interval=c(-500,500), tol=0.001)$objective
return(z)
}
R2N <- 2*(log_likelihood(para = para_star[1:3]) - clog_likelihood(N=N_true))
ifelse(R2N <= qchisq(tail(para_star,1),df=1),1,0)
}
#####################################################################################
######### calculate the coverage percentage for the full likelihood
## INPUT:
## int : Vector 0 and 1 to determine if the true value is in the confidence interval
## 1 represents the truth value in the confidence interval
## K : Number of simulations
##
## OUTPUT:
## P_f : coverage percentage for the full likelihood
##
######################################################
p_f <- function(K,int){
sum(int)/K
}
Maximum likelihood estimation from the conditional likelihood (Wang):
######################################################################
########## Wang: conditional likelihood
###
### Function max.clike calculates the estimator of [N,alpha,theta]; N is n_big
### para = [N,alpha,theta] in max.clike; the corresponding estimate is par_tilde
###
### sigma : the variance of the asymptotic distribution
###
###
###
###############################################################
max.clike<-function(obs){
n = nrow(obs)
x = obs[, 1]
y = obs[, 2]
llc <- function(theta){
ter_y = log_n(dexp(y,theta),n)-log_n(pexp(x,theta),n)
-sum(ter_y)
}
theta_tilde = nlminb(1, llc, lower=c(0.001), upper=c(100))$par
N_tilde = sum(1/pexp(x, theta_tilde))
alpha_tilde = n/N_tilde
par_tilde <- c(N_tilde, alpha_tilde, theta_tilde)
#names(par_tilde) <- c("N_tilde", "alpha_tilde", "theta_tilde")
return(par_tilde)
}
Compute the coverage probability of the conditional-likelihood method:
#########################################################################
######################## Estimates of sigma #######
sigma_cest<- function(obs,theta_tilde,N_tilde){
n = nrow(obs)
x = obs[, 1]
y = obs[, 2]
#estimate of density function of truncated variable
g_tilde <- function(t){
ifelse(t >= 0,theta_tilde*exp(-theta_tilde*t),0)
}
#derivative of g with respect to theta, evaluated at theta_tilde:
#d/dtheta [theta*exp(-theta*t)] = (1-theta*t)*exp(-theta*t)
g1_tilde <- function(t){
ifelse(t >= 0,(1-theta_tilde*t)*exp(-theta_tilde*t),0)
}
#estimate of distribution function
G_tilde <- function(t){
ifelse(t>=0,1-exp(-theta_tilde*t),0)
}
#derivative of G about theta
G1_tilde <- function(t){
ifelse(t>=0,t*exp(-theta_tilde*t),0)
}
g <- function(z) {
g1_tilde(z)^2/g_tilde(z)
}
integ <- NULL
for (i in 1:n) {
xi <- x[i]
ter <- integrate(g,0,xi)$value
integ <- c(integ,ter)
}
beta_tilde <- (1/N_tilde)*sum((G_tilde(x)^(-2)))
V23_tilde <- (1/N_tilde)*sum(G1_tilde(x)/(G_tilde(x)^2))
V33_tilde <- (1/N_tilde)*(sum((G1_tilde(x)^2)/(G_tilde(x)^2))-sum(integ/G_tilde(x)))
return(beta_tilde-1-V23_tilde*(1/V33_tilde)*V23_tilde)
}
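The conditional estimates from max.clike and the variance estimate from sigma_cest are meant to be used together; a minimal sketch with illustrative parameters and seed:
set.seed(1)
obs <- gen_obs(wshape = 5, wscale = 2, erate = 2, n_big = 200)
par_tilde <- max.clike(obs)          # c(N_tilde, alpha_tilde, theta_tilde)
sigma_tilde <- sigma_cest(obs, theta_tilde = par_tilde[3], N_tilde = par_tilde[1])
sigma_tilde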
#####################################################################################
######### calculate the coverage percentage for the conditional likelihood
## INPUT:
## K : Number of simulations
## sigma_tilde : conditional-likelihood estimates of sigma (a vector in p_c)
## N_tilde : conditional-likelihood estimates of N (a vector in p_c)
## N_true : true n_big
## a : Significance level
##
## OUTPUT:
## P_c : two types of coverage percentage for the conditional likelihood
##
######################################################
p_c <- function(K,sigma_tilde,N_tilde,N_true,a){
I2 <- ifelse(abs(N_tilde^(-1/2)*(N_tilde-N_true))<=sqrt(sigma_tilde)*qnorm(1-a/2),1,0)
I3 <- ifelse(abs(N_tilde^(1/2)*log(N_tilde/N_true))<=sqrt(sigma_tilde)*qnorm(1-a/2),1,0)
list(p2=sum(I2)/K,p3=sum(I3)/K)
}
2. EM algorithm
#######################################################################################################
######################### EM Algorithm for full likelihood #####################################
#############################################################################################
#### k1 : The number of unique values of observed x
#### par =[p1,p2,...,pk1,theta]: (k1+1) dim vector,initial value of parameter
#### ep : convergence tolerance (0.001 in the call below)
#### it_max : maximum number of iterations (50000 in the call below)
##################################################################################
########################## Iterative process of EM algorithm ###################
EM_it <- function(obs){
n = nrow(obs)
x = obs[,1]
y = obs[,2]
tilde_x = unique(x)
k1 = length(tilde_x)
p_0 = 1/k1
theta_0 = sum(y)/n
alpha_0 <- sum(pexp(tilde_x,theta_0)*p_0)
par_0 = c(rep(p_0,k1),alpha_0,theta_0)
#indicator <- function(x,t){ifelse(x==t,1,0)}
# exponential density function
g <- function(x,theta){
theta*exp(-1*theta*x)
}
my.em <- function(obs,par=par_0,ep=0.001,it_max=50000){
n = nrow(obs)
x = obs[,1]
y = obs[,2]
tilde_x = unique(x)
k1 = length(tilde_x)
it_k <- 0
while(it_k <= it_max){
par0 <- par
### iteration
a <- NULL
for (j in 1:k1) {
aj <- sum(ifelse(x==tilde_x[j],1,0)) +(n/par0[k1+1])*par0[j]*(1-pexp(tilde_x[j],par0[k1+2]))
a <- c(a,aj)
}
fn <- function(theta){
b1 = sum(log_n(dexp(y,theta),n))
log_dexp <- function(x){
(log(theta)-theta*x)*par0[k1+2]*exp(-1*par0[k1+2]*x)
}
intb <- NULL
for (j in 1:k1) {
intbj = integrate(f = log_dexp, tilde_x[j],Inf)$value
intb = c(intb,intbj)
}
b2 = (n/par0[k1+1])*sum(par0[1:k1]*intb)
return(b1+b2)
}
p_1 <- a/sum(a)
theta_1 <- optimize(f=fn,interval = c(0.0001,100),tol = 0.0001,maximum = TRUE)$maximum
alpha_1 <- sum(pexp(tilde_x,theta_1)*p_1) # use the k1 unique values so the lengths match p_1
par_1 <- c(p_1,alpha_1,theta_1)
par <- par_1
## convergence check
norm <- sqrt(sum((par-par0)^2))
if(norm < ep) return(par)
it_k <- it_k+1
} # while
return(par) # if it_max is reached, return the last iterate
}#my.em
par <- my.em(obs,par=par_0,ep=0.001,it_max=50000)
p_em <- par[1:k1]
alp_em <- par[k1+1]
the_em <- par[k1+2]
N_em <- n/alp_em
par_em <- data.frame(N_em=N_em,alp_em=alp_em,the_em)
#return(list(p_em=p_hat,the_alp_em= the_alp_hat))
return(par_em)
}#EM.it
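A usage sketch for EM_it on one simulated data set (illustrative parameters and seed; a single run can take a while because of the inner numerical integrations):
set.seed(1)
obs <- gen_obs(wshape = 5, wscale = 2, erate = 2, n_big = 200)
EM_it(obs)                           # data.frame with columns N_em, alp_em, the_em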
## the coverage probability is not computed for the EM algorithm
3. The equation-solving method
#########################################################################
##################### the method of solving the equation ################
See <- function(obs){
n <- nrow(obs)
x <- obs[,1]
y <- obs[,2]
## exponential density function
g <- function(x,theta){
ifelse(x>=0,theta/exp(theta*x),0)
}
## first derivative of g with respect to theta
g1 <- function(x,theta){
ifelse(x>=0,(1-theta*x)/exp(theta*x),0)
}
## exponential distribution function
G <-function(x,theta){
ifelse(x>=0,1-1/exp(theta*x),0)
}
## first derivative of G with respect to theta
G1 <- function(x,theta){
ifelse(x>=0,x/exp(theta*x),0)
}
### parameter is a vector par=(N,alpha,theta,lambda)
model <- function(par){
c(F1 = sum(1/((par[1]-n)+(1:n)))+log_n(1-par[2],n),
F2 = (par[1]-n)/(1-par[2])- par[4]*sum(1/(1+par[4]*(G(x,par[3])-par[2]))),
F3 = sum(g1(y,par[3])/g(y,par[3])) - par[4]*sum(G1(x,par[3])/(1+par[4]*(G(x,par[3])-par[2]))),
F4 = sum((G(x,par[3])-par[2])/(1+par[4]*(G(x,par[3])-par[2])))
)
}
par0 <- c(n+10,0.5,1,1) # initial value of par; it affects the simulation results
par_See <- multiroot(f = model,start = par0,rtol = 1e-8,atol = 1e-8, ctol = 1e-8)$root
return(par_See)
}
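A usage sketch for See on one simulated data set (illustrative parameters and seed):
set.seed(1)
obs <- gen_obs(wshape = 5, wscale = 2, erate = 2, n_big = 200)
See(obs)                             # c(N_est, alpha_est, theta_est, lambda_est)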
4.1 Profile likelihood (inner step: solving the score equations with multiroot)
###########################################################################
################### the fourth method ###################################
profile_max.like1 <- function(obs){
n <- nrow(obs)
x <- obs[,1]
y <- obs[,2]
## exponential density function
g <- function(x,theta){
ifelse(x>=0,theta/exp(theta*x),0)
}
## first derivative of g with respect to theta
g1 <- function(x,theta){
ifelse(x>=0,(1-theta*x)/exp(theta*x),0)
}
## exponential distribution function
G <-function(x,theta){
ifelse(x>=0,1-1/exp(theta*x),0)
}
## first derivative of G with respect to theta
G1 <- function(x,theta){
ifelse(x>=0,x/exp(theta*x),0)
}
### the profile log-likelihood of N, obtained by solving the score equations for (alpha,theta,lambda)
f <- function(N){
## par is a vector, par = (alpha,theta,lambda)
model0 <- function(par){
c(
F1 = (N-n)/(1-par[1])- par[3]*sum(1/(1+par[3]*(G(x,par[2])-par[1]))),
F2 = sum(g1(y,par[2])/g(y,par[2])) - par[3]*sum(G1(x,par[2])/(1+par[3]*(G(x,par[2])-par[1]))),
F3 = sum((G(x,par[2])-par[1])/(1+par[3]*(G(x,par[2])-par[1])))
)
}
par0 <- c(0.5,1,1) # the initial value of par
atl <- multiroot(f = model0,start = par0, rtol = 1e-8,atol = 1e-8, ctol = 1e-8)$root # the root of (alpha,theta,lambda)
sum(log_n((N-n)+(1:n),n))+(N-n)*log_n(1-atl[1],n)+sum(log_n(g(y,atl[2]),n))-sum(log_n(1+atl[3]*(G(x,atl[2])-atl[1]),n))
}
N_est <- optimize(f, interval=c(n,400), tol=0.001,maximum = TRUE)$maximum # the maximizer is the estimate of N ($objective would be the likelihood value, not N)
## par is a vector, par = (alpha,theta,lambda)
model <- function(par){
c(
F1 = (N_est-n)/(1-par[1])- par[3]*sum(1/(1+par[3]*(G(x,par[2])-par[1]))),
F2 = sum(g1(y,par[2])/g(y,par[2])) - par[3]*sum(G1(x,par[2])/(1+par[3]*(G(x,par[2])-par[1]))),
F3 = sum((G(x,par[2])-par[1])/(1+par[3]*(G(x,par[2])-par[1])))
)
}
par0 <- c(0.5,1,1) # the initial value of par
atl_est <- multiroot(f = model,start = par0, rtol = 1e-8,atol = 1e-8, ctol = 1e-8)$root
## profile_par is a vector,profile_par.est=[N_est,alpha_est,theta_est,lambda_est]
profile_par.est <- c(N_est,atl_est)
return(profile_par.est)
}
4.2 Profile likelihood (inner step: maximizing the likelihood with nlminb)
############## Profile likelihood with the inner maximization done by nlminb ######################
############################################################################
profile_max.like2 <- function(obs){
n <- nrow(obs)
x <- obs[,1]
y <- obs[,2]
## exponential density function
g <- function(x,theta){
ifelse(x>=0,theta/exp(theta*x),0)
}
## exponential distribution function
G <-function(x,theta){
ifelse(x>=0,1-1/exp(theta*x),0)
}
############################################
###
## f is the profile likelihood of N
## par is a vector, par = (alpha,theta,lambda)
####################################################
f <- function(N) {
fun <- function(par){
(-1)*(sum(log_n((N-n)+(1:n),n))+(N-n)*log_n(1-par[1],n)+sum(log_n(g(y,par[2]),n))-sum(log_n(1+par[3]*(G(x,par[2])-par[1]),n)))
}
atl <- nlminb(start = c(0.5,5,5),objective = fun,lower = c(0.001,0.001,-500),upper = c(0.9999,100,500))$par
sum(log_n((N-n)+(1:n),n))+(N-n)*log_n(1-atl[1],n)+sum(log_n(g(y,atl[2]),n))-sum(log_n(1+atl[3]*(G(x,atl[2])-atl[1]),n))
}
N_est <- optimize(f, interval=c(n,500), tol=0.001,maximum = TRUE)$maximum # the maximizer is the estimate of N
### Replace N with N_est
fun <- function(par){
(-1)*(sum(log_n((N_est-n)+(1:n),n))+(N_est-n)*log_n(1-par[1],n)+sum(log_n(g(y,par[2]),n))-sum(log_n(1+par[3]*(G(x,par[2])-par[1]),n)))
}
atl_est <- nlminb(start = c(0.5,5,5),objective = fun,lower = c(0.001,0.001,-500),upper = c(0.9999,100,500))$par
## par_full.est is a vector: par_full.est=[N_est,alpha_est,theta_est,lambda_est]
par_full.est <- c(N_est,atl_est)
return(par_full.est)
}
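A usage sketch comparing the two profile-likelihood implementations on the same data set (illustrative parameters and seed):
set.seed(1)
obs <- gen_obs(wshape = 5, wscale = 2, erate = 2, n_big = 200)
profile_max.like1(obs)               # inner step: multiroot
profile_max.like2(obs)               # inner step: nlminb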
5. Simulation results for the four methods
5.1 Simulation settings
#################################################################################
#### Simulation in one scenario
#################################################################################
N_true <- 200
K <- 2000 #repeat times
a <- 0.05 #significance level
wshape <- 5
wscale <- 2
erate <- 2 # true theta
theta_true <- erate
comf <- function(x){
pexp(x,rate = erate)*dweibull(x,shape = wshape,scale = wscale)
}
alpha_true <- integrate(f=comf,lower = 0,upper = Inf)$value;alpha_true # true alpha
par_true <- rep(c(N_true,alpha_true,theta_true),2);par_true
The true N here is 200, so max.like2 must be called with N0 = 200 (the simulation loop below passes N0 = N_true). To study different inclusion probabilities, the following parameter settings are considered (α is the inclusion probability); a numerical check of the α column follows the table.
Setting | wshape | wscale | erate (θ) | α |
---|---|---|---|---|
Setting 1 | 5 | 2 | 2 | 0.963 |
Setting 2 | 1 | 2 | 2 | 0.8 |
Setting 3 | 1 | 2 | 1 | 0.667 |
Setting 4 | 1 | 1 | 1 | 0.5 |
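The α column can be reproduced numerically in the same way as alpha_true above; the helper alpha_for below is introduced purely for illustration.
## numerical check of the inclusion probability alpha for each setting
alpha_for <- function(wshape, wscale, erate) {
  integrate(function(x) pexp(x, rate = erate) * dweibull(x, shape = wshape, scale = wscale),
            lower = 0, upper = Inf)$value
}
alpha_for(5, 2, 2)   # Setting 1: about 0.963
alpha_for(1, 2, 2)   # Setting 2: 0.8
alpha_for(1, 2, 1)   # Setting 3: about 0.667
alpha_for(1, 1, 1)   # Setting 4: 0.5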
5.2 Simulation results and running time
All runs use K = 2000 and N = 200.
1. Direct maximum likelihood method
####################### simulating Result ################
### Method 1
par_est <- NULL
N_hat <- NULL
N_tilde <- NULL
sigma_tilde <- NULL # estimates of sigma from the conditional-likelihood method
int <- NULL
t0 <- proc.time()
for (i in 1:K) {
obs <- gen_obs(wshape, wscale, erate, N_true)
est_full <- max.like(obs)   # full-likelihood estimates [N, alpha, theta], computed once per replication
est_cond <- max.clike(obs)  # conditional-likelihood estimates [N, alpha, theta]
par_est <- rbind(par_est, c(est_full, est_cond))
ter_para_star <- c(est_full, max.like2(obs, N0 = N_true), N_true, 1-a)
N_hat <- rbind(N_hat, est_full[1])
N_tilde <- rbind(N_tilde, est_cond[1])
sigma_tilde <- rbind(sigma_tilde, sigma_cest(obs = obs, theta_tilde = est_cond[3], N_tilde = est_cond[1]))
ter_int <- con_int(obs = obs, para_star = ter_para_star, N_true = N_true)
int <- rbind(int, ter_int)
print(i)
}
t <- proc.time()-t0
print(paste0("Running time of the direct maximum likelihood method: ", t[3][[1]], " seconds"))
par_true <- rep(c(N_true,alpha_true,theta_true),2);par_true
m <- apply(par_est, 2, mean);m
sd <- apply(par_est, 2, sd);sd
bias <- abs(apply(par_est,2,mean) - par_true);bias
percentage_f <- p_f(K,int = int);percentage_f
percentage_c <- p_c(K,sigma_tilde,N_tilde,N_true,a);percentage_c
Simulation results
Each cell lists, in order, the parameter estimate, its standard deviation, and its bias.
Setting | N_f | α_f | θ_f | percentage_f | N_c | α_c | θ_c | percentage_c2 | percentage_c3 | running time |
---|---|---|---|---|---|---|---|---|---|---|
Setting 1 | 199.3616 3.6174 0.6384 | 0.9635 0.0108 0.0004 | 2.0320 0.1888 0.0320 | 94.05% | 200.1870 3.6645 0.1870 | 0.9620 0.0111 0.0011 | 2.0065 0.1869 0.0065 | 88.15% | 87.9% | 1416 s |
Setting 2 | 198.2373 17.6154 1.7627 | 0.8093 0.0533 0.0093 | 2.0228 0.2109 0.0228 | 91% | 200.2741 19.3338 0.2741 | 0.8037 0.0563 0.0037 | 2.0092 0.2097 0.0092 | 85.8% | 86.1% | 1424.87 s |
Setting 3 | 198.9731 32.7968 1.0269 | 0.6793 0.0749 0.0127 | 1.0237 0.1430 0.0237 | 91.7% | 201.5994 34.7736 1.5994 | 0.6730 0.0764 0.0063 | 1.0139 0.1420 0.0139 | 83.05% | 83.4% | 1182.45 s |
Setting 4 | 201.2625 45.9229 1.2625 | 0.5156 0.0918 0.0156 | 1.0285 0.2315 0.0285 | 93.75% | 205.3044 47.6765 5.3044 | 0.5074 0.0920 0.0074 | 1.0104 0.2299 0.0104 | 77.35% | 76.3% | 918.92 s |
2. EM method
par_em <- NULL
t0 <- proc.time()
for (i in 1:K) {
obs <- gen_obs(wshape, wscale, erate, N_true)
ter_em <- EM_it(obs)
par_em <- rbind(par_em,ter_em)
print(i)
}
t <- proc.time()-t0
print(paste0("EM方法执行时间:",t[3][[1]],"秒"))
par_true <- rep(c(N_true,alpha_true,theta_true),2);par_true
m_em <- apply(par_em, 2, mean);m_em
sd_em <- apply(par_em, 2, sd);sd_em
bias_em <- abs(apply(par_em,2,mean) - par_true[1:3]);bias_em
Simulation results for the EM method:
Each cell lists, in order, the parameter estimate, its standard deviation, and its bias.
Setting | N_em | α_em | θ_em | running time |
---|---|---|---|---|
Setting 1 | 200.2807 3.5829 0.2807 | 0.9623 0.0108 0.0008 | 2.0119 0.1832 0.0119 | 3809.43 s |
Setting 2 | 198.9456 14.6940 1.0544 | 0.8070 0.0492 0.0070 | 2.0127 0.2060 0.0127 | 3711.39 s |
Setting 3 | 199.8875 25.5943 0.1125 | 0.6768 0.0688 0.0101 | 1.0105 0.1425 0.0105 | 3803.96 s |
Setting 4 | 202.7574 44.7711 2.7574 | 0.5103 0.0916 0.0103 | 1.0161 0.2407 0.0161 | 7032.74 s |
3. Equation-solving method
par_See <- NULL # par_See = [N_est,alpha_est,theta_est,lambda_est]
t0 <- proc.time()
for (i in 1:K) {
obs <- gen_obs(wshape, wscale, erate, N_true)
par_See <- rbind(par_See,See(obs))
print(i)
}
t <- proc.time()-t0
print(paste0("解方程方法执行时间:",t[3][[1]],"秒"))
par_true <- rep(c(N_true,alpha_true,theta_true),2);par_true
m_See <- apply(par_See[,1:3], 2, mean);m_See
sd_See <- apply(par_See[,1:3], 2, sd);sd_See
bias_See <- abs(apply(par_See[,1:3],2,mean) - par_true[1:3]);bias_See
Warnings are produced while this program runs.
4. Profile-likelihood methods
# inner step: solving the score equations (multiroot)
par_profile1 <- NULL # par_profile1 =[N_est,alpha_est,theta_est,lambda_est]
t0 <- proc.time()
for (i in 1:K) {
obs <- gen_obs(wshape, wscale, erate, N_true)
par_profile1 <- rbind(par_profile1,profile_max.like1(obs))
print(i)
}
t <- proc.time()-t0
print(paste0("构建截面似然(内部解方程)方法执行时间:",t[3][[1]],"秒"))
par_true <- rep(c(N_true,alpha_true,theta_true),2);par_true
m_profile1 <- apply(par_profile1[,1:3], 2, mean);m_profile1
sd_profile1 <- apply(par_profile1[,1:3], 2, sd);sd_profile1
bias_profile1 <- abs(apply(par_profile1[,1:3],2,mean) - par_true[1:3]);bias_profile1
# inner step: direct maximization of the likelihood (nlminb)
par_profile2 <- NULL # par_profile2 =[N_est,alpha_est,theta_est,lambda_est]
t0 <- proc.time()
for (i in 1:K) {
obs <- gen_obs(wshape, wscale, erate, N_true)
par_profile2 <- rbind(par_profile2,profile_max.like2(obs))
print(i)
}
t <- proc.time()-t0
print(paste0("构建截面似然(内部解方程)方法执行时间:",t[3][[1]],"秒"))
par_true <- rep(c(N_true,alpha_true,theta_true),2);par_true
m_profile2 <- apply(par_profile2[,1:3], 2, mean);m_profile2
sd_profile2 <- apply(par_profile2[,1:3], 2, sd);sd_profile2
bias_profile2 <- abs(apply(par_profile2[,1:3],2,mean) - par_true[1:3]);bias_profile2
The last two approaches are very fast, but the results returned by the R routines they rely on have some issues; see the accompanying document for details.