Request: converting R code to MATLAB (second request for help with this translation).

This post presents self-paced learning regularization schemes — the hard, linear, logarithmic, and mixture schemes — with their objective functions, gradients, and closed-form solutions. A toy example shows how to optimize the sample-weight vector with and without curriculum constraints and how to derive the resulting curriculum. The curricula are then compared against a ground-truth ordering using Kendall's tau rank correlation.

## ---- (Code 1) lib.r: self-paced learning schemes ----
require(Matrix)

require(tseries)

# Self-paced objective, hard scheme: E(v) = v'loss - lambda*sum(v).
#   v      sample weight vector (each entry in [0, 1])
#   loss   per-sample loss vector, same length as v
#   lambda self-paced "model age" parameter (loss threshold)
# Returns the objective value (1x1 matrix from %*%).
eval_f_hard = function(v, loss, lambda) {
  obj = v %*% loss - lambda*sum(v)
  return(obj)
}

# Gradient of the hard-scheme objective w.r.t. v: dE/dv = loss - lambda.
# v is unused in the formula but kept in the signature because constrOptim
# always passes the current iterate as the first argument.
grad_f_hard = function(v, loss, lambda) {
  grads = loss - lambda
  return(grads)
}

# Self-paced objective, linear soft scheme:
#   E(v) = v'loss + (lambda/2)*sum(v^2) - lambda*sum(v)
#   v      sample weight vector (each entry in [0, 1])
#   loss   per-sample loss vector, same length as v
#   lambda self-paced "model age" parameter
eval_f_linear = function(v, loss, lambda) {
  obj = v %*% loss + 1/2*lambda*sum(v^2) - lambda*sum(v)
  return(obj)
}

# Gradient of the linear-scheme objective w.r.t. v:
#   dE/dv = loss + lambda*v - lambda
grad_f_linear = function(v, loss, lambda) {
  grads = loss + lambda * v - lambda
  return(grads)
}

# Self-paced objective, logarithmic soft scheme:
#   E(v) = v'loss + zeta*sum(v) - sum(zeta^v)/log(zeta),  zeta = 1 - lambda
# NOTE: requires 0 < lambda < 1 so that zeta is in (0,1) and log(zeta) != 0.
eval_f_log = function(v, loss, lambda) {
  zeta = 1-lambda
  obj = v %*% loss + zeta*sum(v) - sum(zeta^v)/log(zeta)
  return(obj)
}

# Gradient of the log-scheme objective w.r.t. v:
#   dE/dv = loss + zeta - zeta^v,  zeta = 1 - lambda
grad_f_log = function(v, loss, lambda) {
  zeta = 1-lambda
  grads = loss + zeta - zeta^v
  return(grads)
}

# Self-paced objective, mixture scheme:
#   E(v) = v'loss - zeta*sum(log(v + zeta/lambda)),
#   zeta = lambda*lambda2/(lambda - lambda2)
#   lambda  represents lambda1 in the paper (requires lambda > lambda2 > 0)
#   lambda2 second threshold of the mixture scheme
eval_f_mixture = function(v, loss, lambda, lambda2) {
  zeta = lambda*lambda2/(lambda-lambda2)
  obj = v %*% loss - zeta * sum(log(v+zeta/lambda))
  return(obj)
}

# Gradient of the mixture-scheme objective w.r.t. v:
#   dE/dv = loss - zeta*lambda/(v*lambda + zeta),
#   zeta = lambda*lambda2/(lambda - lambda2)
#   lambda represents lambda1 in the paper (requires lambda > lambda2 > 0)
grad_f_mixture = function(v, loss, lambda, lambda2) {
  zeta = lambda*lambda2/(lambda-lambda2)
  grads = loss - zeta*lambda/(v*lambda+zeta)
  return(grads)
}

# Closed-form solution, hard scheme: v* = 1 if loss < lambda, else 0.
#   loss   per-sample loss vector
#   lambda loss threshold
# Returns a binary weight vector of the same length as loss.
closedform_hard = function(loss, lambda) {
  v = replicate(length(loss), 0)
  v[which(loss >= lambda)] = 0
  # Line truncated in the scrape; reconstructed from the hard-scheme
  # solution (select every sample whose loss is below the threshold).
  v[which(loss < lambda)] = 1
  return(v)
}

# Closed-form solution, linear soft scheme:
#   v* = 1 - loss/lambda for loss < lambda, else 0.
closedform_linear = function(loss, lambda) {
  v = replicate(length(loss), 0)
  v = -1/lambda*loss+1
  v[which(loss>=lambda)]=0
  return(v)
}

# Closed-form solution, logarithmic soft scheme:
#   v* = log(loss + zeta)/log(zeta) for loss < lambda, else 0,
#   zeta = 1 - lambda (requires 0 < lambda < 1).
closedform_log = function(loss, lambda) {
  zeta = 1-lambda
  v = replicate(length(loss), 0)
  v = 1/log(zeta)*log(loss+zeta)
  v[which(loss>=lambda)]=0
  return(v)
}

# Closed-form solution, mixture scheme:
#   v* = 1                              if loss <= lambda2
#   v* = (lambda - loss)*zeta/(lambda*loss)  if lambda2 < loss < lambda
#   v* = 0                              if loss >= lambda
#   zeta = lambda*lambda2/(lambda - lambda2)
#   lambda represents lambda1 in the paper (requires lambda > lambda2 > 0)
closedform_mixture = function(loss, lambda, lambda2) {
  zeta = (lambda*lambda2)/(lambda-lambda2)
  v = (lambda-loss)*zeta/(lambda*loss)
  v[which(loss>=lambda)] = 0
  v[which(loss<=lambda2)] = 1
  return(v)
}

# Obtain the curriculum (sample order) from a hard-scheme solution.
#   solution  weight vector from the optimizer (approximately binary)
#   loss      per-sample loss vector
# Returns sample indices: selected samples (v = 1) first, then unselected
# ones, each group ordered by increasing loss.
getcurriculum_hard = function(solution, loss) {
  # Snap near-binary values to exactly 0/1 — the optimizer's solution
  # may carry small rounding errors.
  solution[which(solution > 0.9)] = 1
  solution[which(solution < 0.1)] = 0
  # Selected samples, easiest (lowest loss) first ...
  idx1 = which(solution == 1)
  idx1 = idx1[sort(loss[idx1], index.return=TRUE)$ix]
  # ... followed by unselected samples, also sorted by loss.
  idx0 = which(solution == 0)
  idx0 = idx0[sort(loss[idx0], index.return=TRUE)$ix]
  return(c(idx1, idx0))
}

## ---- (Code 2) demo script: SPCL toy example ----
source("lib.r")

# Banner: this script demonstrates Self-Paced Curriculum Learning (SPCL)
# on a toy problem; it expects lib.r (the scheme objectives, gradients and
# closed-form solutions) to have been sourced already.
print("##############################################")
print("This script lists the implementation of SPCL, and a toy example in our paper.")
print("##############################################")

######################################
######## start of the script #########
######################################

#0) load data
# Six toy samples named "a".."f", each with a fixed per-sample loss.
id = letters[1:6]
myloss = c(0.1,0.2,0.4,0.6,0.5,0.3)
print("####### input loss ######")
print(myloss)

#1) optimize v without curriculum constraints

{
  v0 = replicate(length(id),0)          # initial values (all-zero is feasible)
  tolerance = 10^-7                     # small constant for optimization accuracy
  lambda = 0.83333                      # self-paced learning age parameter

  # Box constraints for constrOptim, expressed as ui %*% v - ci >= 0.
  u2 = diag(replicate(length(v0),1))    # v >= 0 - 10^-7
  u3 = -1*diag(replicate(length(v0),1)) # -v >= -1 - 10^-7, i.e. v <= 1 + 10^-7
  ui = rbind(u2,u3)
  c2 = replicate(length(v0),-1*tolerance)
  c3 = -1*replicate(length(v0),1+tolerance)
  ci = c(c2,c3)

  # Fail fast if the initial point is infeasible. (The original evaluated
  # this comparison and silently discarded the result.)
  stopifnot(ui %*% v0 - ci >= 0)

  # hard scheme (the original comment mislabeled this call, which uses
  # eval_f_hard/grad_f_hard, as "linear soft scheme")
  solution1 = constrOptim(theta = v0,
                          f = eval_f_hard,
                          grad = grad_f_hard,
                          ui = ui,
                          ci = ci,
                          loss = myloss, lambda = lambda)$par

  # log soft scheme
  solution2 = constrOptim(theta = v0,
                          f = eval_f_log,
                          grad = grad_f_log,
                          ui = ui,
                          ci = ci,
                          loss = myloss, lambda = lambda)$par

  print("##############################################")
  print("--hard scheme w/o curriculum Constraint")
  print(solution1)
  print("--log scheme w/o curriculum Constraint")
  print(solution2)

  # Compare the numerical optima with the closed-form solutions of the
  # unconstrained problems; the MSE should be near zero.
  closed_solution1 = closedform_hard(myloss,lambda)
  closed_solution2 = closedform_log(myloss,lambda)
  print("##############################################")
  # Fixed label: solution1 is the HARD scheme (the original printed "linear").
  print(paste("--hard scheme MSE with closed-form solution", sum((solution1-closed_solution1)^2)))
  print(paste("--log scheme MSE with closed-form solution", sum((solution2-closed_solution2)^2)))
  print("##############################################")

  # Print the curriculum (ordered by loss).
  print("SPL curriculum (ordered by loss)")
  print(id[getcurriculum_hard(solution1, myloss)])
}

#4) calculate curriculum constraints
# One linear curriculum constraint over the six samples, used below as
# t(A) %*% v <= c.
A = matrix(0, nrow=length(id), ncol=1)
A[,1] = c(0.1, 0.0, 0.4, 0.3, 0.5, 1.0)                #curriculum constraints matrix
# NOTE: this shadows base R's c() with a variable; subsequent c(...) calls
# still resolve to the function because R looks up functions separately.
c = c(1)                                                                        #curriculum constraints vector
print("####### A matrix ######")
print(A)

#5) optimize v with modality constraint (A)
{
  v0 = replicate(length(id),0)          # initial values (all-zero is feasible)
  tolerance = 10^-7                     # small constant for optimization accuracy
  lambda=0.8333                         # self-paced learning age parameter

  # Constraint system for constrOptim, expressed as ui %*% v - ci >= 0:
  # the curriculum constraint plus the [0, 1] box on v.
  u1 = -1*t(A)                          # -t(A)v >= -c, i.e. t(A)v <= c
  u2 = diag(replicate(length(v0),1))    # v >= 0 - 10^-7
  u3 = -1*diag(replicate(length(v0),1)) # -v >= -1 - 10^-7, i.e. v <= 1 + 10^-7
  ui = rbind(u1,u2,u3)
  c1 = -1*c - tolerance
  c2 = replicate(length(v0),-1*tolerance)
  c3 = -1*replicate(length(v0),1+tolerance)
  ci = c(c1,c2,c3)

  # Fail fast if the initial point is infeasible. (The original evaluated
  # this comparison and silently discarded the result.)
  stopifnot(ui %*% v0 - ci >= 0)

  # hard scheme under the curriculum constraint
  solution3 = constrOptim(theta = v0,
                          f = eval_f_hard,
                          grad = grad_f_hard,
                          ui = ui,
                          ci = ci,
                          loss = myloss, lambda = lambda)$par

  #ui %*% solution3 - ci >= 0   # optional: verify the solution is feasible

  print("--hard scheme w/ curriculum Constraint")
  print(solution3)
  print("SPCL curriculum:")
  # Higher weight means learned earlier: order samples by decreasing weight.
  print(id[sort(solution3,index.return=TRUE, decreasing=TRUE)$ix])
}

######################################
########   Rank Correlation  #########
######################################

# Curricula to compare against the ground-truth order.
gt = c("a","b","c","d","e","f")   # ground-truth curriculum
cl = c("b","a","d","c","e","f")   # predefined curriculum (CL)
spl = id[getcurriculum_hard(solution1, myloss)]                    # SPL order
spld = id[sort(solution3,index.return=TRUE, decreasing=TRUE)$ix]   # SPCL order

# Convert each curriculum into ranks relative to the ground truth.
# match(x, gt) is the idiomatic, vectorized equivalent of the original
# unlist(lapply(x, function(e) which(gt == e))) since gt has unique entries.
gtrank = match(gt, gt)
clrank = match(cl, gt)
splrank = match(spl, gt)
spldrank = match(spld, gt)

# Kendall's tau between each curriculum and the ground truth.
print(paste("CL rank correlation =", cor(cbind(clrank,gtrank), method="kendall", use="pairwise")[1,2]))
print(paste("SPL rank correlation =", cor(cbind(splrank,gtrank), method="kendall", use="pairwise")[1,2]))
print(paste("SPCL rank correlation =", cor(cbind(spldrank,gtrank), method="kendall", use="pairwise")[1,2]))

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值