[KDL Library Notes] Inverse Kinematics Based on the LM Algorithm

This article introduces KDL, a library providing kinematics and dynamics functionality, focusing on its inverse kinematics solvers and in particular the one based on the LM (Levenberg-Marquardt) algorithm. The LM solver computes its matrix inverse through an SVD decomposition, which keeps the iteration numerically stable. The source walkthrough shows step by step how the LM iteration proceeds: the error computation, the Jacobian, the damping factor, and the other key pieces.

1. Introduction

1.1 About the KDL Library

KDL on GitHub: https://github.com/orocos/orocos_kinematics_dynamics
The KDL library covers kinematics, dynamics, trajectory interpolation, and related functionality.
KDL provides three inverse kinematics position solvers, all of them numerical:
1. class ChainIkSolverPos_NR: Newton-Raphson iteration;
2. class ChainIkSolverPos_NR_JL: Newton-Raphson iteration with joint limits;
3. class ChainIkSolverPos_LMA: Levenberg-Marquardt (LM) iteration.
The LM-based solver is somewhat more stable than the other two; a minimal usage sketch follows.
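To give a feel for the API, here is a minimal sketch of calling the LM solver on a made-up two-link planar chain. The segment lengths, goal pose, and printout are illustrative only; the solver is constructed with the default tolerances from its header:

#include <kdl/chain.hpp>
#include <kdl/chainiksolverpos_lma.hpp>
#include <kdl/frames.hpp>
#include <kdl/jntarray.hpp>
#include <iostream>

int main() {
	// Toy chain: two revolute joints about Z with 0.4 m and 0.3 m links (illustrative values).
	KDL::Chain chain;
	chain.addSegment(KDL::Segment(KDL::Joint(KDL::Joint::RotZ),
	                              KDL::Frame(KDL::Vector(0.4, 0.0, 0.0))));
	chain.addSegment(KDL::Segment(KDL::Joint(KDL::Joint::RotZ),
	                              KDL::Frame(KDL::Vector(0.3, 0.0, 0.0))));

	KDL::ChainIkSolverPos_LMA solver(chain);// LM position solver with default tolerances

	KDL::JntArray q_init(chain.getNrOfJoints());// all-zero initial guess
	KDL::JntArray q_out(chain.getNrOfJoints());
	KDL::Frame goal(KDL::Vector(0.5, 0.2, 0.0));// a reachable goal position

	int ret = solver.CartToJnt(q_init, goal, q_out);// 0 (E_NOERROR) on success
	std::cout << "status " << ret << ", q = " << q_out(0) << ", " << q_out(1) << std::endl;
	return 0;
}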


1.2 The LM Algorithm in Brief

[Figure 1.1: steps of the LM algorithm (image not reproduced)]
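Since the figure is not reproduced, here is the loop it depicts, reconstructed from the step numbers that the source in section 2 refers to; it is the standard damped least-squares LM iteration:

1. Initialize q = q_init, lambda = tau, v = 2.
2. Compute the forward pose at q and the 6x1 pose error delta_pos to the goal.
3. Compute the Jacobian J at q.
4. Solve (J^T J + lambda*I) * diffq = J^T * delta_pos for the joint increment (via SVD, Fig. 1.2).
5. Form the candidate q_new = q + diffq.
6. If the error decreased (gain ratio rho > 0), accept q_new, recompute J, and shrink lambda.
7. Otherwise keep q, set lambda = lambda*v and v = 2*v, and retry from step 4; stop when the error, the increment, or the gradient falls below its tolerance, or after maxiter iterations.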

[Figure 1.2: the inverse in the LM step computed via an SVD decomposition (image not reproduced)]

In that inverse, each singular value sigma_i of the Jacobian is not inverted directly; it is mapped through sigma_i / (sigma_i^2 + lambda), which stays bounded even as sigma_i approaches zero:

[Figure 1.3: damping of the singular values (image not reproduced)]
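To make the damping concrete, here is a small standalone Eigen sketch of that SVD-based step. This is not KDL code; the Jacobian, error vector, and lambda are arbitrary test values. It mirrors Segment A of the source in section 2:

#include <Eigen/Dense>
#include <iostream>

int main() {
	Eigen::MatrixXd J(6, 3);// stand-in for the weighted Jacobian
	J.setRandom();
	Eigen::VectorXd e(6);// stand-in for the weighted pose error
	e.setRandom();
	double lambda = 1e-3;// damping factor

	Eigen::JacobiSVD<Eigen::MatrixXd> svd(J, Eigen::ComputeThinU | Eigen::ComputeThinV);
	Eigen::VectorXd s = svd.singularValues();
	for (int i = 0; i < s.size(); ++i)
		s(i) = s(i)/(s(i)*s(i) + lambda);// sigma_i / (sigma_i^2 + lambda), Fig. 1.3

	// dq = V * diag(sigma_i/(sigma_i^2+lambda)) * U^T * e
	Eigen::VectorXd dq = svd.matrixV()*(s.cwiseProduct(svd.matrixU().transpose()*e));
	std::cout << "dq = " << dq.transpose() << std::endl;
	return 0;
}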


2. Source Code Walkthrough

The core of the LM solver is ChainIkSolverPos_LMA::CartToJnt, which iterates from the seed q_init toward a joint vector q_out whose end-effector pose matches T_base_goal:

int ChainIkSolverPos_LMA::CartToJnt(const KDL::JntArray& q_init, const KDL::Frame& T_base_goal, KDL::JntArray& q_out) {
  if (nj != chain.getNrOfJoints())
    return (error = E_NOT_UP_TO_DATE);

  if (nj != q_init.rows() || nj != q_out.rows())
    return (error = E_SIZE_MISMATCH);
    
	using namespace KDL;
	double v      = 2;//factor by which lambda is grown after a rejected step
	double tau    = 10;//initial scale of the damping factor
	double rho;//gain ratio: actual vs. predicted error reduction
	double lambda;//damping factor of the LM iteration
	Twist t;
	double delta_pos_norm;
	Eigen::Matrix<ScalarType,6,1> delta_pos;//6x1 position/orientation error
	Eigen::Matrix<ScalarType,6,1> delta_pos_new;

	q=q_init.data.cast<ScalarType>();
	compute_fwdpos(q);//forward kinematics: T_base_head is the pose of the end-effector
	//(head) relative to base at the current joint angles q
	Twist_to_Eigen( diff( T_base_head, T_base_goal), delta_pos );//current error delta_pos,
	//a 6x1 vector: the first three entries are the translational error, the last three the
	//rotational error in axis-angle form (unit axis times angle), all expressed in the
	//base frame; step 2 in Fig. 1.1
	delta_pos=L.asDiagonal()*delta_pos;//weight the error with the diagonal matrix L
	delta_pos_norm = delta_pos.norm();
	if (delta_pos_norm<eps) {//error already below eps: the initial pose solves the problem, stop
		lastNrOfIter    =0 ;
		Twist_to_Eigen( diff( T_base_head, T_base_goal), delta_pos );
		lastDifference  = delta_pos.norm();
		lastTransDiff   = delta_pos.topRows(3).norm();
		lastRotDiff     = delta_pos.bottomRows(3).norm();
		svd.compute(jac);
		original_Aii    = svd.singularValues();
		lastSV          = svd.singularValues();
		q_out.data      = q.cast<double>();
		return (error = E_NOERROR);
	}
	compute_jacobian(q);//compute the Jacobian at q (expressed in the base frame), step 3 in Fig. 1.1
	jac = L.asDiagonal()*jac;//weight the Jacobian rows with L

	lambda = tau;
	double dnorm = 1;
	for (unsigned int i=0;i<maxiter;++i) {

		svd.compute(jac);//**********Segment A start******************//
		original_Aii = svd.singularValues();
		for (unsigned int j=0;j<original_Aii.rows();++j) {
			original_Aii(j) = original_Aii(j)/( original_Aii(j)*original_Aii(j)+lambda);
			//damped singular values; see Fig. 1.3
		}
		tmp = svd.matrixU().transpose()*delta_pos;
		tmp = original_Aii.cwiseProduct(tmp);
		diffq = svd.matrixV()*tmp;//**********Segment A end****************//
		//for the math behind Segment A, see Fig. 1.2 and Fig. 1.3
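		//Segment A computes the damped least-squares step
		//  diffq = V * diag(sigma_i/(sigma_i^2+lambda)) * U^T * delta_pos,
		//i.e. it solves (J^T J + lambda*I)*diffq = J^T * delta_pos via the SVD of J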
		grad = jac.transpose()*delta_pos;
		if (display_information) {
			std::cout << "------- iteration " << i << " ----------------\n"
					  << "  q              = " << q.transpose() << "\n"
					  << "  weighted jac   = \n" << jac << "\n"
					  << "  lambda         = " << lambda << "\n"
					  << "  eigenvalues    = " << svd.singularValues().transpose() << "\n"
					  << "  difference     = "   << delta_pos.transpose() << "\n"
					  << "  difference norm= "   << delta_pos_norm << "\n"
					  << "  proj. on grad. = "   << grad << "\n";
			std::cout << std::endl;
		}
		dnorm = diffq.lpNorm<Eigen::Infinity>();
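		//if the proposed joint increment is uniformly tiny, the iteration has stalled:
		//return the current best estimate and report it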
		if (dnorm < eps_joints) {
				lastDifference = delta_pos_norm;
				lastNrOfIter   = i;
				lastSV         = svd.singularValues();
				q_out.data     = q.cast<double>();
				compute_fwdpos(q);
				Twist_to_Eigen( diff( T_base_head, T_base_goal), delta_pos );
				lastTransDiff  = delta_pos.topRows(3).norm();
				lastRotDiff    = delta_pos.bottomRows(3).norm();
				return (error = E_INCREMENT_JOINTS_TOO_SMALL);
		}

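		//grad = J^T * delta_pos: when this is essentially zero, the error is orthogonal
		//to the column space of J, so no joint motion can reduce it further (local minimum)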
		if (grad.transpose()*grad < eps_joints*eps_joints ) {
			compute_fwdpos(q);
			Twist_to_Eigen( diff( T_base_head, T_base_goal), delta_pos );
			lastDifference = delta_pos_norm;
			lastTransDiff = delta_pos.topRows(3).norm();
			lastRotDiff   = delta_pos.bottomRows(3).norm();
			lastSV        = svd.singularValues();
			lastNrOfIter  = i;
			q_out.data    = q.cast<double>();
			return (error = E_GRADIENT_JOINTS_TOO_SMALL);
		}

		q_new = q+diffq;//candidate update, step 5 in Fig. 1.1
		compute_fwdpos(q_new);
		Twist_to_Eigen( diff( T_base_head, T_base_goal), delta_pos_new );
		delta_pos_new             = L.asDiagonal()*delta_pos_new;
		double delta_pos_new_norm = delta_pos_new.norm();
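		//gain ratio: actual decrease of the squared weighted error norm divided by the
		//decrease predicted by the linear model; rho > 0 means the step reduced the error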
		rho                       = delta_pos_norm*delta_pos_norm - delta_pos_new_norm*delta_pos_new_norm;
		rho                      /= diffq.transpose()*(lambda*diffq + grad);
		if (rho > 0) {
			q               = q_new;
			delta_pos       = delta_pos_new;
			delta_pos_norm  = delta_pos_new_norm;//accept the step and update the state, step 6 in Fig. 1.1
			if (delta_pos_norm<eps) {
				Twist_to_Eigen( diff( T_base_head, T_base_goal), delta_pos );
				lastDifference = delta_pos_norm;
				lastTransDiff  = delta_pos.topRows(3).norm();
				lastRotDiff    = delta_pos.bottomRows(3).norm();
				lastSV         = svd.singularValues();
				lastNrOfIter   = i;
				q_out.data     = q.cast<double>();
				return (error = E_NOERROR);
			}
			compute_jacobian(q_new);
			jac = L.asDiagonal()*jac;
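			//accepted step: shrink lambda smoothly, by at most a factor of 3
			//(Nielsen's update rule), and reset the growth factor v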
			double tmp=2*rho-1;
			lambda = lambda*std::max(1/3.0, 1-tmp*tmp*tmp);
			v = 2;
		} else {
			lambda = lambda*v;//rejected step: increase the damping, step 7 in Fig. 1.1
			v      = 2*v;
		}
	}
	lastDifference = delta_pos_norm;
	lastTransDiff  = delta_pos.topRows(3).norm();
	lastRotDiff    = delta_pos.bottomRows(3).norm();
	lastSV         = svd.singularValues();
	lastNrOfIter   = maxiter;
	q_out.data     = q.cast<double>();
	return (error = E_MAX_ITERATIONS_EXCEEDED);

}
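The return values seen above report why the iteration stopped: E_NOERROR (converged), E_INCREMENT_JOINTS_TOO_SMALL and E_GRADIENT_JOINTS_TOO_SMALL (stalled in a local minimum; q_out still holds the best estimate found), and E_MAX_ITERATIONS_EXCEEDED. Continuing the sketch from section 1.1, a caller might handle them like this (strError comes from KDL's SolverI base class):

int ret = solver.CartToJnt(q_init, goal, q_out);
if (ret == KDL::SolverI::E_NOERROR) {
	// converged: q_out reaches the goal within eps
} else if (ret == KDL::ChainIkSolverPos_LMA::E_INCREMENT_JOINTS_TOO_SMALL ||
           ret == KDL::ChainIkSolverPos_LMA::E_GRADIENT_JOINTS_TOO_SMALL) {
	// local minimum: q_out is the best pose found; consider retrying from a different q_init
} else {
	std::cout << solver.strError(ret) << std::endl;// e.g. max iterations exceeded
}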