Simple 3D Reconstruction with the PercipioDC (Percipio) Depth Camera

After working through Dr. Gao Xiang's "build an RGB-D SLAM together" series, I decided to try it out myself with the Percipio camera I have on hand, so this post briefly records the experiment steps for future reference.

Dr. Gao Xiang's RGB-D series: 一起做RGB-D SLAM (1) - 半闲居士 - 博客园

I. Download the PercipioDC SDK

I ran the experiment on Ubuntu 18.04. First, download the SDK package and related documentation from the Percipio website.

PercipioDC Developer Guide — PercipioDC 2.3.0 documentation

II. Implementing a Simple 3D Reconstruction

1. Data Acquisition

Data is captured with the PercipioDC. Note that the camera's effective range is only about 1 m, so depth readings beyond roughly 1 m can carry significant error.

The first thing to pay attention to is aligning the depth image with the color image, i.e. the doRegister function. This step is essential: if the depth and color images are not registered, the later 3D reconstruction will suffer from mismatches.

Next comes the handleFrame function, which processes every frame captured by the camera and is equally important to understand.

Finally, part of the main function is modified so that captured frames can be saved to a chosen path (in the code below, pressing the 's' key saves the current frame).

#include <common/common.hpp>
#include <TYImageProc.h>
#include <common/DepthRender.hpp>

#include <iostream>
#include <string>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/visualization/cloud_viewer.h>

struct CallbackData 
{
	int             index;
	TY_ISP_HANDLE   IspHandle;
	TY_DEV_HANDLE   hDevice;
	DepthRender*    render;
	DepthViewer*    depthViewer;
	bool            needUndistort;

	TY_CAMERA_CALIB_INFO depth_calib;
	TY_CAMERA_CALIB_INFO color_calib;

	TY_CAMERA_INTRINSIC   intrinsic1;

	cv::Mat undistort_color;
	cv::Mat aligned_depth;
};

static void doRegister(const TY_CAMERA_CALIB_INFO& depth_calib // depth camera calibration data
	                 , const TY_CAMERA_CALIB_INFO& color_calib // color camera calibration data
	                 , const cv::Mat& depth // depth image
	                 , const cv::Mat& color // color image
	                 , bool needUndistort // whether to undistort the color image
	                 , cv::Mat& undistort_color // undistorted color image (output)
	                 , cv::Mat& out) // aligned depth image (output)
{
	// undistort the color image
	if (needUndistort) 
	{
		TY_IMAGE_DATA src;
		src.width = color.cols;                // image width (number of columns)
		src.height = color.rows;               // image height (number of rows)
		src.size = color.size().area() * 3;    // buffer size in bytes
		src.pixelFormat = TY_PIXEL_FORMAT_RGB; // pixel format
		src.buffer = color.data;               // pointer to the data buffer

		undistort_color = cv::Mat(color.size(), CV_8UC3); // allocate undistort_color with the same size and type
		TY_IMAGE_DATA dst;
		dst.width = color.cols;
		dst.height = color.rows;
		dst.size = undistort_color.size().area() * 3;
		dst.buffer = undistort_color.data;
		dst.pixelFormat = TY_PIXEL_FORMAT_RGB;
		ASSERT_OK(TYUndistortImage(&color_calib, &src, NULL, &dst)); // perform the undistortion
	}
	else 
	{
		undistort_color = color; // no undistortion, just pass the color image through
	}

	// map the depth image into the color camera's image plane (registration)
	out = cv::Mat::zeros(undistort_color.size(), CV_16U);
	ASSERT_OK(
		TYMapDepthImageToColorCoordinate(
			&depth_calib,
			depth.cols, depth.rows, depth.ptr<uint16_t>(),
			&color_calib,
			out.cols, out.rows, out.ptr<uint16_t>()
		));
}

void handleFrame(TY_FRAME_DATA* frame, void* userdata)
{
	CallbackData* pData = (CallbackData*)userdata;
	LOGD("=== Get frame %d", ++pData->index);

	cv::Mat depth, color;
	parseFrame(*frame, &depth, 0, 0, &color, pData->IspHandle);
	if (!depth.empty() && !color.empty()) 
	{
		//cv::Mat undistort_color, out;
		if (pData->needUndistort) // undistort the color image and align the depth image to it
		{
			doRegister(pData->depth_calib, pData->color_calib, depth, color, pData->needUndistort, pData->undistort_color, pData->aligned_depth);
		}
	}
	//LOGD("=== Re-enqueue buffer(%p, %d)", frame->userBuffer, frame->bufferSize);
	ASSERT_OK(TYEnqueueBuffer(pData->hDevice, frame->userBuffer, frame->bufferSize));
}

void eventCallback(TY_EVENT_INFO *event_info, void *userdata)
{
	if (event_info->eventId == TY_EVENT_DEVICE_OFFLINE) {
		LOGD("=== Event Callback: Device Offline!");
	}
	else if (event_info->eventId == TY_EVENT_LICENSE_ERROR) {
		LOGD("=== Event Callback: License Error!");
	}
}

std::string num2str(int i)
{
	std::stringstream ss;
	ss << i;
	return ss.str();
}

int main()
{
	std::string ID, IP;
	TY_INTERFACE_HANDLE hIface = NULL;
	TY_DEV_HANDLE hDevice = NULL;

	LOGD("=== Init lib");
	ASSERT_OK(TYInitLib());
	TY_VERSION_INFO ver;
	ASSERT_OK(TYLibVersion(&ver));
	LOGD("     - lib version: %d.%d.%d", ver.major, ver.minor, ver.patch);

	std::vector<TY_DEVICE_BASE_INFO> selected;
	ASSERT_OK(selectDevice(TY_INTERFACE_ALL, ID, IP, 1, selected));
	ASSERT(selected.size() > 0);
	TY_DEVICE_BASE_INFO& selectedDev = selected[0];

	ASSERT_OK(TYOpenInterface(selectedDev.iface.id, &hIface));
	ASSERT_OK(TYOpenDevice(hIface, selectedDev.id, &hDevice));

	int32_t allComps;
	ASSERT_OK(TYGetComponentIDs(hDevice, &allComps));
	if (!(allComps & TY_COMPONENT_RGB_CAM)) {
		LOGE("=== Has no RGB camera, cant do registration");
		return -1;
	}
	TY_ISP_HANDLE isp_handle;
	ASSERT_OK(TYISPCreate(&isp_handle));
	ASSERT_OK(ColorIspInitSetting(isp_handle, hDevice));

	// set the color image resolution (640x480, JPEG)
	int err = TYSetEnum(hDevice, TY_COMPONENT_RGB_CAM, TY_ENUM_IMAGE_MODE, TY_PIXEL_FORMAT_JPEG | TY_RESOLUTION_MODE_640x480);

	std::vector<TY_ENUM_ENTRY> image_mode_list;
	ASSERT_OK(get_feature_enum_list(hDevice, TY_COMPONENT_DEPTH_CAM, TY_ENUM_IMAGE_MODE, image_mode_list));
	for (int idx = 0; idx < image_mode_list.size(); idx++)
	{
		TY_ENUM_ENTRY &entry = image_mode_list[idx];

		// try to select a VGA resolution for the depth image
		if (TYImageWidth(entry.value) == 640 || TYImageHeight(entry.value) == 640)
		{
			LOGD("Select Depth Image Mode: %s", entry.description);
			int err = TYSetEnum(hDevice, TY_COMPONENT_DEPTH_CAM, TY_ENUM_IMAGE_MODE, entry.value);
			ASSERT(err == TY_STATUS_OK || err == TY_STATUS_NOT_PERMITTED);
			break;
		}
	}

	LOGD("=== Configure components");
	int32_t componentIDs = TY_COMPONENT_DEPTH_CAM | TY_COMPONENT_RGB_CAM;
	ASSERT_OK(TYEnableComponents(hDevice, componentIDs));

	LOGD("=== Prepare image buffer");
	uint32_t frameSize;
	ASSERT_OK(TYGetFrameBufferSize(hDevice, &frameSize));
	LOGD("     - Get size of framebuffer, %d", frameSize);
	LOGD("     - Allocate & enqueue buffers");
	char* frameBuffer[2];
	frameBuffer[0] = new char[frameSize];
	frameBuffer[1] = new char[frameSize];
	LOGD("     - Enqueue buffer (%p, %d)", frameBuffer[0], frameSize);
	ASSERT_OK(TYEnqueueBuffer(hDevice, frameBuffer[0], frameSize));
	LOGD("     - Enqueue buffer (%p, %d)", frameBuffer[1], frameSize);
	ASSERT_OK(TYEnqueueBuffer(hDevice, frameBuffer[1], frameSize));

	LOGD("=== Register event callback");
	ASSERT_OK(TYRegisterEventCallback(hDevice, eventCallback, NULL));

	bool hasTriggerParam = false;
	ASSERT_OK(TYHasFeature(hDevice, TY_COMPONENT_DEVICE, TY_STRUCT_TRIGGER_PARAM, &hasTriggerParam));
	if (hasTriggerParam) {
		LOGD("=== Disable trigger mode");
		TY_TRIGGER_PARAM trigger;
		trigger.mode = TY_TRIGGER_MODE_OFF; // turn trigger mode off (continuous capture); the commented lines below set a fixed-fps trigger mode instead
		//trigger.mode = TY_TRIGGER_MODE_M_PER;
		//trigger.fps = 24;
		ASSERT_OK(TYSetStruct(hDevice, TY_COMPONENT_DEVICE, TY_STRUCT_TRIGGER_PARAM, &trigger, sizeof(trigger)));
	}

	DepthViewer depthViewer("Depth");
	DepthRender render;
	CallbackData cb_data;
	cb_data.index = 0;
	cb_data.hDevice = hDevice;
	cb_data.depthViewer = &depthViewer;
	cb_data.render = &render;
	cb_data.needUndistort = true;
	cb_data.IspHandle = isp_handle;

	/*cv::Mat undistort_color, out;
	cb_data.undistort_color = undistort_color;
	cb_data.out = out;*/

	LOGD("=== Read depth calib info");//获取深度摄像头内参
	ASSERT_OK(TYGetStruct(hDevice, TY_COMPONENT_DEPTH_CAM, TY_STRUCT_CAM_CALIB_DATA
		, &cb_data.depth_calib, sizeof(cb_data.depth_calib)));

	LOGD("=== Read color calib info");//获取彩色摄像头参数
	ASSERT_OK(TYGetStruct(hDevice, TY_COMPONENT_RGB_CAM, TY_STRUCT_CAM_CALIB_DATA
		, &cb_data.color_calib, sizeof(cb_data.color_calib)));

	//LOGD("=== Read color calib info");//获取彩色摄像头内参
	//ASSERT_OK(TYGetStruct(hDevice, TY_COMPONENT_RGB_CAM, TY_STRUCT_CAM_INTRINSIC
	//	, &cb_data.intrinsic1, sizeof(cb_data.intrinsic1)));

	TYSetBool(hDevice, TY_COMPONENT_DEVICE, TY_BOOL_CMOS_SYNC, false); // disable CMOS sync to adjust the frame rate

	LOGD("=== Start capture");
	ASSERT_OK(TYStartCapture(hDevice));

	LOGD("=== Wait for callback");
	bool exit_main = false;
	int i = 0;

	while (!exit_main) 
	{
		TY_FRAME_DATA frame;
		int err = TYFetchFrame(hDevice, &frame, -1);
		if (err != TY_STATUS_OK) 
		{
			LOGE("Fetch frame error %d: %s", err, TYErrorString(err));
			break;
		}

		handleFrame(&frame, &cb_data); // core processing: undistortion + depth/color registration

		cv::imshow("undistort color", cb_data.undistort_color);
		//cv::imshow("aligned depth", cb_data.aligned_depth);
		//cb_data.depthViewer->show(cb_data.aligned_depth);

		std::string prefix1("color");
		std::string prefix2("depth");
		std::string extension(".png");
		std::string color_img = prefix1 + num2str(i) + extension;
		std::string depth_img = prefix2 + num2str(i) + extension;

		TYISPUpdateDevice(cb_data.IspHandle);


		char key = cv::waitKey(1);
		if (key==27)
		{
			break;
		}
		else if (key == 's') 
		{
			cv::imwrite(color_img, cb_data.undistort_color);
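			// note: the reconstruction step later needs the aligned depth as well;
			// uncomment the next line to save it as a 16-bit PNG next to the color image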
			//cv::imwrite(depth_img, cb_data.aligned_depth);
			i++;
		}
	} // end of capture loop

	ASSERT_OK(TYStopCapture(hDevice));
	ASSERT_OK(TYCloseDevice(hDevice));
	ASSERT_OK(TYCloseInterface(hIface));
	ASSERT_OK(TYDeinitLib());
	delete[] frameBuffer[0]; // free the frame buffers (allocated with new[])
	delete[] frameBuffer[1];

	LOGD("=== Main done!");
	return 0;
}

2. Camera Calibration

For calibrating the camera's intrinsics, see my other article:

相机内参的标定_er_dan_love的博客-CSDN博客_相机内参标定
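
The calibration article covers the details; the underlying procedure is standard chessboard calibration with OpenCV's calibrateCamera. The sketch below only illustrates that idea; the board size, square size and image file names are assumptions, not the actual values I used.

#include <iostream>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>

int main()
{
    const cv::Size boardSize(9, 6);    // inner corners per row/column (assumed board)
    const float squareSize = 0.025f;   // square edge length in meters (assumed)

    std::vector<std::vector<cv::Point3f>> objectPoints; // 3D board corners per image
    std::vector<std::vector<cv::Point2f>> imagePoints;  // detected 2D corners per image
    cv::Size imageSize;

    // 3D coordinates of the board corners on the z = 0 plane
    std::vector<cv::Point3f> corners3d;
    for (int r = 0; r < boardSize.height; r++)
        for (int c = 0; c < boardSize.width; c++)
            corners3d.push_back(cv::Point3f(c * squareSize, r * squareSize, 0));

    for (int i = 0; i < 20; i++) // e.g. 20 images named calib0.png, calib1.png, ...
    {
        cv::Mat img = cv::imread("calib" + std::to_string(i) + ".png");
        if (img.empty()) continue;
        imageSize = img.size();

        std::vector<cv::Point2f> corners2d;
        if (cv::findChessboardCorners(img, boardSize, corners2d))
        {
            cv::Mat gray;
            cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
            cv::cornerSubPix(gray, corners2d, cv::Size(11, 11), cv::Size(-1, -1),
                cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 30, 0.01));
            imagePoints.push_back(corners2d);
            objectPoints.push_back(corners3d);
        }
    }
    if (objectPoints.empty())
    {
        std::cerr << "no chessboard detections" << std::endl;
        return 1;
    }

    // estimate fx, fy, cx, cy and the distortion coefficients
    cv::Mat K, distCoeffs;
    std::vector<cv::Mat> rvecs, tvecs;
    double rms = cv::calibrateCamera(objectPoints, imagePoints, imageSize,
                                     K, distCoeffs, rvecs, tvecs);
    std::cout << "RMS reprojection error: " << rms << "\nK = " << K << std::endl;
    return 0;
}

The fx, fy, cx, cy printed at the end are the values that later go into the camera intrinsics of the parameter file (886.2, 884.5, 639.5, 338.8 in my case).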

3. Feature Extraction and Matching

This part mainly follows Dr. Gao's RGB-D series. Note that his series uses OpenCV 2.4 while I use OpenCV 3.4, so some of the functions have changed. If you want the two OpenCV versions to coexist, see my other article:

ubuntu 18.04 安装opencv2.4.13 与opencv3.4.15共存_er_dan_love的博客-CSDN博客

However, after installing the two OpenCV versions side by side, other libraries started to have problems, so I went back and modified the code from Dr. Gao's series instead, which is fairly straightforward.

The main changes are how the feature detector and descriptor extractor are created, and the parameters of the function that computes the transform between the two frames (a short comparison of the old and new calls follows the code). The modified code is below:

#include<iostream>
#include "../include/slamBase.h"
using namespace std;

// OpenCV feature detection modules
#include <opencv2/features2d/features2d.hpp>
// #include <opencv2/nonfree/nonfree.hpp> // use this if you want to use SIFT or SURF
#include <opencv2/calib3d/calib3d.hpp>

int main( int argc, char** argv )
{
    // read two RGB images and their depth images from the data folder
    cv::Mat rgb1 = cv::imread( "/home/zjs/data/data/rgb_png/2.png");
    cv::Mat rgb2 = cv::imread( "/home/zjs/data/data/rgb_png/5.png");
    cv::Mat depth1 = cv::imread( "/home/zjs/data/data/depth_png/2.png", -1);
    cv::Mat depth2 = cv::imread( "/home/zjs/data/data/depth_png/5.png", -1);

    // declare the feature detector and the descriptor extractor
    cv::Ptr<cv::FeatureDetector> detector;
    cv::Ptr<cv::DescriptorExtractor> descriptor;

    // construct the extractors; both default to ORB

    // to use SIFT or SURF, initialize the nonfree module first (OpenCV 2.4 style):
    // cv::initModule_nonfree();
    // _detector = cv::FeatureDetector::create( "SIFT" );
    // _descriptor = cv::DescriptorExtractor::create( "SIFT" );
    
    detector = cv::ORB::create();
    descriptor = cv::ORB::create();

    vector< cv::KeyPoint > kp1, kp2; // keypoints
    detector->detect( rgb1, kp1 );  // detect keypoints
    detector->detect( rgb2, kp2 );

    cout<<"Key points of two images: "<<kp1.size()<<", "<<kp2.size()<<endl;
    
    // visualization: draw the keypoints
    cv::Mat imgShow;
    cv::drawKeypoints( rgb1, kp1, imgShow, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
    cv::imshow( "keypoints", imgShow );
    cv::imwrite( "/home/zjs/rgbd-slam-tutorial-gx/part IV/my_data/keypoints.png", imgShow );
    cv::waitKey(0); // pause and wait for a key press
   
    // compute the descriptors
    cv::Mat desp1, desp2;
    descriptor->compute( rgb1, kp1, desp1 );
    descriptor->compute( rgb2, kp2, desp2 );

    // match the descriptors
    vector< cv::DMatch > matches; 
    cv::BFMatcher matcher;
    matcher.match( desp1, desp2, matches );
    cout<<"Find total "<<matches.size()<<" matches."<<endl;

    // visualization: draw the matched features
    cv::Mat imgMatches;
    cv::drawMatches( rgb1, kp1, rgb2, kp2, matches, imgMatches );
    cv::imshow( "matches", imgMatches );
    cv::imwrite( "/home/zjs/rgbd-slam-tutorial-gx/part IV/my_data/matches.png", imgMatches );
    cv::waitKey( 0 );

    // filter the matches and drop the ones whose distance is too large
    // criterion used here: discard matches with distance greater than ten times the minimum distance
    vector< cv::DMatch > goodMatches;
    double minDis = 9999;
    for ( size_t i=0; i<matches.size(); i++ )
    {
        if ( matches[i].distance < minDis )
            minDis = matches[i].distance;
    }
    cout<<"min dis = "<<minDis<<endl;

    for ( size_t i=0; i<matches.size(); i++ )
    {
        if (matches[i].distance < 10*minDis)
            goodMatches.push_back( matches[i] );
    }

    // show the good matches
    cout<<"good matches="<<goodMatches.size()<<endl;
    cv::drawMatches( rgb1, kp1, rgb2, kp2, goodMatches, imgMatches );
    cv::imshow( "good matches", imgMatches );
    cv::imwrite( "/home/zjs/rgbd-slam-tutorial-gx/part IV/my_data/good_matches.png", imgMatches );
    cv::waitKey(0);

    // estimate the motion between the two frames
    // key function: cv::solvePnPRansac()
    // prepare the arguments required by this function

    // 3D points in the first frame
    vector<cv::Point3f> pts_obj;
    // 2D image points in the second frame
    vector< cv::Point2f > pts_img;

    // camera intrinsics
    CAMERA_INTRINSIC_PARAMETERS C;
    C.cx = 639.5;
    C.cy = 338.8;
    C.fx = 886.2;
    C.fy = 884.5;
    C.scale = 1000.0;

    for (size_t i=0; i<goodMatches.size(); i++)
    {
        // query refers to the first image, train to the second
        cv::Point2f p = kp1[goodMatches[i].queryIdx].pt;
        // be careful when reading d: x points right and y points down, so y is the row and x is the column!
        ushort d = depth1.ptr<ushort>( int(p.y) )[ int(p.x) ];
        if (d == 0)
            continue;
        pts_img.push_back( cv::Point2f( kp2[goodMatches[i].trainIdx].pt ) );

        // convert (u,v,d) into (x,y,z)
        cv::Point3f pt ( p.x, p.y, d );
        cv::Point3f pd = point2dTo3d( pt, C );
        pts_obj.push_back( pd );
    }

    double camera_matrix_data[3][3] = {
        {C.fx, 0, C.cx},
        {0, C.fy, C.cy},
        {0, 0, 1}
    };

    // build the camera matrix
    cv::Mat cameraMatrix( 3, 3, CV_64F, camera_matrix_data );
    cv::Mat rvec, tvec, inliers;
    // solve PnP with RANSAC
    cv::solvePnPRansac( pts_obj, pts_img, cameraMatrix, cv::Mat(), rvec, tvec, false, 100, 1.0, 0.99, inliers );

    cout<<"inliers: "<<inliers.rows<<endl;
    cout<<"R="<<rvec<<endl;
    cout<<"t="<<tvec<<endl;

    // draw the inlier matches
    vector< cv::DMatch > matchesShow;
    for (size_t i=0; i<inliers.rows; i++)
    {
        matchesShow.push_back( goodMatches[inliers.ptr<int>(i)[0]] );    
    }
    cv::drawMatches( rgb1, kp1, rgb2, kp2, matchesShow, imgMatches );
    cv::imshow( "inlier matches", imgMatches );
    cv::imwrite( "/home/zjs/rgbd-slam-tutorial-gx/part IV/my_data/inliers.png", imgMatches );
    cv::waitKey( 0 );

    return 0;
}
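
For reference, the two call-site changes mentioned above look roughly like this. The OpenCV 2.4 forms (what Dr. Gao's original code used) are kept as comments, the active lines are the OpenCV 3.x forms used above, and the point and matrix values are dummies only meant to make the snippet self-contained:

#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>

int main()
{
    // 1) creating the detector / descriptor extractor
    // OpenCV 2.4:  cv::FeatureDetector::create("ORB");  cv::DescriptorExtractor::create("ORB");
    cv::Ptr<cv::FeatureDetector>     detector   = cv::ORB::create();
    cv::Ptr<cv::DescriptorExtractor> descriptor = cv::ORB::create();

    // 2) solvePnPRansac: OpenCV 2.4 takes an int minInliersCount as the 10th argument,
    //    OpenCV 3.x takes a double confidence there instead
    std::vector<cv::Point3f> pts_obj = { {0,0,1}, {1,0,1}, {0,1,1}, {1,1,2}, {2,1,1}, {1,2,2} };
    std::vector<cv::Point2f> pts_img = { {320,240}, {820,240}, {320,740}, {570,490}, {1320,740}, {570,740} };
    cv::Mat K = (cv::Mat_<double>(3, 3) << 500, 0, 320, 0, 500, 240, 0, 0, 1);
    cv::Mat rvec, tvec, inliers;
    // OpenCV 2.4: cv::solvePnPRansac(pts_obj, pts_img, K, cv::Mat(), rvec, tvec,
    //                                false, 100, 1.0, 100 /*minInliersCount*/, inliers);
    cv::solvePnPRansac(pts_obj, pts_img, K, cv::Mat(), rvec, tvec,
                       false, 100, 1.0, 0.99 /*confidence*/, inliers);
    return 0;
}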

After feature detection, the next step is to convert the image data into a point cloud; for that, see my other article:

RGB-D转换为点云图_er_dan_love的博客-CSDN博客_rgb转点云
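
For context, the core of that conversion (what point2dTo3d() in slamBase does for a single pixel) is the pinhole back-projection shown below. The intrinsics are the ones used later in this post, and scale = 1000 assumes the raw depth is in millimeters:

#include <iostream>
#include <opencv2/core/core.hpp>

struct CameraIntrinsics { double cx, cy, fx, fy, scale; };

// back-project pixel (u, v) with raw depth d into a 3D point in the camera frame
cv::Point3f backProject(double u, double v, ushort d, const CameraIntrinsics& C)
{
    cv::Point3f p;
    p.z = float(d / C.scale);            // millimeters -> meters
    p.x = float((u - C.cx) * p.z / C.fx);
    p.y = float((v - C.cy) * p.z / C.fy);
    return p;
}

int main()
{
    CameraIntrinsics C{639.5, 338.8, 886.2, 884.5, 1000.0};
    cv::Point3f p = backProject(320, 240, 800, C);              // a pixel with 800 mm of depth
    std::cout << p.x << " " << p.y << " " << p.z << std::endl;  // roughly -0.288 -0.089 0.8
    return 0;
}

image2PointCloud() simply applies this to every pixel with a valid depth and attaches the RGB value to each point.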

4. 3D Reconstruction from the Captured Data

This step is also mainly a modification of Dr. Gao's code. Here it is:

#include <iostream>
#include <fstream>
#include <sstream>
using namespace std;

#include "../include/slamBase.h"

// read one frame of data given its index
FRAME readFrame( int index, ParameterReader& pd );
// measure how large the motion is
double normofTransform( cv::Mat rvec, cv::Mat tvec );

int main( int argc, char** argv )
{
    ParameterReader pd;
    int startIndex  =   atoi( pd.getData( "start_index" ).c_str() );
    int endIndex    =   atoi( pd.getData( "end_index"   ).c_str() );

    // initialize
    cout<<"Initializing ..."<<endl;
    int currIndex = startIndex; // the current index
    FRAME lastFrame = readFrame( currIndex, pd ); // the previous frame
    // we always compare currFrame against lastFrame
    string detector = pd.getData( "detector" );
    string descriptor = pd.getData( "descriptor" );
    CAMERA_INTRINSIC_PARAMETERS camera = getDefaultCamera();
    computeKeyPointsAndDesp( lastFrame, detector, descriptor );
    PointCloud::Ptr cloud = image2PointCloud( lastFrame.rgb, lastFrame.depth, camera );
    
    pcl::visualization::CloudViewer viewer("viewer");

    // whether to display the point cloud
    bool visualize = pd.getData("visualize_pointcloud")==string("yes");

    int min_inliers = atoi( pd.getData("min_inliers").c_str() );
    double max_norm = atof( pd.getData("max_norm").c_str() );

    for ( currIndex=startIndex+1; currIndex<endIndex; currIndex++ )
    {
        cout<<"Reading files "<<currIndex<<endl;
        FRAME currFrame = readFrame( currIndex,pd ); // read currFrame
        computeKeyPointsAndDesp( currFrame, detector, descriptor );
        // compare currFrame with lastFrame
        RESULT_OF_PNP result = estimateMotion( lastFrame, currFrame, camera );
        if ( result.inliers < min_inliers ) // not enough inliers, drop this frame
            continue;
        // check whether the motion is too large
        double norm = normofTransform(result.rvec, result.tvec);
        cout<<"norm = "<<norm<<endl;
        if ( norm >= max_norm )
            continue;
        Eigen::Isometry3d T = cvMat2Eigen( result.rvec, result.tvec );
        cout<<"T="<<T.matrix()<<endl;
        
        cloud = joinPointCloud( cloud, currFrame, T, camera );
        
        if ( visualize == true )
            viewer.showCloud( cloud );

        lastFrame = currFrame;
    }

    pcl::io::savePCDFile( "/home/zjs/rgbd-slam-tutorial-gx/part IV/my_data/result_V.pcd", *cloud );
    return 0;
}

FRAME readFrame( int index, ParameterReader& pd )
{
    FRAME f;
    string rgbDir   =   pd.getData("rgb_dir");
    string depthDir =    pd.getData("depth_dir");
    
    string rgbExt   =   pd.getData("rgb_extension");
    string depthExt =   pd.getData("depth_extension");

    stringstream ss;
    ss<<rgbDir<<index<<rgbExt;
    string filename;
    ss>>filename;
    f.rgb = cv::imread( filename );

    ss.clear();
    filename.clear();
    ss<<depthDir<<index<<depthExt;
    ss>>filename;

    f.depth = cv::imread( filename, -1 );
    return f;
}

double normofTransform( cv::Mat rvec, cv::Mat tvec )
{
    return fabs(min(cv::norm(rvec), 2*M_PI-cv::norm(rvec)))+ fabs(cv::norm(tvec));
}
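
The function most worth understanding here is joinPointCloud from slamBase: roughly, it converts the new frame into a point cloud, transforms the accumulated cloud by T into the same coordinate frame, merges the two, and downsamples the result with a voxel grid whose leaf size is the voxel_grid parameter. A minimal sketch of that idea, not the exact slamBase code (PointT = pcl::PointXYZRGBA follows the tutorial):

#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/common/transforms.h>
#include <pcl/filters/voxel_grid.h>
#include <Eigen/Geometry>

typedef pcl::PointXYZRGBA PointT;
typedef pcl::PointCloud<PointT> PointCloud;

// merge the accumulated cloud into the new frame's coordinate system and downsample
PointCloud::Ptr mergeClouds(PointCloud::Ptr accumulated, PointCloud::Ptr newCloud,
                            const Eigen::Isometry3d& T, double gridsize)
{
    // bring the accumulated cloud into the new frame's coordinates
    PointCloud::Ptr transformed(new PointCloud());
    pcl::transformPointCloud(*accumulated, *transformed, T.matrix());
    *newCloud += *transformed;

    // voxel-grid downsampling keeps the map size bounded (voxel_grid=0.01 means 1 cm voxels)
    pcl::VoxelGrid<PointT> voxel;
    voxel.setLeafSize(gridsize, gridsize, gridsize);
    voxel.setInputCloud(newCloud);
    PointCloud::Ptr filtered(new PointCloud());
    voxel.filter(*filtered);
    return filtered;
}

int main()
{
    PointCloud::Ptr accumulated(new PointCloud()), current(new PointCloud());
    PointCloud::Ptr merged = mergeClouds(accumulated, current, Eigen::Isometry3d::Identity(), 0.01);
    return 0;
}

In the tutorial it is the accumulated cloud that gets transformed by T; whichever convention you use just has to match the direction of the transform returned by estimateMotion.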

Besides that, the main thing is to read through the slamBase helpers this program calls (readFrame, computeKeyPointsAndDesp, estimateMotion, cvMat2Eigen); when running it you also need to adjust the corresponding parameters in the parameter file (a rough sketch of how this file is parsed follows the list):

# parameters used in part 4
# feature type
detector=ORB
descriptor=ORB
# multiplier used to select good matches
good_match_threshold=10

# camera
camera.cx=639.5;
camera.cy=338.8;
camera.fx=886.2;
camera.fy=884.5;
camera.scale=1000.0;

# part 5
# data settings
# start and end indices
start_index=1
end_index=240
# directories containing the data
rgb_dir=/home/zjs/data/data/rgb_png/
rgb_extension=.png
depth_dir=/home/zjs/data/data/depth_png/
depth_extension=.png
# point cloud resolution (voxel grid leaf size, in meters)
voxel_grid=0.01
# whether to visualize the point cloud in real time
visualize_pointcloud=yes
# minimum number of good matches
min_good_match=10
# minimum number of inliers
min_inliers=5
# maximum motion norm
max_norm=0.3
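
For reference, ParameterReader in slamBase does little more than split each non-comment line of this file at the '=' sign and hand the value back as a string (hence the atoi/atof calls above). A rough sketch of that parsing, assuming the file is named parameters.txt:

#include <fstream>
#include <iostream>
#include <map>
#include <string>

int main()
{
    std::map<std::string, std::string> data;
    std::ifstream fin("parameters.txt");   // path is an assumption
    std::string line;
    while (std::getline(fin, line))
    {
        if (line.empty() || line[0] == '#') continue;   // skip blank lines and comments
        size_t pos = line.find('=');
        if (pos == std::string::npos) continue;
        data[line.substr(0, pos)] = line.substr(pos + 1);
    }
    std::cout << "start_index = " << data["start_index"] << std::endl;
    return 0;
}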

5. Reconstruction Results

The results show that reconstruction within about 1 m is quite good, while beyond 1 m the quality drops noticeably and mismatched geometry appears.

Summary

That is the simple little experiment I put together. You can tweak the parameters or the code above to get better results.
