C++ Implementations of Machine Learning Algorithms

Perceptron

https://www.cnblogs.com/liuhuacai/p/11973036.html

main.cpp

#include <iostream>
#include <vector>
#include "perceptron.h"
using std::vector;
using std::cout;
using std::endl;


int main() {
    Base* obj = new Perceptron();
    obj->run();
    delete obj;
    return 0;
}

perceptron.cpp 

#include "perceptron.h"


using std::string;
using std::vector;
using std::pair;


void Perceptron::getData(const std::string &filename) {
    //load data to a vector
    std::vector<double> temData;
    double onepoint;
    std::string line;
    inData.clear();
    std::ifstream infile(filename);
    std::cout<<"reading ..."<<std::endl;
    while(std::getline(infile, line)){
        if(line.empty())
            continue;
        temData.clear();
        std::stringstream stringin(line);
        while(stringin >> onepoint){
            temData.push_back(onepoint);
        }
        indim = temData.size();
        indim -= 1;
        inData.push_back(temData);
    }
    std::cout<<"total data is "<<inData.size()<<std::endl;
}

void Perceptron::splitData(const float& trainTotalRatio){
    std::random_shuffle(inData.begin(), inData.end());
    unsigned long size = inData.size();
    unsigned long trainSize = size * trainTotalRatio;
    std::cout<<"total data is "<< size<<" ,train data has "<<trainSize<<std::endl;
    for(int i=0;i<size;++i){
        if (i<trainSize)
            trainData.push_back(inData[i]);
        else
            testData.push_back(inData[i]);

    }

}
void Perceptron::createFeatureGt() {
    //create feature for test,using trainData, testData
    for (const auto& data:trainData){
        std::vector<double> trainf;
        trainf.assign(data.begin(), data.end()-1);
        trainDataF.push_back(trainf);
        trainDataGT.push_back(*(data.end()-1));
    }
    for (const auto& data:testData){
        std::vector<double> testf;
        testf.assign(data.begin(), data.end()-1);
        testDataF.push_back(testf);
        testDataGT.push_back(*(data.end()-1));
    }
}

void Perceptron::initialize(std::vector<double>& init) {
    // must initialize parameter first, using vector to initialize
    if(init.size()!=indim+1) {
        std::cout<<"input dimension is should be "+std::to_string(indim+1)<<std::endl;
        throw init.size();
    }
    w.assign(init.begin(), init.end()-1);
    b = *(init.end()-1);
}



double Perceptron::inference(const std::vector<double>& inputData){
    //just compute wx+b , for compute loss and predict.
    if (inputData.size()!=indim){
        std::cout<<"input dimension is incorrect. "<<std::endl;
        throw inputData.size();
    }

    double sum_tem = 0.0;
    sum_tem = inputData * w;
    sum_tem += b;
    return sum_tem;
}



double Perceptron::loss(const std::vector<double>& inputData, const double& groundTruth){
    double infer = inference(inputData);
    double loss = -1.0 * groundTruth * infer;
    std::cout<<"loss is "<< loss <<std::endl;
    return loss;
}



std::pair<std::vector<double>, double> Perceptron::computeGradient(const std::vector<double>& inputData, const double& groundTruth) {
    double lossVal = loss(inputData, groundTruth);
    std::vector<double> wi;
    double bi;
    if (lossVal >= 0.0)
    {
        for(auto indata:inputData) {
            wi.push_back(indata*groundTruth);
        }
        bi = groundTruth;
    }
    else{
        for(auto indata:inputData) {
            wi.push_back(0.0);
        }
        bi = 0.0;
    }
    return std::pair<std::vector<double>, double>(wi, bi);//for clarity, a pair is used to represent w and b.
    //you could also return a single vector that contains both w and b.
}
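
The update implemented here is the standard perceptron rule: for a sample (x_i, y_i) that is misclassified, i.e. y_i(w \cdot x_i + b) \le 0, the negative gradient of the loss gives

\[ w \leftarrow w + \eta\, y_i x_i, \qquad b \leftarrow b + \eta\, y_i, \]

and correctly classified samples contribute nothing, which is what the zero gradient in the else branch encodes.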


void Perceptron::train(const int & step, const float & lr) {
    std::vector<double> init = {1.0,1.0,1.0};//indim weights plus one bias term; hard-coded for the 2-D sample data
    initialize(init);
    int count = 0;
    for(int i=0; i<step; ++i){
        if (count==trainDataF.size())
            count = 0;

        std::vector<double> inputData = trainDataF[count];
        double groundTruth = trainDataGT[count];
        auto grad = computeGradient(inputData, groundTruth);
        auto grad_w = grad.first;
        double grad_b = grad.second;
        for (int j=0; j<indim;++j){
            w[j] += lr * (grad_w[j]);
        }
        b += lr * (grad_b);
        count++;
    }
}


int Perceptron::predict(const std::vector<double>& inputData) {

    double out = inference(inputData);

    if(out>=0.0){

        return 1;
    }
    else{
        return -1;
    }


}

/*perceptrondata.txt
3 4 1
1 1 -1
2 4 1
1 2 -1
1 5 1
2 0.5 -1
1 6 1
1 2.5 -1
0.5 6 1
0 1 -1
2 2.5 1
0.5 1 -1
1 4 1
1.5 1 -1
2.7 1 1
2 3.5 1
0.8 3 -1
0.1 4 -1
*/
void Perceptron::run(){
    //remember to change the data path if needed
    getData("../data/perceptrondata.txt");
    splitData(0.6);//split the data and store it in trainData and testData
    createFeatureGt();
    train(200, 1.0);//200 is the number of steps and 1.0 is the learning rate
    std::vector<std::vector<double>>  testData = getTestDataFeature();
    std::vector<double> testGT = getTestGT();
    for(int i=0; i<testData.size(); ++i){
        std::cout<<i<<std::endl;
        std::cout<<"The right class is "<<testGT[i]<<std::endl;
        int out = predict(testData[i]);
        std::cout<<"The predict class is "<<out<<std::endl;
    }
}

perceptron.h

#ifndef MACHINE_LEARNING_PERCEPTRON_H
#define MACHINE_LEARNING_PERCEPTRON_H

#include <vector>
#include <array>
#include <utility>
#include "model_base.h"


class Perceptron: public Base{
private:
    std::vector<double> w;
    double b;
public:
    virtual void getData(const std::string& filename);
    virtual void run();
    void splitData(const float& );
    void createFeatureGt();//create feature for test,using trainData, testData
    void setDim(const unsigned long& iDim){indim = iDim;}
    double inference(const std::vector<double>&) ;
    void initialize(std::vector<double>& init);
    void train(const int& step,const float& lr);
    int predict(const std::vector<double>& inputData);
    double loss(const std::vector<double>& inputData, const double& groundTruth);
    std::pair<std::vector<double>, double> computeGradient(const std::vector<double>& inputData, const double& groundTruth);
    std::vector<std::vector<double>> getTestDataFeature(){return testDataF;}
    std::vector<double> getTestGT(){ return testDataGT;}
};



#endif //MACHINE_LEARNING_PERCEPTRON_H

model_base.h 

#ifndef MACHINE_LEARNING_MODEL_BASE_H
#define MACHINE_LEARNING_MODEL_BASE_H

#include <vector>
#include <fstream>
#include <string>
#include <iostream>
#include <sstream>
#include <algorithm>
using std::vector;
using std::cout;
using std::endl;
//this base class holds the shared data and declares the run/getData interface
class Base{
protected:
    std::vector<double> trainDataGT;//ground truth of training data
    std::vector<double> testDataGT;
    std::vector<std::vector<double>> inData;//data read from file
    std::vector<std::vector<double>> trainData;//training data after splitting, still contains the ground truth
    std::vector<std::vector<double>> testData;
    unsigned long indim = 0;
    std::vector<std::vector<double>> trainDataF;//actual training data: the features only
    std::vector<std::vector<double>> testDataF;
  
public:
    void setTrainD(vector<std::vector<double>>& trainF, vector<double>& trainGT) {trainDataF = trainF; trainDataGT=trainGT;}
    void setTestD(vector<std::vector<double>>& testF, vector<double>& testGT) {testDataF = testF; testDataGT=testGT;}
    virtual void getData(const std::string& filename)=0;
    virtual void run()=0;
    virtual ~Base(){};
    template <class T1, class T2>
    friend auto operator + (const vector<T1>& v1, const vector<T2>& v2)->vector<decltype(v1[0] + v2[0])>;
    template <class T1, class T2>
    friend auto operator - (const vector<T1>& v1, const vector<T2>& v2)->vector<decltype(v1[0] + v2[0])>;
    template <class T1, class T2>
    friend double operator * (const vector<T1>& v1, const vector<T2>& v2);
    template <class T1, class T2>
    friend auto operator / (const vector<T1>& v1, const vector<T2>& v2)->vector<decltype(v1[0] + v2[0])>;
    template <class T1, class T2>
    friend auto operator + (const T1& arg1, const vector<T2>& v2)->vector<decltype(arg1 + v2[0])>;
    template <class T1, class T2>
    friend auto operator - (const T1& arg1, const vector<T2>& v2)->vector<decltype(arg1 + v2[0])>;
    template <class T1, class T2>
    friend auto operator * (const T1& arg1, const vector<T2>& v2)->vector<decltype(arg1 + v2[0])>;
    template <class T1, class T2>
    friend auto operator / (const T1& arg1, const vector<T2>& v2)->vector<decltype(arg1 + v2[0])>;
    template <class T1, class T2>
    friend auto operator + (const vector<T1>& v1, const T2& arg2)->vector<decltype(v1[0] + arg2)>;
    template <class T1, class T2>
    friend auto operator - (const vector<T1>& v1, const T2& arg2)->vector<decltype(v1[0] + arg2)>;
    template <class T1, class T2>
    friend auto operator * (const vector<T1>& v1, const T2& arg2)->vector<decltype(v1[0] + arg2)>;
    template <class T1, class T2>
    friend auto operator / (const vector<T1>& v1, const T2& arg2)->vector<decltype(v1[0] + arg2)>;
    template <class T1>
    friend vector<vector<T1>> transpose(const vector<vector<T1>>& mat);
    template <class T1>
    friend vector<vector<T1>> vecMulVecToMat(const vector<T1>& vec1, const vector<T1>& vec2);
    template <class T1, class T2>
    friend auto operator + (const vector<vector<T1>>& v1, const vector<vector<T2>>& v2)
    ->vector<vector<decltype(v1[0][0] + v2[0][0])>>;
};
template <class T1, class T2>
auto operator + (const vector<T1>& v1, const vector<T2>& v2) ->vector<decltype(v1[0] + v2[0])> {

    if (v1.size() != v2.size()) {
        cout << "the two vectors must have the same size." << endl;
        throw v1.size() != v2.size();
    }
    if (v1.empty()) {
        cout << "the vector must not be empty." << endl;
        throw v1.empty();
    }
    vector<decltype(v1[0] + v2[0])> re(v1.size());
    for (int i = 0; i < v1.size(); ++i) {
        re[i] = v1[i] + v2[i];
    }
    return re;
}
template <class T1, class T2>
auto operator - (const vector<T1>& v1, const vector<T2>& v2)->vector<decltype(v1[0] + v2[0])> {
    if (v1.size() != v2.size()) {
        cout << "the two vectors must have the same size." << endl;
        throw v1.size() != v2.size();
    }
    if (v1.empty()){
        cout << "the vector must not be empty." << endl;
        throw v1.empty();
    }
    vector<decltype(v1[0] - v2[0])> re(v1.size());
    for (int i = 0; i < v1.size(); ++i) {
        re[i] = v1[i] - v2[i];
    }
    return re;
}


template <class T1, class T2>
double operator * (const vector<T1>& v1, const vector<T2>& v2) {
    if (v1.size() != v2.size()) {
        cout << "the two vectors must have the same size." << endl;
        throw v1.size() != v2.size();
    }
    if (v1.empty()){
        cout << "the vector must not be empty." << endl;
        throw v1.empty();
    }
    decltype(v1[0] * v2[0]) re = 0;
    for (int i = 0; i < v1.size(); ++i) {
        re += v1[i] * v2[i];
    }
    return re;
}




template <class T1, class T2>
auto operator / (const vector<T1>& v1, const vector<T2>& v2)->vector<decltype(v1[0] / v2[0])> {
    if (v1.size() != v2.size()) {
        cout << "the two vectors must have the same size." << endl;
        throw v1.size() != v2.size();
    }
    if (v1.empty()){
        cout << "the vector must not be empty." << endl;
        throw v1.empty();
    }
    vector<decltype(v1[0] / v2[0])> re(v1.size());
    for (int i = 0; i < v1.size(); ++i) {
        re[i] = v1[i] / v2[i];
    }
    return re;
}


template <class T1, class T2>
auto operator + (const T1& arg1, const vector<T2>& v2)->vector<decltype(arg1 + v2[0])>{


    if (v2.empty()){
        cout << "the vector must not be empty." << endl;
        throw v2.empty();
    }
    vector<decltype(arg1 + v2[0])> re(v2.size());
    for (int i = 0; i < v2.size(); ++i) {
        re[i] = arg1 + v2[i];
    }
    return re;
}


template <class T1, class T2>
auto operator - (const T1& arg1, const vector<T2>& v2)->vector<decltype(arg1 - v2[0])>{


    if (v2.empty()){
        cout << "the vector must not be empty." << endl;
        throw v2.empty();
    }
    vector<decltype(arg1 - v2[0])> re(v2.size());
    for (int i = 0; i < v2.size(); ++i) {
        re[i] = arg1 - v2[i];
    }
    return re;
}


template <class T1, class T2>
auto operator * (const T1& arg1, const vector<T2>& v2)->vector<decltype(arg1 * v2[0])>{


    if (v2.empty()){
        cout << "the vector must not be empty." << endl;
        throw v2.empty();
    }
    vector<decltype(arg1 * v2[0])> re(v2.size());
    for (int i = 0; i < v2.size(); ++i) {
        re[i] = arg1 * v2[i];
    }
    return re;
}


template <class T1, class T2>
auto operator / (const T1& arg1, const vector<T2>& v2)->vector<decltype(arg1 / v2[0])>{


    if (v2.empty()){
        cout << "the vector must not be empty." << endl;
        throw v2.empty();
    }
    vector<decltype(arg1 / v2[0])> re(v2.size());
    for (int i = 0; i < v2.size(); ++i) {
        re[i] = arg1 / v2[i];
    }
    return re;
}


template <class T1, class T2>
auto operator + (const vector<T1>& v1, const T2& arg2)->vector<decltype(v1[0] + arg2)>{
    return arg2+v1;
}


template <class T1, class T2>
auto operator - (const vector<T1>& v1, const T2& arg2)->vector<decltype(v1[0] - arg2)>{
    return v1 + (-arg2);//elementwise v1[i] - arg2
}

template <class T1, class T2>
auto operator * (const vector<T1>& v1, const T2& arg2)->vector<decltype(v1[0] * arg2)>{
    return arg2*v1;
}

template <class T1, class T2>
auto operator / (const vector<T1>& v1, const T2& arg2)->vector<decltype(v1[0] / arg2)>{
    if (v1.empty()){
        cout << "the vector must not be empty." << endl;
        throw v1.empty();
    }
    vector<decltype(v1[0]/arg2)> re(v1.size());
    for (int i = 0; i < v1.size(); ++i) {
        re[i] = v1[i]/arg2;
    }
    return re;
}

template <class T1>
vector<vector<T1>> transpose(const vector<vector<T1>>& mat) {
    //the result has mat[0].size() rows and mat.size() columns, so non-square matrices transpose correctly
    vector<vector<T1>> newMat (mat[0].size(), vector<T1> (mat.size(), 0));
    for (int i = 0; i < mat[0].size(); ++i) {
        for (int j = 0; j < mat.size(); ++j)
            newMat[i][j] = mat[j][i];
    }
    return newMat;
}
template <class T1>
vector<vector<T1>> vecMulVecToMat(const vector<T1>& vec1, const vector<T1>& vec2) {
    if (vec1.size() != vec2.size())
        cout << "the two vectors do not have the same dimension!" <<  endl;
    vector<vector<T1>> newMat (vec1.size(), vector<T1> (vec2.size(), 0));
    for (int i = 0; i < vec1.size(); ++i) {
        for (int j = 0; j < vec2.size(); ++j){
            newMat[i][j] = vec1[i] * vec2[j];
        }
    }
    return newMat;
}

template <class T1, class T2>
auto operator + (const vector<vector<T1>>& v1, const vector<vector<T2>>& v2)
->vector<vector<decltype(v1[0][0] + v2[0][0])>> {
    if (v1.size() != v2.size())
        std::cerr<< "the two matrices do not have the same number of rows!" << endl;
    vector<vector<decltype(v1[0][0] + v2[0][0])>> newMat;
    for (int i = 0; i < v1.size(); ++i)
        newMat.push_back(v1[i] + v2[i]);
    return newMat;
}
#endif //MACHINE_LEARNING_MODEL_BASE_H
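
These operator overloads let the algorithm code treat std::vector<double> as a small linear-algebra type. A minimal usage sketch (a hypothetical standalone main, assuming model_base.h is on the include path):

#include "model_base.h"

int main() {
    std::vector<double> a {1.0, 2.0, 3.0};
    std::vector<double> b {4.0, 5.0, 6.0};
    std::vector<double> sum = a + b;      //elementwise: {5, 7, 9}
    double dot = a * b;                   //inner product: 32
    std::vector<double> scaled = 2.0 * a; //scalar * vector: {2, 4, 6}
    auto outer = vecMulVecToMat(a, b);    //3x3 outer product matrix
    auto outerT = transpose(outer);       //its transpose
    cout << "dot = " << dot << endl;
    return 0;
}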

K-Nearest Neighbors

main.cpp 

#include <iostream>
#include <vector>
#include "knn.h"
using std::vector;
using std::cout;
using std::endl;


int main() {
    Base* obj = new Knn();
    obj->run();
    delete obj;
    return 0;
}

knn.cpp 

#include "knn.h"
using std::string;
using std::vector;
using std::pair;
using std::priority_queue;
using std::stack;

void Knn::getData(const std::string &filename) {
    //load data to a vector
    std::vector<double> temData;
    double onepoint;
    std::string line;
    inData.clear();
    std::ifstream infile(filename);
    std::cout<<"reading ..."<<std::endl;
    while(std::getline(infile, line)){
        if(line.empty())
            continue;
        temData.clear();
        std::stringstream stringin(line);
        while(stringin >> onepoint){
            temData.push_back(onepoint);
        }
        indim = temData.size();
        indim -= 1;
        inData.push_back(temData);
    }
    std::cout<<"total data is "<<inData.size()<<std::endl;
}


void Knn::createTrainTest(const float& trainTotalRatio){
    std::random_shuffle(inData.begin(), inData.end());
    unsigned long size = inData.size();
    unsigned long trainSize = size * trainTotalRatio;
    std::cout<<"total data is "<< size<<" ,train data has "<<trainSize<<std::endl;
    for(int i=0;i<size;++i){
        if (i<trainSize)
            trainData.push_back(inData[i]);
        else
            testData.push_back(inData[i]);

    }

}


void Knn::createSplitAxis(){
    cout<<"createSplitAxis..."<<endl;
    //the last element of trainData is gt
    vector<pair<unsigned long, double>> varianceVec;
    auto sumv = trainData[0];
    for(unsigned long i=1;i<trainData.size();++i){
        sumv = sumv + trainData[i];
    }
    auto meanv = sumv/trainData.size();
    vector<decltype(trainData[0]-meanv)> subMean;
    for(const auto& c:trainData)
        subMean.push_back(c-meanv);
    for (unsigned long i = 0; i < trainData.size(); ++i) {
        for (unsigned long j = 0; j < indim; ++j) {
            subMean[i][j] *= subMean[i][j];
        }

    }
    auto varc = subMean[0];
    for(unsigned long i=1;i<subMean.size();++i){
        varc = varc + subMean[i];
    }
    auto var = varc/subMean.size();
    for(unsigned long i=0;i<var.size()-1;++i){//the ground-truth axis is excluded here
        varianceVec.push_back(pair<unsigned long, double>(i, var[i]));
    }
    std::sort(varianceVec.begin(), varianceVec.end(), [](pair<unsigned long, double> &left, pair<unsigned long, double> &right) {
        return left.second < right.second;
    });
    for(const auto& variance:varianceVec){
        axisVec.push(variance.first);//the maximum variance is on the top
    }
    cout<<"createSplitAxis over"<<endl;
}
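
The split order is decided by per-axis variance: for axis j,

\[ \sigma_j^2 = \frac{1}{N}\sum_{i=1}^{N}\left(x_{ij} - \mu_j\right)^2, \]

and because the axes are pushed onto the stack in ascending order of variance, the axis with the largest spread ends up on top and is split first.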



void Knn::setRoot() {
    if(axisVec.empty()){
        cout<<"please run createSplitAxis first."<<endl;
        throw axisVec.empty();
    }
    auto axisv = axisVec;
    auto axis = axisv.top();
    axisv.pop();
    std::sort(trainData.begin(), trainData.end(), [&axis](vector<double> &left, vector<double > &right) {
        return left[axis]<right[axis];
    });
    unsigned long mid = trainData.size()/2;
    for(unsigned long i = 0; i < trainData.size(); ++i){
        if(i!=mid){
            if (i<mid)
                root->leftTreeVal.push_back(trainData[i]);

            else
                root->rightTreeVal.push_back(trainData[i]);
        } else{
            root->val.assign(trainData[i].begin(),trainData[i].end()-1);
            root->splitVal = trainData[i][axis];
            root->axis = axis;
            root->cls = *(trainData[i].end()-1);
        }
    }
    cout<<"root node set over"<<endl;
}



KdtreeNode* Knn::buildTree(KdtreeNode*root, vector<vector<double>>& data, stack<unsigned long>& axisStack) {

    stack<unsigned long> aS;
    if(axisStack.empty())
        aS=axisVec;
    else
        aS=axisStack;
    auto node = new KdtreeNode();
    node->parent = root;

    auto axis2 = aS.top();
    aS.pop();

    std::sort(data.begin(), data.end(), [&axis2](vector<double> &left, vector<double > &right) {
        return left[axis2]<right[axis2];
    });

    unsigned long mid = data.size()/2;

    if(node->leftTreeVal.empty()&&node->rightTreeVal.empty()){
        for(unsigned long i = 0; i < data.size(); ++i){
            if(i!=mid){
                if (i<mid)
                    node->leftTreeVal.push_back(data[i]);
                else
                    node->rightTreeVal.push_back(data[i]);

            } else{
                node->val.assign(data[i].begin(),data[i].end()-1);
                node->splitVal = data[i][axis2];
                node->axis = axis2;
                node->cls = *(data[i].end()-1);
            }
        }
    }

    if(!node->leftTreeVal.empty()){
        node->left = buildTree(node, node->leftTreeVal, aS);
    }
    if(!node->rightTreeVal.empty()){
        node->right = buildTree(node, node->rightTreeVal, aS);
    }

    return node;
}


void Knn::showTree(KdtreeNode* root) {
    if(root == nullptr)
        return;
    cout<<"the feature is ";
    for(const auto& c:root->val)
        cout<<c<<" ";
    cout<<" the class is "<<root->cls<<endl;
    showTree(root->left);
    showTree(root->right);
}


void Knn::findKNearest(vector<double>& testD){
    cout<<"the test data is(the last is class) ";
    for(const auto& c:testD)
        cout<<c<<" ";
    cout<<"\nsearching "<<K<<" nearest val..."<<endl;
    stack<KdtreeNode*> path;

    auto curNode = root;
    while(curNode!= nullptr){
        path.push(curNode);
        if(testD[curNode->axis]<=curNode->splitVal)
            curNode = curNode->left;
        else
            curNode = curNode->right;
    }
    while(!path.empty()){
        auto curN = path.top();
        path.pop();
        vector<double> testDF(testD.begin(),testD.end()-1);
        double dis=0.0;
        dis = computeDis(testDF, curN->val);
        if(maxHeap.size()<K){
            maxHeap.push(pair<double, KdtreeNode*>(dis, curN));
        }
        else{
            if(dis<maxHeap.top().first){
                maxHeap.pop();
                maxHeap.push(pair<double, KdtreeNode*>(dis, curN));
            }
        }
        if(path.empty())
            continue;
        auto curNparent = path.top();
        KdtreeNode* curNchild;
        if(testDF[curNparent->axis]<=curNparent->splitVal)
            curNchild = curNparent->right;
        else
            curNchild = curNparent->left;
        if(curNchild == nullptr)
            continue;
        double childDis = computeDis(testDF, curNchild->val);
        if(childDis<maxHeap.top().first){
            maxHeap.pop();
            maxHeap.push(pair<double, KdtreeNode*>(childDis, curNchild));
            while(curNchild!= nullptr){//add subtree to path
                path.push(curNchild);
                if(testD[curNchild->axis]<=curNchild->splitVal)
                    curNchild = curNchild->left;
                else
                    curNchild = curNchild->right;
            }
        }
    }

}


double Knn::computeDis(const vector<double>& v1, const vector<double>& v2){
    auto v = v1 - v2;
    double di = v*v;
    return di;
}
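
Note that run() below only prints the K nearest neighbours without turning them into a class decision. A minimal sketch of how that last step could look (a hypothetical helper, not part of the original Knn class; it takes the heap by value so the caller's copy is left intact, and it additionally needs <map>):

#include <map>

//majority vote over the K nearest neighbours collected in a max-heap
//of (distance, node) pairs, as built by findKNearest
int majorityVote(std::priority_queue<std::pair<double, KdtreeNode*>> heap) {
    std::map<int, int> votes;
    while (!heap.empty()) {
        votes[heap.top().second->cls] += 1;//count the class of each neighbour
        heap.pop();
    }
    int bestCls = 0, bestCount = -1;
    for (const auto& v : votes) {
        if (v.second > bestCount) {
            bestCount = v.second;
            bestCls = v.first;
        }
    }
    return bestCls;//the most frequent class among the K neighbours
}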


void Knn::DeleteRoot(KdtreeNode *pRoot) //delete the whole tree starting from the root
{
    if (pRoot == nullptr) {
        return;
    }
    KdtreeNode *pLeft = pRoot->left;
    KdtreeNode *pRight = pRoot->right;
    delete pRoot;
    pRoot = nullptr;
    if (pLeft) {
        DeleteRoot(pLeft);
    }
    if (pRight) {
        DeleteRoot(pRight);
    }
    return;
}


Knn::~Knn(){
    DeleteRoot(root);
}

/*perceptrondata.txt
3 4 1
1 1 -1
2 4 1
1 2 -1
1 5 1
2 0.5 -1
1 6 1
1 2.5 -1
0.5 6 1
0 1 -1
2 2.5 1
0.5 1 -1
1 4 1
1.5 1 -1
2.7 1 1
2 3.5 1
0.8 3 -1
0.1 4 -1
*/
void Knn::run(){
    getData("../data/perceptrondata.txt");
    createTrainTest(0.6);
    createSplitAxis();
    setRoot();
    root->left = buildTree(root, root->leftTreeVal, axisVec);
    root->right = buildTree(root, root->rightTreeVal, axisVec);
    cout<<"show the tree in preorder traversal."<<endl;
    showTree(root);
    setK(2);
    for(auto& a:testData) {
        findKNearest(a);
        while (!maxHeap.empty()) {
            cout << "dis: " << maxHeap.top().first;
            cout << " val: ";
            for (auto &c :maxHeap.top().second->val)
                cout << c << " ";
            cout << endl;
            maxHeap.pop();
        }
    }
}

knn.h 

#ifndef MACHINE_LEARNING_KNN_H
#define MACHINE_LEARNING_KNN_H

#include <vector>
#include <stack>
#include <utility>
#include <queue>
#include "model_base.h"



struct KdtreeNode {
    std::vector<double> val;//store val for feature
    int cls;//store class
    unsigned long axis;//split axis
    double splitVal;//mid val for axis
    std::vector<std::vector<double>> leftTreeVal;
    std::vector<std::vector<double>> rightTreeVal;
    KdtreeNode* parent;
    KdtreeNode* left;
    KdtreeNode* right;
    KdtreeNode(): cls(0), axis(0), splitVal(0.0), parent(nullptr), left(nullptr), right(nullptr){};

};


class Knn: public Base{
private:
    std::stack<unsigned long> axisVec;
    KdtreeNode* root = new KdtreeNode();
    unsigned long K;
    std::priority_queue<std::pair<double, KdtreeNode*>> maxHeap;
public:
    virtual void getData(const std::string& filename);
    virtual void run();
    void createTrainTest(const float& trainTotalRatio);
    KdtreeNode* buildTree(KdtreeNode*root, std::vector<std::vector<double>>& data, std::stack<unsigned long>& axisstack);
    void setRoot();
    void createSplitAxis();
    KdtreeNode* getRoot(){return root;}
    void setK(unsigned long k){K = k;}
    void findKNearest(std::vector<double>& testD);
    double computeDis(const std::vector<double>& v1, const std::vector<double>& v2);
    void DeleteRoot(KdtreeNode *pRoot);
    void showTree(KdtreeNode* root);
    ~Knn();
};



#endif //MACHINE_LEARNING_KNN_H

Naive Bayes

main.cpp 

#include <iostream>
#include <vector>
#include "NavieBayes.h"
using std::vector;
using std::cout;
using std::endl;


int main() {
    Base* obj = new NavieBayes();
    obj->run();
    delete obj;
    return 0;
}

naviebayes.cpp


#include "NavieBayes.h"


using std::string;
using std::vector;
using std::pair;
using std::map;
using std::set;


void NavieBayes::getData(const std::string &filename) {
    //load data to a vector
    std::vector<double> temData;
    double onepoint;
    std::string line;
    inData.clear();
    std::ifstream infile(filename);
    std::cout<<"reading ..."<<std::endl;
    while(std::getline(infile, line)){
        if(line.empty())
            continue;
        temData.clear();
        std::stringstream stringin(line);
        while(stringin >> onepoint){
            temData.push_back(onepoint);
        }
        indim = temData.size();
        indim -= 1;
        inData.push_back(temData);
    }
    std::cout<<"total data is "<<inData.size()<<std::endl;
}



void NavieBayes::createTrainTest() {
    std::random_shuffle(inData.begin(), inData.end());
    unsigned long size = inData.size();
    unsigned long trainSize = size * 1;//all data goes to training; a test sample is added manually in run()
    std::cout<<"total data is "<< size<<" ,train data has "<<trainSize<<std::endl;
    for(int i=0;i<size;++i){
        if (i<trainSize)
            trainData.push_back(inData[i]);
        else
            testData.push_back(inData[i]);

    }
    //create feature for test,using trainData, testData
    for (const auto& data:trainData){
        std::vector<double> trainf;
        trainf.assign(data.begin(), data.end()-1);
        trainDataF.push_back(trainf);
        trainDataGT.push_back(*(data.end()-1));
    }
    for (const auto& data:testData){
        std::vector<double> testf;
        testf.assign(data.begin(), data.end()-1);
        testDataF.push_back(testf);
        testDataGT.push_back(*(data.end()-1));
    }
}


void NavieBayes::maxLikeEstim(){
    for(const auto& gt: trainDataGT){
        priProb[std::to_string(gt)] += 1;
    }
    for(unsigned long i=0;i<indim;++i){
        for(unsigned long j=0;j<trainDataF.size();++j)
        {
            auto cond = std::make_pair(std::to_string(trainDataF[j][i]), std::to_string(trainDataGT[j]));
            condProb[i][cond] += 1.0/priProb[std::to_string(trainDataGT[j])];
        }
    }
    for(auto& iter:priProb)
        iter.second /= double(trainDataF.size());
}
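
In formula form, the maximum-likelihood estimates built here are

\[ P(Y=c_k) = \frac{\sum_{i=1}^{N} I(y_i=c_k)}{N}, \qquad P(X^{(j)}=a_{jl} \mid Y=c_k) = \frac{\sum_{i=1}^{N} I(x_i^{(j)}=a_{jl},\, y_i=c_k)}{\sum_{i=1}^{N} I(y_i=c_k)}. \]

Note the order of operations in the code: priProb still holds raw class counts while condProb is accumulated (each matching sample contributes 1/count(c_k)), and only afterwards is priProb itself normalized by N.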



void NavieBayes::bayesEstim(const double& lmbda = 1.0){
    for(const auto& gt: trainDataGT){
        priProb[std::to_string(gt)] += 1.0;
    }
    for(unsigned long i=0;i<indim;++i){
        for(unsigned long j=0;j<trainDataF.size();++j)
        {
            auto cond = std::make_pair(std::to_string(trainDataF[j][i]), std::to_string(trainDataGT[j]));

            condProb[i][cond] += 1.0/(priProb[std::to_string(trainDataGT[j])]+lmbda*xVal[i].size());
        }
    }
    for(unsigned long i=0;i<indim;++i){
        for(auto& d:condProb[i]){
            d.second += lmbda/(priProb[d.first.second]+lmbda*xVal[i].size());
        }
    }

    for(auto& iter:priProb)
        iter.second = (iter.second+lmbda)/(double(trainDataF.size()+yVal.size()));
}
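
With smoothing parameter \lambda (lmbda above), these are the Bayesian (Laplace-smoothed) estimates

\[ P_\lambda(Y=c_k) = \frac{\sum_i I(y_i=c_k) + \lambda}{N + K\lambda}, \qquad P_\lambda(X^{(j)}=a_{jl} \mid Y=c_k) = \frac{\sum_i I(x_i^{(j)}=a_{jl},\, y_i=c_k) + \lambda}{\sum_i I(y_i=c_k) + S_j\lambda}, \]

where K = yVal.size() is the number of classes and S_j = xVal[j].size() is the number of values feature j can take. The second loop adds the \lambda term of the numerator to every (feature value, class) cell, so values never seen in training still get nonzero probability.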


void NavieBayes::initialize() {
    for(unsigned long i=0;i<indim;++i){
        map<pair<string,string>, double> m;
        for(const auto& xval : xVal[i])
            for(const auto& yval:yVal){
                auto cond = std::make_pair(std::to_string(xval), std::to_string(yval));
                m[cond]=0;
            }

        condProb.push_back(m);
    }
    for(const auto& val:yVal)
        priProb[std::to_string(val)]=0;
}


void NavieBayes::train(const string& estim="mle"){
    //training here just means filling in priProb and condProb.
    if(xVal.empty() || yVal.empty()){
        cout<<"please set the value ranges of x and y first."<<endl;
        throw;
    }
    initialize();
    if (estim == "mle")
        maxLikeEstim();
    else {
        if(estim == "byse")
            bayesEstim();
        else{
            cout<<"estimation nust be mle or byse."<<endl;
            throw ;
        }
    }
    cout<<"train over."<<endl;
    for(auto& a:condProb)
        for(auto& c:a){
            cout<<"the conditional probability of "<<c.first.first<<"/"<<c.first.second<<" is: "<<c.second<<endl;
        }
    for(auto& a:priProb)
        cout<<"the priori probability of "<<a.first<<" is: "<<a.second<<endl;
}

void NavieBayes::predict() {
    for(unsigned long j=0;j<testDataF.size();++j){
        double y_t=0;
        double pre = 0;
        cout<<"the test data ture class is "<<testDataGT[j]<<endl;
        for(const auto& y: yVal){
            auto pr = priProb[std::to_string(y)];
            for(unsigned long i=0;i<indim;++i)
                pr *= condProb[i][std::make_pair(std::to_string(testDataF[j][i]), std::to_string(y))];
            cout<<"predict probability of "<<y<<" is "<<pr<<endl;
            if(pr>pre){
                pre = pr;
                y_t = y;
            }

        }
        cout<<"the test data predict class is "<<y_t<<endl;
    }
}

/*naviebayes.txt
1 4 -1
1 5 -1
1 5 1
1 4 1
1 4 -1
2 4 -1
2 5 -1
2 5 1
2 6 1
2 6 1
3 6 1
3 5 1
3 5 1
3 6 1
3 6 -1

*/
void NavieBayes::run() {
    //remember to change the data path if needed
    getData("../data/naviebayes.txt");
    createTrainTest();
    vector<vector<double>> x {{1,2,3},{4,5,6}};//in the book's example the second feature takes letter values; 4,5,6 are substituted for convenience
    vector<double> y {-1,1};
    setInVal(x);
    setOutVal(y);
    train("byse");
    vector<double> testDF {2, 4};
    testDataF.push_back(testDF);
    testDataGT.push_back(-1);
    predict();
}

NavieBayes.h

#ifndef MACHINE_LEARNING_NAVIEBAYES_H
#define MACHINE_LEARNING_NAVIEBAYES_H

#include <vector>
#include <string>
#include <iostream>
#include <map>
#include <set>
#include <utility>
#include "model_base.h"


class NavieBayes: public Base{
private:
    std::vector<std::vector<double>> xVal;
    std::vector<double> yVal;
    std::vector<std::map<std::pair<std::string,std::string>, double>> condProb;
    std::map<std::string, double> priProb;
public:
    virtual void getData(const std::string &filename);
    virtual void run();
    void setInVal(std::vector<std::vector<double>>& in ){xVal = in;}
    void setOutVal(std::vector<double>& out){yVal = out;}
    void initialize();
    void createTrainTest();
    void predict();
    void train(const std::string&);
    void maxLikeEstim();
    void bayesEstim(const double& );
};


#endif //MACHINE_LEARNING_NAVIEBAYES_H

Decision Tree

main.cpp

#include <iostream>
#include <vector>
#include "DecisionTree.h"
using std::vector;
using std::cout;
using std::endl;


int main() {
    Base* obj = new DecisionTree();
    obj->run();
    delete obj;
    return 0;
}

decisiontree.cpp

#include "DecisionTree.h"
using std::string;
using std::vector;
using std::pair;
using std::map;
using std::priority_queue;
using std::set;
void DecisionTree::getData(const string &filename) {
      // load data to a vector
    std::vector<double> temData;
    double onepoint;
    std::string line;
    inData.clear();
    std::ifstream infile(filename);
    std::cout << "reading ..." << std::endl;
    while (std::getline(infile, line)) {
        if (line.empty())
            continue;
        temData.clear();
        std::stringstream stringin(line);
        while (stringin >> onepoint) {
            temData.push_back(onepoint);
        }
        indim = temData.size();
        indim -= 1;
        inData.push_back(temData);
    }
    for (int i = 0; i < indim; ++i)
        features.push_back(i);  // initialize features
    std::cout << "total data is " << inData.size() <<std::endl;
}


void DecisionTree::createTrainTest() {
    std::random_shuffle(inData.begin(), inData.end());
    unsigned long size = inData.size();
    unsigned long trainSize = size * 1;//all data goes to training here
    std::cout<<"total data is "<< size<<" ,train data has "<<trainSize<<std::endl;
    for(int i=0;i<size;++i){
        if (i<trainSize)
            trainData.push_back(inData[i]);
        else
            testData.push_back(inData[i]);

    }
      //create feature for test,using trainData, testData
    for (const auto& data:trainData){
        std::vector<double> trainf;
        trainf.assign(data.begin(), data.end()-1);
        trainDataF.push_back(trainf);
        trainDataGT.push_back(*(data.end()-1));
    }
    for (const auto& data:testData){
        std::vector<double> testf;
        testf.assign(data.begin(), data.end()-1);
        testDataF.push_back(testf);
        testDataGT.push_back(*(data.end()-1));
    }
}

void DecisionTree::initializeRoot(){
    root = new DtreeNode();
}
DtreeNode* DecisionTree::buildTree(DtreeNode* node, vector<vector<double >>& valRange) {
    if (!node)
        return nullptr;
    if (features.empty())
        return nullptr;
    pair<int, double> splitFeatureAndValue = createSplitFeature(valRange);
    node->axis = splitFeatureAndValue.first;
    node->splitVal = splitFeatureAndValue.second;
    set<double> cls_left;
    set<double> cls_right;
    for (const auto& data : valRange){
        if (data[node->axis] == node->splitVal) {
            node->leftTreeVal.push_back(data);
            cls_left.insert(data.back());
        }
        else {
            node->rightTreeVal.push_back(data);
            cls_right.insert(data.back());
        }
    }
    if (cls_left.size()<=1){  //belong to the same class
        node->left = new DtreeNode();
        node->left->isLeaf = true;
        node->left->leafValue = node->leftTreeVal;
    }
    else if (!node->leftTreeVal.empty()) {
        node->left = new DtreeNode();
        node->left = buildTree(node->left, node->leftTreeVal);
    } else{
        return nullptr;
    }
    if (cls_right.size()<=1){  //belong to the same class
        node->right = new DtreeNode();
        node->right->isLeaf = true;
        node->right->leafValue = node->rightTreeVal;
    }
    else if (!node->rightTreeVal.empty()){
        node->right = new DtreeNode();
        node->right = buildTree(node->right, node->rightTreeVal);
    } else{
        return nullptr;
    }
    if (!node->right&&!node->left){
        node->isLeaf=true;
        node->leafValue = valRange;
    }
    return node;
}


pair<int, double> DecisionTree::createSplitFeature(vector<vector<double >>& valRange){
    priority_queue<pair<double, pair<int, double>>, vector<pair<double, pair<int, double>>>, std::greater<pair<double, pair<int, double>>>> minheap;
      //in pair<double, pair<int, double>>, the first value is the Gini index; in the inner pair,
      //the first value is the split axis and the second is the split value
    vector<map<double, int>> dataDivByFeature(indim);  //one map per axis: the key is a feature value and the
      //mapped value is the number of samples taking that feature value
    vector<set<double>> featureVal(indim);  //stores the distinct values of each axis
    vector<map<pair<double, double>, int>> datDivByFC(indim);  //one map per axis: the key is a (feature value, class) pair and
      //the mapped value is the number of samples with that feature value and class
    set<double> cls;  //stores the distinct class values
    for(const auto& featureId:features) {
        if (featureId<0)
            continue;
        map<double, int> dataDivByF;
        map<pair<double, double>, int> dtDivFC;
        set<double> fVal;
        for (auto& data:valRange){  //below data[featureId] is the value of one feature axis, data.back() is class value
            cls.insert(data.back());
            fVal.insert(data[featureId]);
            //std::map's operator[] value-initializes a missing key to 0, so a plain increment counts correctly
            dataDivByF[data[featureId]] += 1;
            dtDivFC[std::make_pair(data[featureId], data.back())] += 1;
        }
        featureVal[featureId] = fVal;
        dataDivByFeature[featureId] = dataDivByF;
        datDivByFC[featureId] = dtDivFC;
    }
    for (auto& featureId: features) {  // for each feature axis
        if (featureId<0)
            continue;
        for (auto& feVal: featureVal[featureId]){  //for each feature value
            double gini1 = 0 ;
            double gini2 = 0 ;

            double prob1 = dataDivByFeature[featureId][feVal]/double(valRange.size());
            double prob2 = 1 - prob1;
            for (auto& c : cls){  //for each class
                double pro1 = double(datDivByFC[featureId][std::make_pair(feVal, c)])/dataDivByFeature[featureId][feVal];
                gini1 += pro1*(1-pro1);
                int numC = 0;
                for (auto& feVal2: featureVal[featureId])
                    numC += datDivByFC[featureId][std::make_pair(feVal2, c)];
                double pro2 = double(numC-datDivByFC[featureId][std::make_pair(feVal, c)])/(valRange.size()-dataDivByFeature[featureId][feVal]);
                gini2 += pro2*(1-pro2);
            }
            double gini = prob1*gini1+prob2*gini2;

            minheap.push(std::make_pair(gini, std::make_pair(featureId, feVal)));
        }
    }
    features[minheap.top().second.first]=-1;
    return minheap.top().second;
}
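
The quantity pushed into the min-heap is the Gini index of the binary split on the pair (featureId, feVal): with D_1 the samples whose feature equals that value and D_2 the rest,

\[ \mathrm{Gini}(D) = \sum_k p_k(1 - p_k), \qquad \mathrm{Gini}(D, A) = \frac{|D_1|}{|D|}\,\mathrm{Gini}(D_1) + \frac{|D_2|}{|D|}\,\mathrm{Gini}(D_2), \]

so the heap top yields the (axis, value) pair with the smallest weighted Gini index, i.e. the purest split.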

void DecisionTree::showTree(DtreeNode* node) {
    if(node == nullptr)
        return;
    cout<<"the leaf class is "<< bool(node->isLeaf) <<endl;
    cout<<" the splitaxis is "<<node->axis<<endl;
    cout<<" the splitval is "<<node->splitVal<<endl;
    if (node->isLeaf){
        for (auto& data : node->leafValue){
            cout<<"leaf value are(the last value is class): ";
            for (auto& d:data)
                cout<<d<<" ";
            cout<<endl;
        }
    }
    showTree(node->left);
    showTree(node->right);
}
/*decisiontree.txt
1 2 2 3 2
1 2 2 2 2
1 1 2 2 1
1 1 1 3 1
1 2 2 3 2
2 2 2 3 2
2 2 2 2 2
2 1 1 2 1
2 2 1 1 1
2 2 1 1 1
3 2 1 1 1
3 2 1 2 1
3 1 2 2 1
3 1 2 3 1
3 2 2 2 2

*/
void DecisionTree::run() {
    //remember to change the data path if needed
    getData("../data/decisiontree.txt");
    createTrainTest();
    initializeRoot();
    buildTree(root, trainData);
    showTree(root);
}

DecisionTree.h

#ifndef MACHINE_LEARNING_DECISIONTREE_H
#define MACHINE_LEARNING_DECISIONTREE_H

#include <vector>
#include <string>
#include <iostream>
#include <map>
#include <set>
#include <queue>
#include <utility>
#include "model_base.h"



struct DtreeNode {
    std::vector<std::vector<double>> leafValue;
    int axis;//split axis
    double splitVal;//split value
    bool isLeaf;
    std::vector<std::vector<double>> leftTreeVal;
    std::vector<std::vector<double>> rightTreeVal;
    DtreeNode* left;
    DtreeNode* right;
    DtreeNode(): isLeaf(false), axis(0), splitVal(0.0), left(nullptr), right(nullptr){};

};


class DecisionTree: public Base{
private:
    DtreeNode* root = nullptr;
    std::vector<int> features;
public:
    virtual void getData(const std::string &filename);
    virtual void run();
    void createTrainTest();
    void initializeRoot();
    DtreeNode* buildTree(DtreeNode* node, std::vector<std::vector<double >>& valRange);
    std::pair<int, double> createSplitFeature(std::vector<std::vector<double >>& valRange);
    void showTree(DtreeNode* node);

};

#endif //MACHINE_LEARNING_DECISIONTREE_H

Logistic Regression

https://github.com/tobeprozy/MachineLearning_Python

main.cpp


#include <iostream>
#include <vector>
#include "Logistic.h"
using std::vector;
using std::cout;
using std::endl;


int main() {
    Base* obj = new Logistic();
    obj->run();
    delete obj;
    return 0;
}

logistic.cpp

#include "Logistic.h"

using std::string;
using std::vector;
using std::pair;

void Logistic::getData(const string &filename){
    //load data to a vector
    std::vector<double> temData;
    double onepoint;
    std::string line;
    inData.clear();
    std::ifstream infile(filename);
    std::cout<<"reading ..."<<std::endl;
    while(std::getline(infile, line)){
        if(line.empty())
            continue;
        temData.clear();
        std::stringstream stringin(line);
        while(stringin >> onepoint){
            temData.push_back(onepoint);
        }
        indim = temData.size();//note: indim here equals feature count + 1, leaving room for the bias term appended in createTrainTest
        inData.push_back(temData);
    }
    std::cout<<"total data is "<<inData.size()<<std::endl;
}


void Logistic::createTrainTest() {
    std::random_shuffle(inData.begin(), inData.end());
    unsigned long size = inData.size();
    unsigned long trainSize = size * 0.6;
    std::cout<<"total data is "<< size<<" ,train data has "<<trainSize<<std::endl;
    for(int i=0;i<size;++i){
        if (i<trainSize)
            trainData.push_back(inData[i]);
        else
            testData.push_back(inData[i]);

    }
    //create feature for test,using trainData, testData
    for (const auto& data:trainData){
        std::vector<double> trainf;
        trainf.assign(data.begin(), data.end()-1);
        trainf.push_back(1.0);
        trainDataF.push_back(trainf);
        trainDataGT.push_back(*(data.end()-1));
    }
    for (const auto& data:testData){
        std::vector<double> testf;
        testf.assign(data.begin(), data.end()-1);
        testf.push_back(1.0);
        testDataF.push_back(testf);
        testDataGT.push_back(*(data.end()-1));
    }
}


double Logistic::logistic(const vector<double>& data){
    double expval = exp(w * data);
    return expval/(1.0+expval);
}

void Logistic::initialize(const vector<double>& wInit){
    w = wInit;
}

vector<double> Logistic::computeGradient(const vector<double>& trainFeature, double trainGrT){
    return -1*trainFeature*(trainGrT-logistic(trainFeature));
}
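
This is the gradient of the per-sample negative log-likelihood that train() below uses as its loss: with \sigma the logistic function and the bias folded into w,

\[ \ell(w) = -y_i\, w \cdot x_i + \log\left(1 + e^{w \cdot x_i}\right), \qquad \nabla_w \ell = \left(\sigma(w \cdot x_i) - y_i\right) x_i, \]

so plain gradient descent, w \leftarrow w - \eta \nabla_w \ell, is the appropriate update for both classes.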

void Logistic::train(const int& step, const double& lr) {
    int count = 0;
    for(int i=0; i<step; ++i) {
        if (count == trainDataF.size())
            count = 0;
        vector<double> grad = computeGradient(trainDataF[count], trainDataGT[count]);
        w = w - lr*grad;//gradient descent on the negative log-likelihood
        auto val = trainDataF[count]*w;
        double loss = -1*trainDataGT[count]*val + log(1 + exp(val));
        cout<<"step "<<i<<", train loss is "<<loss<<" gt "<<trainDataGT[count]<<endl;
        count++;
    }
}

double Logistic::predict(const vector<double>& inputData, const double& GT){
    cout<<"The right class is "<<GT<<endl;
    double out = logistic(inputData);
    if(out>=0.5){
        std::cout<<"The predict class is 1"<<std::endl;
        return 1;
    }
    else{
        std::cout<<"The predict class is 0"<<std::endl;
        return 0;
    }
}
/*
logistic.txt
3 4 1
4 4 1
1 1 0
1 2 0
3 6 1
4 6 1
6 6 1
2 1 0
1 3 0
0 1 0

*/


void Logistic::run(){
    //remember to change the data path if needed
    getData("../data/logistic.txt");
    createTrainTest();
    std::vector<double> init (indim, 0.5);
    initialize(init);
    train(20, 1.0);//20 is steps and 1.0 is learning rate
    for(int i=0; i<testDataF.size(); ++i){
        std::cout<<i<<std::endl;
        predict(testDataF[i], testDataGT[i]);
    }
}

Logistic.h

#ifndef MACHINE_LEARNING_LOGISTIC_H
#define MACHINE_LEARNING_LOGISTIC_H

#include <cmath>
#include <vector>
#include "model_base.h"

class Logistic: public Base{
private:
    vector<double> w;
public:
    virtual void getData(const std::string &filename);
    virtual void run();
    double logistic(const std::vector<double>& data);
    void createTrainTest();
    void initialize(const std::vector<double>& );
    void train(const int& step, const double& lr);
    std::vector<double> computeGradient(const std::vector<double>& trainFeature, double trainGrT);
    double predict(const std::vector<double>& inputData, const double& GT);
};


#endif //MACHINE_LEARNING_LOGISTIC_H

Support Vector Machine

main.cpp

#include <iostream>
#include <vector>
#include "SVM.h"
using std::vector;
using std::cout;
using std::endl;


int main() {

    Base* obj = new SVM();
    obj->run();
    delete obj;
    return 0;
}

svm.cpp

#include "SVM.h"

using std::string;
using std::vector;
using std::pair;


void SVM::getData(const string &filename){
    //load data to a vector
    std::vector<double> temData;
    double onepoint;
    std::string line;
    inData.clear();
    std::ifstream infile(filename);
    std::cout<<"reading ..."<<std::endl;
    while(std::getline(infile, line)){
        if(line.empty())
            continue;
        temData.clear();
        std::stringstream stringin(line);
        while(stringin >> onepoint){
            temData.push_back(onepoint);
        }
        indim = temData.size()-1;
        inData.push_back(temData);
    }
    std::cout<<"total data is "<<inData.size()<<std::endl;
}


void SVM::createTrainTest() {
    std::random_shuffle(inData.begin(), inData.end());
    unsigned long size = inData.size();
    unsigned long trainSize = size * 0.6;
    std::cout<<"total data is "<< size<<" ,train data has "<<trainSize<<std::endl;
    for(int i=0;i<size;++i){
        if (i<trainSize)
            trainData.push_back(inData[i]);
        else
            testData.push_back(inData[i]);

    }
    //create feature for test,using trainData, testData
    for (const auto& data:trainData){
        std::vector<double> trainf;
        trainf.assign(data.begin(), data.end()-1);
        trainDataF.push_back(trainf);
        trainDataGT.push_back(*(data.end()-1));
    }
    for (const auto& data:testData){
        std::vector<double> testf;
        testf.assign(data.begin(), data.end()-1);
        testDataF.push_back(testf);
        testDataGT.push_back(*(data.end()-1));
    }
}


void SVM::SMO() {
    /*
     * this function follows Platt, J., "Sequential Minimal Optimization: A Fast Algorithm for Training Support Vector Machines", 1998.
     */
    int numChanged = 0;
    int examineAll = 1;
    while(numChanged > 0 || examineAll){
        numChanged = 0;
        if (examineAll){
            for (int i=0; i<trainDataF.size();++i)
                numChanged+=SMOExamineExample(i);
        }
        else{
            for (int i=0; i<trainDataF.size();++i){
                if(alpha[i]!=0&&alpha[i]!=C)
                    numChanged+=SMOExamineExample(i);
            }
        }
        if(examineAll==1)
            examineAll=0;
        else{
            if(numChanged==0)
                examineAll=1;
        }
    }

}
double SVM::kernel(vector<double> & x1, vector<double> & x2) {
    //here use linear kernel
    return x1 * x2;
}
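
The implementation is hard-wired to a linear kernel, which is also why it can maintain an explicit weight vector w. As a sketch of what a drop-in nonlinear alternative could look like (a hypothetical Gaussian/RBF kernel; gamma is an assumed hyperparameter, and <cmath> would be needed for std::exp):

//hypothetical RBF kernel: k(x1, x2) = exp(-gamma * ||x1 - x2||^2),
//built on the vector operators from model_base.h
double rbfKernel(vector<double>& x1, vector<double>& x2, double gamma = 0.5) {
    vector<double> diff = x1 - x2;
    return std::exp(-gamma * (diff * diff));//diff * diff is the squared norm
}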
double SVM::computeE(int& i) {
    double e = 0;
    for(int j =0 ; j < trainDataF.size(); ++j){
        e += alpha[j]*trainDataGT[j]*kernel(trainDataF[j], trainDataF[i]);
    }
    e += b;
    e -= trainDataGT[i];
    //e = w*trainDataF[i]+b-trainDataGT[i];
    return e;

}


pair<double, double> SVM::SMOComputeOB(int& i1, int& i2, double&L, double& H) {
    double y1 = trainDataGT[i1];
    double y2 = trainDataGT[i2];
    double s = y1 * y2;
    double f1 = y1 * (E[i1] + b) - alpha[i1] * kernel(trainDataF[i1], trainDataF[i1]) -
                s * alpha[i2] * kernel(trainDataF[i1], trainDataF[i2]);
    double f2 = y2 * (E[i2] + b) - s * alpha[i1] * kernel(trainDataF[i1], trainDataF[i2]) -
                alpha[i2] * kernel(trainDataF[i2], trainDataF[i2]);
    double L1 = alpha[i1] + s * (alpha[i2] - L);
    double H1 = alpha[i1] + s * (alpha[i2] - H);
    double obL = L1 * f1 + L * f2 + 0.5 * L1 * L1 * kernel(trainDataF[i1], trainDataF[i1]) +
                 0.5 * L * L * kernel(trainDataF[i2], trainDataF[i2]) +
                 s * L * L1 * kernel(trainDataF[i1], trainDataF[i2]);
    double obH = H1 * f1 + H * f2 + 0.5 * H1 * H1 * kernel(trainDataF[i1], trainDataF[i1]) +
                 0.5 * H * H * kernel(trainDataF[i2], trainDataF[i2]) +
                 s * H * H1 * kernel(trainDataF[i1], trainDataF[i2]);
    return std::make_pair(obL, obH);
}


int SVM::SMOTakeStep(int& i1, int& i2) {
    if (i1 == i2)
        return 0;
    double y1 = trainDataGT[i1];
    double y2 = trainDataGT[i2];
    double s = y1 * y2;
    double L, H;
    if (y1 != y2) {
        L = (alpha[i2] - alpha[i1]) > 0 ? alpha[i2] - alpha[i1] : 0;//Platt: L = max(0, alpha2 - alpha1)
        H = (alpha[i2] - alpha[i1] + C) < C ? alpha[i2] - alpha[i1] + C : C;//H = min(C, C + alpha2 - alpha1)
    } else {
        L = (alpha[i1] + alpha[i2] - C) > 0 ? alpha[i1] + alpha[i2] - C : 0;
        H = (alpha[i1] + alpha[i2]) < C ? alpha[i1] + alpha[i2] : C;
    }
    if (L == H)
        return 0;
    double k11 = kernel(trainDataF[i1], trainDataF[i1]);
    double k12 = kernel(trainDataF[i1], trainDataF[i2]);
    double k22 = kernel(trainDataF[i2], trainDataF[i2]);
    double eta = k11 + k22 - 2 * k12;
    double a2;
    if (eta > 0) {
        a2 = alpha[i2] + y2 * (E[i1] - E[i2]) / eta;
        if (a2 < L)
            a2 = L;
        else {
            if (a2 > H)
                a2 = H;
        }
    } else {
        pair<double, double> ob = SMOComputeOB(i1, i2, L, H);
        double Lobj = ob.first;
        double Hobj = ob.second;
        if (Lobj < Hobj - eps)
            a2 = L;
        else {
            if (Lobj > Hobj + eps)
                a2 = H;
            else
                a2 = alpha[i2];
        }
    }
    if (std::abs(a2 - alpha[i2]) < eps * (a2 + alpha[i2] + eps))
        return 0;
    double a1 = alpha[i1] + s * (alpha[i2] - a2);
    double b1;
    //note: this update equation follows 《统计学习方法》 (Statistical Learning Methods), p.130, not the equation in the paper
    b1= -E[i1] - y1 * (a1 - alpha[i1]) * kernel(trainDataF[i1], trainDataF[i1]) -
                y2 * (a2 - alpha[i2]) * kernel(trainDataF[i1], trainDataF[i2]) + b;
    double b2;
    b2 = -E[i2] - y1 * (a1 - alpha[i1]) * kernel(trainDataF[i1], trainDataF[i2]) -
                y2 * (a2 - alpha[i2]) * kernel(trainDataF[i2], trainDataF[i2]) + b;
    double bNew = (b1 + b2) / 2;
    b = bNew;
    w = w + y1 * (a1 - alpha[i1]) * trainDataF[i1] + y2 * (a2 - alpha[i2]) *
                                                     trainDataF[i2];
    //this is the linear SVM case; the equation is equation 22 in the paper
    alpha[i1] = a1;
    alpha[i2] = a2;
//    vector<double> wtmp (indim);
//    for (int i=0; i<trainDataF.size();++i)
//    {
//        auto tmp = alpha[i]*trainDataF[i]*trainDataGT[i];
//        wtmp = wtmp+tmp;
//    }
//    w = wtmp;
    E[i1] = computeE(i1);
    E[i2] = computeE(i2);
    return 1;
}
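
In equation form, the step just taken is the standard SMO update: with \eta = K_{11} + K_{22} - 2K_{12},

\[ \alpha_2^{new} = \alpha_2 + \frac{y_2\,(E_1 - E_2)}{\eta} \ \text{clipped to}\ [L, H], \qquad \alpha_1^{new} = \alpha_1 + y_1 y_2\,(\alpha_2 - \alpha_2^{new}), \]

with the threshold b recomputed from the two KKT conditions and averaged.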

int SVM::SMOExamineExample(int i2){
    double y2 = trainDataGT[i2];
    double alph2 = alpha[i2];
    double E2 = E[i2];
    double r2 = E2*y2;
    if((r2<-tol && alph2<C)||(r2>tol && alph2>0)){
        int alphNum = 0;
        for (auto& a:alpha){
            if (a != 0 && a != C)
                alphNum++;
        }
        if (alphNum>1){
            double dis = 0;
            int i1 = 0;//initialize so SMOTakeStep never sees an indeterminate index
            for(int j=0;j<E.size();++j){
                if (std::abs(E[j]-E[i2])>dis){
                    i1 = j;
                    dis = std::abs(E[j]-E[i2]);
                }

            }

            if (SMOTakeStep(i1,i2))
                return 1;
        }
        for (int i = 0; i < alpha.size();++i){
            if (alpha[i] != 0 && alpha[i] != C){
                int i1 = i;
                if (SMOTakeStep(i1, i2))
                    return 1;
            }
        }
        for(int i = 0; i < trainDataF.size();++i){
            int i1 = i;
            if (SMOTakeStep(i1, i2))
                return 1;
        }

    }
    return 0;
}

void SVM::initialize() {
    b = 0;
    for(int i=0;i<trainDataF.size();++i){
        alpha.push_back(0.0);
    }
    for(int i=0;i<indim;++i){
        w.push_back(0.0);
    }
    for(int i=0;i<trainDataF.size();++i){
        double e = computeE(i);
        E.push_back(e);
    }


}

void SVM::train() {
    initialize();
    SMO();
}

double SVM::predict(const vector<double> &inputData) {
    double p = w*inputData+b;
    if(p>0)
        return 1.0;
    else
        return -1.0;
}
/*perceptrondata.txt
3 4 1
1 1 -1
2 4 1
1 2 -1
1 5 1
2 0.5 -1
1 6 1
1 2.5 -1
0.5 6 1
0 1 -1
2 2.5 1
0.5 1 -1
1 4 1
1.5 1 -1
2.7 1 1
2 3.5 1
0.8 3 -1
0.1 4 -1


*/
void SVM::run() {
    getData("../data/perceptrondata.txt");
    createTrainTest();
    train();
    cout<<"w and b is: "<<endl;
    for(auto&c : w)
        cout<<c<<" ";
    cout<<b<< endl;
    for(int i = 0; i<testDataF.size();++i){
        cout<<"the true class of this point is "<<testDataGT[i];
        double pre = predict(testDataF[i]);
        cout<<", the predict class of this point is "<<pre<<endl;

    }
}

SVM.h  

#ifndef MACHINE_LEARNING_SVM_H
#define MACHINE_LEARNING_SVM_H
#include <vector>
#include <utility>
#include <iostream>
#include "model_base.h"

class SVM : public Base{
private:
    std::vector<double> w;
    std::vector<double> alpha;
    double b;
    std::vector<double> E;
    double tol=0.001;
    double eps=0.0005;
    double C=1.0;
public:
    virtual void getData(const std::string &filename);
    virtual void run();
    void createTrainTest();
    void SMO();
    int SMOTakeStep(int& i1, int& i2);
    int SMOExamineExample(int i2);
    double kernel(std::vector<double>& , std::vector<double>&);
    double computeE(int& i);
    std::pair<double, double> SMOComputeOB(int& i1, int& i2, double&L, double& H);
    void initialize();
    void train();
    double predict(const std::vector<double>& inputData);
};

#endif //MACHINE_LEARNING_SVM_H

AdaBoost

https://www.cnblogs.com/pinard/p/6133937.html

main.cpp

#include <iostream>
#include <vector>
#include "AdaBoost.h"
using std::vector;
using std::cout;
using std::endl;

int main() {
    Base* obj = new AdaBoost();
    obj->run();
    delete obj;
    return 0;
}

AdaBoost.cpp 

#include "AdaBoost.h"


using std::string;
using std::vector;
using std::pair;


void AdaBoost::getData(const string &filename){
    //load data to a vector
    std::vector<double> temData;
    double onepoint;
    std::string line;
    inData.clear();
    std::ifstream infile(filename);
    std::cout<<"reading ..."<<std::endl;
    while(std::getline(infile, line)){
        if(line.empty())
            continue;
        temData.clear();
        std::stringstream stringin(line);
        while(stringin >> onepoint){
            temData.push_back(onepoint);
        }
        indim = temData.size()-1;
        inData.push_back(temData);
    }
    std::cout<<"total data is "<<inData.size()<<std::endl;
}


void AdaBoost::createTrainTest() {
    std::random_shuffle(inData.begin(), inData.end());
    unsigned long size = inData.size();
    unsigned long trainSize = size * 0.6;
    std::cout<<"total data is "<< size<<" ,train data has "<<trainSize<<std::endl;
    for(int i=0;i<size;++i){
        if (i<trainSize)
            trainData.push_back(inData[i]);
        else
            testData.push_back(inData[i]);

    }
    //create feature for test,using trainData, testData
    for (const auto& data:trainData){
        std::vector<double> trainf;
        trainf.assign(data.begin(), data.end()-1);
        featrWeight.push_back(1.0);
        trainDataF.push_back(trainf);
        trainDataGT.push_back(*(data.end()-1));
    }
    for (const auto& data:testData){
        std::vector<double> testf;
        testf.assign(data.begin(), data.end()-1);
        testDataF.push_back(testf);
        testDataGT.push_back(*(data.end()-1));
    }
    featrWeight = featrWeight / featrWeight.size();
}

int AdaBoost::computeWeights(Perceptron* classifier) {
    vector<double> trainGT;
    for(int i =0; i<trainDataGT.size();++i)
        trainGT.push_back(trainDataGT[i]*featrWeight[i]);//scaling each label by its weight makes the perceptron loss sample-weighted
    classifier->setTrainD(trainDataF, trainGT);
    classifier->setDim(indim);
    classifier->train(100, 0.9);
    double errorRate = 0;
    for(int i = 0; i<trainDataF.size();++i) {
        if (classifier->predict(trainDataF[i])!=int(trainDataGT[i]))
            errorRate += featrWeight[i];
    }
    if(errorRate==0){
        if(clsfWeight.size()==0)
            clsfWeight.push_back(1);
        return 0;
    }

    double clsW;
    clsW = 0.5*std::log((1-errorRate)/errorRate);
    clsfWeight.push_back(clsW);


    double zm=0;
    for(int i = 0; i<trainDataF.size();++i) {
        zm+=featrWeight[i]*std::exp(-clsW*trainDataGT[i]*classifier->predict(trainDataF[i]));
    }

    for(int i = 0; i<featrWeight.size();++i ){
        featrWeight[i] = featrWeight[i]/zm*std::exp(-clsW*trainDataGT[i]*classifier->predict(trainDataF[i]));
    }
    return 1;
}
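
In the usual AdaBoost notation, computeWeights does exactly this: with weighted error e_m of the m-th weak classifier (featrWeight starts uniform at 1/N), it sets the classifier weight and renormalizes the sample weights as

\alpha_m = \frac{1}{2} \ln\frac{1 - e_m}{e_m}
w_{m+1,i} = \frac{w_{m,i}}{Z_m} \exp\bigl(-\alpha_m y_i G_m(x_i)\bigr), \qquad Z_m = \sum_i w_{m,i} \exp\bigl(-\alpha_m y_i G_m(x_i)\bigr)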

int AdaBoost::predict(vector<double> &testF) {
    double out = 0;
    for(int i = 0; i<clsfWeight.size();++i) {
        out += clsfWeight[i] * classifiers[i]->predict(testF);
    }
    if (out > 0)
        return 1;
    else
        return -1;
}
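
That is, predict evaluates the weighted majority vote of the weak classifiers:

G(x) = \operatorname{sign}\Bigl(\sum_{m} \alpha_m G_m(x)\Bigr)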

/*perceptrondata.txt
3 4 1
1 1 -1
2 4 1
1 2 -1
1 5 1
2 0.5 -1
1 6 1
1 2.5 -1
0.5 6 1
0 1 -1
2 2.5 1
0.5 1 -1
1 4 1
1.5 1 -1
2.7 1 1
2 3.5 1
0.8 3 -1
0.1 4 -1


*/

void AdaBoost::run() {
//    remember to update the path to the data file
    getData("../data/perceptrondata.txt");
    createTrainTest();

    int isContinue = 1;
    while(isContinue){
        Perceptron* cls = new Perceptron();
        isContinue = computeWeights(cls);
        if(isContinue || classifiers.size()==0)
            classifiers.push_back(cls);
        else
            delete cls;  // the rejected classifier would otherwise leak
    }
    for(int i=0; i<testDataF.size(); ++i){
        std::cout<<i<<std::endl;
        std::cout<<"The right class is "<<testDataGT[i]<<std::endl;
        int out = predict(testDataF[i]);
        std::cout<<"The predict class is "<<out<<std::endl;
    }
    int nfc = clsfWeight.size();
    nfc = nfc == 0 ? 1:nfc;
    cout<<"The number of classfiers is "<<nfc<<endl;
    for(int i = 0; i<classifiers.size();++i)
        delete classifiers[i];
}

AdaBoost.h

#ifndef MACHINE_LEARNING_ADABOOST_H
#define MACHINE_LEARNING_ADABOOST_H

#include <vector>
#include "model_base.h"
#include "perceptron.h"

class AdaBoost: public Base {
private:
    vector<double> clsfWeight;
    vector<double> featrWeight;
    vector<Perceptron* > classifiers;
public:
    virtual void getData(const std::string &filename);
    virtual void run();
    void createTrainTest();
    int computeWeights(Perceptron* classifier);
    int predict(vector<double>& testF);
};


#endif //MACHINE_LEARNING_ADABOOST_H

GMM 

(Figure 2 omitted)

main.cpp

#include <iostream>
#include <vector>
#include "GMM.h"
using std::vector;
using std::cout;
using std::endl;


int main() {
    Base* obj = new GMM();
    obj->run();
    delete obj;
    return 0;
}

GMM.cpp

#include "GMM.h"

using std::string;
using std::vector;
using std::cout;
using std::endl;


void GMM::getData(const std::string &filename) {
    //load data to a vector
    std::vector<double> temData;
    double onepoint;
    std::string line;
    inData.clear();
    std::ifstream infile(filename);
    std::cout << "reading ..." << std::endl;
    while (std::getline(infile, line)) {  // read line by line; avoids looping on eof()
        temData.clear();
        if(line.empty())
            continue;
        std::stringstream stringin (line);
        while (stringin >> onepoint) {
            temData.push_back(onepoint);
        }
        indim = temData.size() - 1;
        inData.push_back(temData);
    }
    std::cout<<"total data is "<<inData.size()<<std::endl;
}



void GMM::createTrainTest() {
    std::random_shuffle(inData.begin(), inData.end());
    unsigned long size = inData.size();
    unsigned long trainSize = size * 0.7;
    std::cout << "total data is " << size << " ,train data has " << trainSize << std::endl;
    for (int i = 0;i < size; ++i) {
        if (i<trainSize)
            trainData.push_back(inData[i]);
        else
            testData.push_back(inData[i]);

    }
    // split each sample into a feature vector and a ground-truth label
    for (const auto& data:trainData){
        std::vector<double> trainf;
        trainf.assign(data.begin(), data.end()-1);
        trainDataF.push_back(trainf);
        trainDataGT.push_back(*(data.end()-1));
    }
    for (const auto& data:testData){
        std::vector<double> testf;
        testf.assign(data.begin(), data.end()-1);
        testDataF.push_back(testf);
        testDataGT.push_back(*(data.end()-1));
    }
}
double GMM::getDet(const vector<vector<double>> &mat) {
    // compute the determinant by cofactor (Laplace) expansion along the first row
    if (mat.empty())
        throw "mat must be a square matrix";
    if (mat.size() == 1) {
        return mat[0][0];
    }
    if (mat.size() == 2) {
        if (mat[0].size() != 2 || mat[1].size() != 2)
            throw "mat must be a square matrix";
        return mat[0][0]*mat[1][1] - mat[0][1]*mat[1][0];
    }
    double det = 0;
    for (int numCol = 0; numCol < mat.size(); ++numCol) {
        // build the minor: drop row 0 and column numCol
        vector<vector<double>> newMat;
        for (int i = 1; i < mat.size(); ++i) {
            vector<double> matRow;
            for (int j = 0; j < mat.size(); ++j) {
                if (j == numCol)
                    continue;
                matRow.push_back(mat[i][j]);
            }
            newMat.push_back(matRow);
        }
        int factor = numCol % 2 == 0 ? 1 : -1;  // sign of (-1)^(0+numCol)
        det += factor * mat[0][numCol] * getDet(newMat);
    }
    return det;
}
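
getDet is a direct recursion on the Laplace (cofactor) expansion along the first row, where M_{0j} denotes the minor obtained by deleting row 0 and column j:

\det A = \sum_{j=0}^{n-1} (-1)^{j}\, a_{0j}\, \det M_{0j}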


vector<vector<double>> GMM::matInversion(vector<vector<double>> &mat) {
    // compute the inverse via the adjugate: inv(A)[j][i] = (-1)^(i+j) * minor(i,j) / det(A)
    double det = getDet(mat);
    if (std::abs(det) < 1e-10)
        std::cerr << "det of mat must not be 0" << endl;
    vector<vector<double>> invMat (mat.size(), vector<double>(mat.size(), 0));
    for (int i = 0; i < invMat.size(); ++i) {
        for (int j = 0; j < invMat.size(); ++j) {
            // build the minor: drop row i and column j
            vector<vector<double>> newMat;
            for (int x = 0; x < mat.size(); ++x) {
                if (x == i)
                    continue;
                vector<double> matRow;
                for (int y = 0; y < mat.size(); ++y) {
                    if (y == j)
                        continue;
                    matRow.push_back(mat[x][y]);   // was mat[i][j]: that bug filled the minor with a single value
                }
                newMat.push_back(matRow);
            }
            double sign = (i + j) % 2 == 0 ? 1.0 : -1.0;  // cofactor sign, missing in the original
            invMat[j][i] = sign * getDet(newMat) / det;   // note the transposed indices (adjugate)
        }
    }
    return invMat;
}
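
In matrix notation, matInversion computes the inverse through the adjugate, which is why the result is written to invMat[j][i] (the transpose) and why the cofactor sign is required:

A^{-1} = \frac{1}{\det A}\,\operatorname{adj}(A), \qquad \operatorname{adj}(A)_{ji} = (-1)^{i+j}\,\det M_{ij}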
double GMM::gaussian(vector<double>& muI, vector<vector<double>>& sigmaI,
                     vector<double>& observeValue) {
    vector<double> xMinusMu = observeValue - muI;
    vector<double> rightMul;
    vector<vector<double>> matInvers = transpose(matInversion(sigmaI));
    // for computational convenience, use the transposed matrix with the row-wise operator*
    for (auto& vec : matInvers) {
        rightMul.push_back(xMinusMu * vec);
    }
    double finalMul = rightMul * xMinusMu;   // (x-mu)^T * Sigma^{-1} * (x-mu)
    double det = getDet(sigmaI);
    const double PI = 3.141592653589793;     // 3.14 was too coarse; also avoid integer division in the exponent
    double gaussianVal = 1.0 / (std::pow(2.0 * PI, indim / 2.0) * std::sqrt(det))
                         * std::exp(-0.5 * finalMul);
    return gaussianVal;                      // the original fell off the end without returning a value
}
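
gaussian evaluates the d-dimensional normal density (d = indim):

N(x \mid \mu_k, \Sigma_k) = \frac{1}{(2\pi)^{d/2}\,|\Sigma_k|^{1/2}} \exp\Bigl(-\tfrac{1}{2}(x-\mu_k)^{\top}\Sigma_k^{-1}(x-\mu_k)\Bigr)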

void GMM::EMAlgorithm(vector<double> &alphaOld, vector<vector<vector<double>>> &sigmaOld,
        vector<vector<double>> &muOld) {
    // E-step: compute the responsibilities gamma[i][k]
    for (int i = 0; i < trainDataF.size(); ++i) {
        double probSum = 0;
        for (int l = 0; l < alpha.size(); ++l) {
            double gas = gaussian(muOld[l], sigmaOld[l], trainDataF[i]);
            probSum += alphaOld[l] * gas;
        }
        for (int k = 0; k < alpha.size(); ++k) {
            double gas = gaussian(muOld[k], sigmaOld[k], trainDataF[i]);
            gamma[i][k] = alphaOld[k] * gas / probSum;
        }
    }
    // M-step: update mu, sigma, and alpha from the responsibilities
    for (int k = 0; k < alpha.size(); ++k) {
        vector<double> muNew;
        vector<vector<double>> sigmaNew;
        double alphaNew;
        vector<double> muNumerator;
        double sumGamma = 0.0;
        for (int i = 0; i < trainDataF.size(); ++i) {
            sumGamma += gamma[i][k];
            if (i==0) {
                muNumerator = gamma[i][k] * trainDataF[i];
            }
            else {
                muNumerator = muNumerator + gamma[i][k] * trainDataF[i];
            }
        }
        muNew = muNumerator / sumGamma;
        for (int i = 0; i < trainDataF.size(); ++i) {
            if (i==0) {
                auto temp1 = gamma[i][k]/ sumGamma * (trainDataF[i] - muNew);
                auto temp2 = trainDataF[i] - muNew;
                sigmaNew = vecMulVecToMat(temp1, temp2);
            }
            else {
                auto temp1 = gamma[i][k] / sumGamma * (trainDataF[i] - muNew);
                auto temp2 = trainDataF[i] - muNew;
                sigmaNew = sigmaNew + vecMulVecToMat(temp1, temp2);
            }
        }
        alphaNew = sumGamma / trainDataF.size();
        mu[k] = muNew;
        sigma[k] = sigmaNew;
        alpha[k] = alphaNew;
    }
}
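
Each call to EMAlgorithm is one EM iteration for the Gaussian mixture. In symbols, the E-step and M-step it performs are:

\gamma_{ik} = \frac{\alpha_k\, N(x_i \mid \mu_k, \Sigma_k)}{\sum_{l} \alpha_l\, N(x_i \mid \mu_l, \Sigma_l)}
\mu_k = \frac{\sum_i \gamma_{ik}\, x_i}{\sum_i \gamma_{ik}}, \qquad \Sigma_k = \frac{\sum_i \gamma_{ik}\,(x_i-\mu_k)(x_i-\mu_k)^{\top}}{\sum_i \gamma_{ik}}, \qquad \alpha_k = \frac{1}{N}\sum_i \gamma_{ik}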

void GMM::train(int steps, int k) {
    // initialize alpha (uniform), mu (one labeled sample per component), sigma (diagonal 0.1), gamma
    if (alpha.empty() && mu.empty() && sigma.empty() && gamma.empty()) {
        for (int i = 0; i < k; ++i) {
            alpha.push_back(1.0/k);
            for (int index = 0; index < trainDataGT.size(); ++index){
                if((int)trainDataGT[index] == i+1) {
                    mu.push_back(trainDataF[index]);
                    break;
                }
            }
            vector<vector<double>> sigm (indim, vector<double> (indim));
            for (int row = 0; row < indim; ++row) {
                for (int col = 0; col < indim; ++col){
                    if (row == col)
                        sigm[row][col] = 0.1;
                }
            }
            sigma.push_back(sigm);
        }
        for (int i = 0; i < trainDataF.size(); ++i) {
            vector<double> gammaTemp;
            for (int j = 0; j < k; ++j)
                gammaTemp.push_back(1.0/(trainDataF.size() * k));
            gamma.push_back(gammaTemp);
        }
    }
    for (int step = 0; step < steps ; ++step)
        EMAlgorithm(alpha, sigma, mu);
    // map each Gaussian component to a class label by majority vote over the training data
    vector<vector<double>> vote (alpha.size(), vector<double> (alpha.size()));
    for (int i = 0; i < trainDataF.size(); ++i) {
        double prob = 0.0;
        int index = -1;
        for (int l = 0; l < alpha.size(); ++l) {
            double probk = gaussian(mu[l], sigma[l], trainDataF[i]);
            if (probk > prob) {
                prob = probk;
                index = l;
            }
        }
        int cls = (int)trainDataGT[i]-1;
        vote[index][cls] += 1;
    }
    gaussVote = vote;
}


int GMM::predict(vector<double>& testF, double& testGT) {
    cout << "the true class is " << testGT << endl;
    double prob = 0.0;
    int index = -1;
    for (int k = 0; k < alpha.size(); ++k) {
        double probk = gaussian(mu[k], sigma[k], testF);
        if (probk > prob) {
            prob = probk;
            index = k;
        }
    }
    int pred = std::distance(gaussVote[index].begin(),
                             std::max_element(gaussVote[index].begin(), gaussVote[index].end()));
    cout << "the predict class is " << pred+1 << endl;
    return pred;
}

/*GMM.txt
1 1 1
1 1.5 1
1 2.5 1
2.5 4 2
2.5 5 2
3 3 2
4 1.5 3
4 2 3
5 1 3
2 2 1
1.5 2 1
1.5 4 1
2.5 6 2
3 4 2
3 5 2
4 5 2
4.5 1 3
4.5 1.5 3
4.5 2 3
5 2 3
2.5 1 1

*/
void GMM::run() {
    // remember to update the path to the data file
    getData("../data/GMM.txt");
    createTrainTest();
    train(10, 3);
    for(int i = 0; i < testDataF.size(); ++i) {
        predict(testDataF[i], testDataGT[i]);
    }
}

GMM.h

//
// Created by wyb on 19-2-27.
//

#ifndef MACHINE_LEARNING_GMM_H
#define MACHINE_LEARNING_GMM_H
#include <vector>
#include <string>

#include "model_base.h"

class GMM : public Base {
private:
    std::vector<double> alpha;
    std::vector<std::vector<std::vector<double>>> sigma;
    std::vector<std::vector<double>> mu;
    std::vector<std::vector<double>> gamma;
    std::vector<std::vector<double>> gaussVote;
public:
    virtual void getData(const std::string &filename);
    virtual void run();
    void createTrainTest();
    void EMAlgorithm(std::vector<double>& alphaOld,
                     std::vector<std::vector<std::vector<double>>>& sigmaOld,
                     std::vector<std::vector<double>>& muOld);
    void train(int steps, int k);
    double gaussian(std::vector<double>& muI, std::vector<std::vector<double>>& sigmaI,
                    std::vector<double>& observeValue);
    double getDet(const std::vector<std::vector<double>>& mat);
    std::vector<std::vector<double>> matInversion(std::vector<std::vector<double>>& mat);
    int predict(std::vector<double>& testF, double& testGT);
};

#endif //MACHINE_LEARNING_GMM_H
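
Note: the code above relies on a handful of vector/matrix helpers (operator*, operator-, operator+, operator/, vecMulVecToMat, transpose) that live elsewhere in the repository and are not reproduced here. The sketch below shows plausible minimal implementations so the snippets can be read (or compiled) standalone; the actual signatures in the repo may differ.

// utils_sketch.h -- hypothetical helpers; the repository's real versions may differ
#include <cstddef>
#include <vector>
using std::vector;

// dot product, e.g. xMinusMu * vec
inline double operator*(const vector<double>& a, const vector<double>& b) {
    double s = 0.0;
    for (std::size_t i = 0; i < a.size(); ++i) s += a[i] * b[i];
    return s;
}

// scalar * vector, e.g. gamma[i][k] * trainDataF[i]
inline vector<double> operator*(double c, const vector<double>& a) {
    vector<double> r(a.size());
    for (std::size_t i = 0; i < a.size(); ++i) r[i] = c * a[i];
    return r;
}

// element-wise difference, e.g. observeValue - muI
inline vector<double> operator-(const vector<double>& a, const vector<double>& b) {
    vector<double> r(a.size());
    for (std::size_t i = 0; i < a.size(); ++i) r[i] = a[i] - b[i];
    return r;
}

// element-wise sum, e.g. muNumerator + gamma[i][k] * trainDataF[i]
inline vector<double> operator+(const vector<double>& a, const vector<double>& b) {
    vector<double> r(a.size());
    for (std::size_t i = 0; i < a.size(); ++i) r[i] = a[i] + b[i];
    return r;
}

// vector / scalar, e.g. muNumerator / sumGamma, featrWeight / featrWeight.size()
inline vector<double> operator/(const vector<double>& a, double c) {
    vector<double> r(a.size());
    for (std::size_t i = 0; i < a.size(); ++i) r[i] = a[i] / c;
    return r;
}

// element-wise matrix sum, e.g. sigmaNew + vecMulVecToMat(temp1, temp2)
inline vector<vector<double>> operator+(const vector<vector<double>>& A,
                                        const vector<vector<double>>& B) {
    vector<vector<double>> R = A;
    for (std::size_t i = 0; i < A.size(); ++i)
        for (std::size_t j = 0; j < A[i].size(); ++j) R[i][j] += B[i][j];
    return R;
}

// outer product a * b^T, used in the covariance update
inline vector<vector<double>> vecMulVecToMat(const vector<double>& a, const vector<double>& b) {
    vector<vector<double>> R(a.size(), vector<double>(b.size()));
    for (std::size_t i = 0; i < a.size(); ++i)
        for (std::size_t j = 0; j < b.size(); ++j) R[i][j] = a[i] * b[j];
    return R;
}

// matrix transpose
inline vector<vector<double>> transpose(const vector<vector<double>>& A) {
    vector<vector<double>> R(A[0].size(), vector<double>(A.size()));
    for (std::size_t i = 0; i < A.size(); ++i)
        for (std::size_t j = 0; j < A[i].size(); ++j) R[j][i] = A[i][j];
    return R;
}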
