决策树自编程和sklearn实现

[原文此处有配图(信息增益/决策树示意图),抓取时图片已丢失]

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
import math
from collections import Counter


def create_datas():
    """Return the loan-application toy dataset (15 samples) and its column names.

    Returns
    -------
    tuple[list[list[str]], list[str]]
        (rows, column_names); the last column '类别' is the class label.
    """
    columns = ['年龄', '有工作', '有自己的房子', '信贷情况', '类别']
    rows = [
        ['青年', '否', '否', '一般', '否'],
        ['青年', '否', '否', '好', '否'],
        ['青年', '是', '否', '好', '是'],
        ['青年', '是', '是', '一般', '是'],
        ['青年', '否', '否', '一般', '否'],
        ['中年', '否', '否', '一般', '否'],
        ['中年', '否', '否', '好', '否'],
        ['中年', '是', '是', '好', '是'],
        ['中年', '否', '是', '非常好', '是'],
        ['中年', '否', '是', '非常好', '是'],
        ['老年', '否', '是', '非常好', '是'],
        ['老年', '否', '是', '好', '是'],
        ['老年', '是', '否', '好', '是'],
        ['老年', '是', '否', '非常好', '是'],
        ['老年', '否', '否', '一般', '否'],
    ]
    return rows, columns


# Build the training DataFrame from the toy loan dataset; column names
# come from create_datas() and the last column '类别' is the class label.
datasets00, labels = create_datas()
train_datas = pd.DataFrame(datasets00, columns=labels)


# print(train_datas.iloc[1, 2])
# a = train_datas.iloc[2, -1]
# print([train_datas.iloc[1].tolist()])
# index = train_datas.columns.values.tolist().index('有工作')
# group = train_datas.groupby('年龄')
# print(group.all())
# print(train_datas.columns.values.tolist().index('有工作'))


def calculate_entropy(datasets):
    """Compute the empirical entropy H(D) of the class-label column.

    Parameters
    ----------
    datasets : pd.DataFrame
        Samples whose LAST column holds the class label.

    Returns
    -------
    float
        Shannon entropy (base 2) of the last column; 0.0 for an empty frame.
    """
    total_len = len(datasets)
    if total_len == 0:
        # Guard: the original raised ZeroDivisionError on an empty frame.
        return 0.0
    # Counter over the whole label column replaces the per-row iloc loop.
    label_count = Counter(datasets.iloc[:, -1])
    return -sum((count / total_len) * math.log2(count / total_len)
                for count in label_count.values())


def split_datasets(datasets, feature):
    """Partition a DataFrame into sub-frames by the values of one feature.

    Parameters
    ----------
    datasets : pd.DataFrame
        The samples to partition.
    feature : str
        Column name to group by.

    Returns
    -------
    list[pd.DataFrame]
        One sub-frame per distinct value of *feature*, in order of first
        appearance, each with a fresh RangeIndex (matching the original
        hand-rolled dict-of-rows behaviour, but in one vectorized pass).
    """
    # sort=False keeps first-appearance order, like dict insertion order did.
    return [group.reset_index(drop=True)
            for _, group in datasets.groupby(feature, sort=False)]


def calculate_conditon_entropy(datasets, feature):
    """Compute the conditional entropy H(D|A) of the labels given feature A.

    H(D|A) = sum over values v of A of  |D_v|/|D| * H(D_v).

    Parameters
    ----------
    datasets : pd.DataFrame
        Samples whose last column holds the class label.
    feature : str
        Column name of the feature A to condition on.

    Returns
    -------
    float
        The conditional entropy (base 2).

    Note: removed a large block of commented-out dead code that duplicated
    part of split_datasets().
    """
    total = len(datasets)
    return sum(len(subset) / total * calculate_entropy(subset)
               for subset in split_datasets(datasets, feature))


def info_gain(datasets, feature):
    """Information gain g(D, A) = H(D) - H(D|A) for splitting on *feature*."""
    base_entropy = calculate_entropy(datasets)
    conditional_entropy = calculate_conditon_entropy(datasets, feature)
    return base_entropy - conditional_entropy


def info_gain_ratio(datasets, feature):
    """Information gain ratio gR(D, A) = g(D, A) / H_A(D)  (C4.5 criterion).

    H_A(D) is the "split information": the entropy of the distribution of
    the feature's own values over the dataset.

    Parameters
    ----------
    datasets : pd.DataFrame
        Samples whose last column holds the class label.
    feature : str
        Column name of the candidate split feature.

    Returns
    -------
    float
        The gain ratio; 0.0 when the feature takes a single value
        (the original raised ZeroDivisionError in that case).
    """
    gain = info_gain(datasets, feature)
    total = len(datasets)
    split_info = -sum(
        len(subset) / total * math.log2(len(subset) / total)
        for subset in split_datasets(datasets, feature)
    )
    if split_info == 0:
        # Single-valued feature: the gain is also 0; define the ratio as 0.
        return 0.0
    return gain / split_info


def info_gain_train(datasets):
    """Return {feature_name: information gain} for every feature column.

    The last column is the class label and is excluded.
    """
    feature_names = datasets.columns.values.tolist()[:-1]
    return {name: info_gain(datasets, name) for name in feature_names}


def info_gain_ratio_train(datasets):
    """Return {feature_name: information gain ratio} for every feature column.

    The last column is the class label and is excluded.
    """
    feature_names = datasets.columns.values.tolist()[:-1]
    return {name: info_gain_ratio(datasets, name) for name in feature_names}


# Pick the root-node feature: the one with the largest information-gain ratio.
my_result = info_gain_ratio_train(train_datas)
print(my_result)
# BUG FIX: max() over a dict compares KEYS (lexicographically), which does
# not select the best feature; key=my_result.get compares the gain ratios.
print(max(my_result, key=my_result.get))
print('首先在所有特征里面选择:有自己的房子,这个特征作为决策树的根节点')


# sklearn练习

def get_datas():
    """Load the iris dataset and return (features, labels) as numpy arrays.

    Returns
    -------
    tuple[np.ndarray, np.ndarray]
        A (150, 4) feature matrix and a length-150 label vector.
    """
    iris = datasets.load_iris()
    frame = pd.DataFrame(iris['data'], columns=iris['feature_names'])
    frame['label'] = iris['target']
    matrix = np.array(frame)
    features, targets = matrix[:, :-1], matrix[:, -1]
    return features, targets


# Train/test split: 70% train, 30% test. (Fixed typo: x_teat -> x_test.)
x, y = get_datas()
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
import graphviz

clf = DecisionTreeClassifier()
clf.fit(x_train, y_train)
print(clf.score(x_test, y_test))

# BUG FIX: export_graphviz writes Graphviz DOT source, not a PDF, so the
# output file must not be named .pdf; write the DOT text to mytree.dot
# and read it back for rendering with the graphviz package.
export_graphviz(clf, out_file="mytree.dot")
with open('mytree.dot') as f:
    dot_graph = f.read()
graphviz.Source(dot_graph)

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值