From Scratch: Implementing the Random Forest Algorithm in Python
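The complete, runnable example below implements a random forest from scratch: an ensemble of CART decision trees, each grown on a bootstrap sample of the training data and restricted to a random subset of features at every split point. It is applied to the Sonar dataset (60 numeric inputs and a two-class label) and scored with 5-fold cross-validation for forests of 1, 5, and 10 trees. The listing is presented in four parts, with a short illustrative sketch after each: the data loading and evaluation harness, the decision tree, the bagging layer, and the driver script.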
# Random Forest Algorithm on Sonar Dataset
from random import seed
from random import randrange
from csv import reader
from math import sqrt

# Load a CSV file
def load_csv(filename):
    dataset = list()
    with open(filename, 'r') as file:
        csv_reader = reader(file)
        for row in csv_reader:
            if not row:
                continue
            dataset.append(row)
    return dataset

# Convert string column to float
def str_column_to_float(dataset, column):
    for row in dataset:
        row[column] = float(row[column].strip())

# Convert string class column to integer labels
def str_column_to_int(dataset, column):
    class_values = [row[column] for row in dataset]
    unique = set(class_values)
    lookup = dict()
    for i, value in enumerate(unique):
        lookup[value] = i
    for row in dataset:
        row[column] = lookup[row[column]]
    return lookup

# Split a dataset into k folds
def cross_validation_split(dataset, n_folds):
    dataset_split = list()
    dataset_copy = list(dataset)
    fold_size = int(len(dataset) / n_folds)  # int() so the folds cannot overdraw the copy in Python 3
    for i in range(n_folds):
        fold = list()
        while len(fold) < fold_size:
            index = randrange(len(dataset_copy))
            fold.append(dataset_copy.pop(index))
        dataset_split.append(fold)
    return dataset_split

# Calculate accuracy percentage
def accuracy_metric(actual, predicted):
    correct = 0
    for i in range(len(actual)):
        if actual[i] == predicted[i]:
            correct += 1
    return correct / float(len(actual)) * 100.0

# Evaluate an algorithm using a cross validation split
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
    folds = cross_validation_split(dataset, n_folds)
    scores = list()
    for fold in folds:
        train_set = list(folds)
        train_set.remove(fold)
        train_set = sum(train_set, [])
        test_set = list()
        for row in fold:
            row_copy = list(row)
            test_set.append(row_copy)
            row_copy[-1] = None  # hide the label from the algorithm under test
        predicted = algorithm(train_set, test_set, *args)
        actual = [row[-1] for row in fold]
        accuracy = accuracy_metric(actual, predicted)
        scores.append(accuracy)
    return scores
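Before moving on, the harness can be smoke-tested on its own. The sketch below is mine, not part of the original article; the six-row toy dataset and the always_zero baseline classifier are made up for illustration:

# Illustrative sketch (not from the article): exercise evaluate_algorithm
# with a trivial baseline that always predicts class 0, using 2-fold CV.
def always_zero(train, test):
    return [0 for _ in test]

toy = [[1.0, 0], [2.0, 0], [3.0, 1], [4.0, 1], [5.0, 0], [6.0, 1]]
seed(1)
print(evaluate_algorithm(toy, always_zero, 2))  # prints two per-fold accuracy percentages

The next part implements the CART building blocks: splitting a dataset on an attribute, scoring candidate splits with the Gini index, and growing and querying a tree.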
# Split a dataset based on an attribute and an attribute value
def test_split(index, value, dataset):
    left, right = list(), list()
    for row in dataset:
        if row[index] < value:
            left.append(row)
        else:
            right.append(row)
    return left, right

# Calculate the Gini index for a split dataset
def gini_index(groups, class_values):
    gini = 0.0
    for class_value in class_values:
        for group in groups:
            size = len(group)
            if size == 0:
                continue
            proportion = [row[-1] for row in group].count(class_value) / float(size)
            gini += (proportion * (1.0 - proportion))
    return gini

# Select the best split point for a dataset, considering a random subset of features
def get_split(dataset, n_features):
    class_values = list(set(row[-1] for row in dataset))
    b_index, b_value, b_score, b_groups = 999, 999, 999, None
    features = list()
    while len(features) < n_features:
        index = randrange(len(dataset[0]) - 1)
        if index not in features:
            features.append(index)
    for index in features:
        for row in dataset:
            groups = test_split(index, row[index], dataset)
            gini = gini_index(groups, class_values)
            if gini < b_score:
                b_index, b_value, b_score, b_groups = index, row[index], gini, groups
    return {'index': b_index, 'value': b_value, 'groups': b_groups}

# Create a terminal node value (the most common class in the group)
def to_terminal(group):
    outcomes = [row[-1] for row in group]
    return max(set(outcomes), key=outcomes.count)

# Create child splits for a node or make terminal
def split(node, max_depth, min_size, n_features, depth):
    left, right = node['groups']
    del(node['groups'])
    # check for a no split
    if not left or not right:
        node['left'] = node['right'] = to_terminal(left + right)
        return
    # check for max depth
    if depth >= max_depth:
        node['left'], node['right'] = to_terminal(left), to_terminal(right)
        return
    # process left child
    if len(left) <= min_size:
        node['left'] = to_terminal(left)
    else:
        node['left'] = get_split(left, n_features)
        split(node['left'], max_depth, min_size, n_features, depth + 1)
    # process right child
    if len(right) <= min_size:
        node['right'] = to_terminal(right)
    else:
        node['right'] = get_split(right, n_features)
        split(node['right'], max_depth, min_size, n_features, depth + 1)

# Build a decision tree
def build_tree(train, max_depth, min_size, n_features):
    root = get_split(train, n_features)  # split on the training sample, not the global dataset
    split(root, max_depth, min_size, n_features, 1)
    return root

# Make a prediction with a decision tree
def predict(node, row):
    if row[node['index']] < node['value']:
        if isinstance(node['left'], dict):
            return predict(node['left'], row)
        else:
            return node['left']
    else:
        if isinstance(node['right'], dict):
            return predict(node['right'], row)
        else:
            return node['right']
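A single tree can also be exercised in isolation. Again a sketch of my own with a made-up dataset (last column is the class label); it grows one tree and classifies an unseen row:

# Illustrative sketch (not from the article): grow one decision tree on six
# labelled points; the two classes are separable on the first feature.
toy = [[2.7, 2.5, 0], [1.4, 2.3, 0], [3.3, 4.4, 0],
       [7.4, 0.6, 1], [9.1, 3.0, 1], [7.6, 3.5, 1]]
seed(1)
tree = build_tree(toy, 3, 1, 2)    # max_depth=3, min_size=1, n_features=2
print(predict(tree, [8.0, 2.0]))   # prints 1: the row falls on the class-1 side of the root split

With single trees working, what remains is the bagging layer: bootstrap sampling, majority voting, and the random_forest function itself.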
# Create a random subsample from the dataset with replacement
def subsample(dataset, ratio):
    sample = list()
    n_sample = round(len(dataset) * ratio)
    while len(sample) < n_sample:
        index = randrange(len(dataset))
        sample.append(dataset[index])
    return sample

# Make a prediction with a list of bagged trees
def bagging_predict(trees, row):
    predictions = [predict(tree, row) for tree in trees]
    return max(set(predictions), key=predictions.count)

# Random Forest Algorithm
def random_forest(train, test, max_depth, min_size, sample_size, n_trees, n_features):
    trees = list()
    for i in range(n_trees):
        sample = subsample(train, sample_size)
        tree = build_tree(sample, max_depth, min_size, n_features)
        trees.append(tree)
    predictions = [bagging_predict(trees, row) for row in test]
    return predictions
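The bagging path can be traced by hand on the same kind of toy data. This sketch (illustrative only, not from the article) bootstraps three samples, grows a tree on each, and takes a majority vote:

# Illustrative sketch: build a tiny forest manually and combine the votes.
toy = [[2.7, 2.5, 0], [1.4, 2.3, 0], [3.3, 4.4, 0],
       [7.4, 0.6, 1], [9.1, 3.0, 1], [7.6, 3.5, 1]]
seed(1)
trees = [build_tree(subsample(toy, 1.0), 3, 1, 1) for _ in range(3)]
print(bagging_predict(trees, [8.0, 2.0]))  # majority vote of the three trees

Finally, the driver script ties everything together on the Sonar data.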
# Test the random forest algorithm
seed(1)
# load and prepare data
filename = 'sonar.all-data.csv'
dataset = load_csv(filename)
# convert string attributes to floats
for i in range(0, len(dataset[0]) - 1):
    str_column_to_float(dataset, i)
# convert class column to integers
str_column_to_int(dataset, len(dataset[0]) - 1)
# evaluate algorithm
n_folds = 5
max_depth = 10
min_size = 1
sample_size = 1.0
n_features = int(sqrt(len(dataset[0]) - 1))
for n_trees in [1, 5, 10]:
    scores = evaluate_algorithm(dataset, random_forest, n_folds, max_depth, min_size, sample_size, n_trees, n_features)
    print('Trees: %d' % n_trees)
    print('Scores: %s' % scores)
    print('Mean Accuracy: %.3f%%' % (sum(scores) / float(len(scores))))
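Cross-validation only estimates how well the procedure generalizes; to make predictions you would fit a final forest on all of the data. A minimal sketch reusing the variables from the driver script above (the choice of 10 trees here is arbitrary, not from the article):

# Illustrative sketch: fit a final forest on the full dataset, classify one row.
final_trees = [build_tree(subsample(dataset, sample_size), max_depth, min_size, n_features)
               for _ in range(10)]
print('Predicted class: %d' % bagging_predict(final_trees, dataset[0]))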
Source: http://www.36dsj.com/archives/93623