核心是两行向量化代码。下面比较两种实现(向量化与显式循环)的运算速度:向量化后运行速度提升约 100 倍。
# Performance test: build a large synthetic dataset (10 million points).
m = 10000000
big_x = np.random.random(size=m)
print(big_x)
# Target follows y = 2x + 3 plus standard Gaussian noise.
big_y = big_x * 2.0 + 3.0 + np.random.normal(size=m)
向量化实现的计时:
%timeit reg2.fit(big_x,big_y)
未向量化(显式循环)实现的计时:
%timeit reg1.fit(big_x,big_y)
11.8 s ± 249 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
134 ms ± 3.2 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
num = (x_train - x_mean).dot(y_train - y_mean)
d = (x_train - x_mean).dot(x_train - x_mean)
class Simple_linear_Regression2:
    """Simple (single-feature) linear regression fitted by least squares.

    Uses the vectorized closed-form solution:
        a = sum((x - x_mean) * (y - y_mean)) / sum((x - x_mean) ** 2)
        b = y_mean - a * x_mean
    """

    def __init__(self):
        # a_ / b_: slope and intercept learned by fit(). The trailing
        # underscore marks attributes computed during fitting
        # (sklearn naming convention).
        self.a_ = None
        self.b_ = None

    def fit(self, x_train, y_train):
        """Fit the model on 1-D arrays x_train and y_train; return self."""
        assert x_train.ndim == 1,\
            "Simple Linear Regression can only solve single feature training data"
        assert len(x_train) == len(y_train),\
            "the size of x_train must be equal to the size of y_train"

        x_mean = np.mean(x_train)
        y_mean = np.mean(y_train)
        # Vectorized computation: the two dot products replace an explicit
        # Python loop over (x, y) pairs (~100x faster on large arrays).
        num = (x_train - x_mean).dot(y_train - y_mean)
        d = (x_train - x_mean).dot(x_train - x_mean)
        self.a_ = num / d
        self.b_ = y_mean - self.a_ * x_mean
        return self

    def predict(self, x_predict):
        """Return the prediction vector for the 1-D array x_predict."""
        assert x_predict.ndim == 1,\
            "Simple Linear Regression can only solve single feature training data."
        assert self.a_ is not None and self.b_ is not None,\
            "must fit before predict!"
        # Vectorized: compute a*x + b for the whole array at once instead of
        # looping over _predict per element.
        return self.a_ * x_predict + self.b_

    def _predict(self, x_single):
        """Return the predicted value for a single input x_single."""
        return self.a_ * x_single + self.b_

    def __repr__(self):
        return 'Simple_linear_Regression2'