#include "stdio.h"
#include "math.h"
/* Training set: 6 samples, 4 columns each.
   Column 0 is the bias feature x0 = 1; columns 1-3 are real-valued features.
   Samples 0-2 are labeled 1, samples 3-5 are labeled 0 (see y[] below). */
double x[6][4]={{1.0,47.0,76.0,24.0}, //include x0=1 (bias term)
{1.0,46.0,77.0,23.0},
{1.0,48.0,74.0,22.0},
{1.0,34.0,76.0,21.0},
{1.0,35.0,75.0,24.0},
{1.0,34.0,77.0,25.0},
};
/* Class labels for the 6 samples above (binary: 1.0 or 0.0). */
double y[]={1.0,1.0,1.0,0.0,0.0,0.0};
/* Model parameters, one per feature column; theta[0] is the bias weight.
   All initialized to 1.0 before training. */
double theta[]={1.0,1.0,1.0,1.0}; // include theta0
/* Loop indices (file-scope for simplicity; i = epoch, j = sample, k = feature). */
int i,j,k;
/* Scratch globals: ez/gz are sigmoid intermediates, gh holds g(h) in main. */
double ez,gz,gh;
double fun_g(double z){//重点是定义函数g(z),之后可以调用
ez= pow(2.718,z);
gz=ez/(1+ez);
return gz;
}
int main(){
double fun_g(double z);
for(i=1;i<1000;i++){
for(j=0;j<6;j++){
double h=0.0;
for(k=0;k<4;k++){//记住j,k都从0开始
h=h+theta[k]*x[j][k];
}
gh=fun_g(h) ;
double error=0.0;
error=y[j]-gh;
for(k=0;k<4;k++){
theta[k]=theta[k]+0.01*error*x[j][k];
}
printf("%lf %lf %lf %lf\t",theta[0],theta[1],theta[2],theta[3]);
}
}
#include "math.h"
double x[6][4]={{1.0,47.0,76.0,24.0}, //include x0=1
{1.0,46.0,77.0,23.0},
{1.0,48.0,74.0,22.0},
{1.0,34.0,76.0,21.0},
{1.0,35.0,75.0,24.0},
{1.0,34.0,77.0,25.0},
};
double y[]={1.0,1.0,1.0,0.0,0.0,0.0};
double theta[]={1.0,1.0,1.0,1.0}; // include theta0
int i,j,k;
double ez,gz,gh;
double fun_g(double z){//重点是定义函数g(z),之后可以调用
ez= pow(2.718,z);
gz=ez/(1+ez);
return gz;
}
int main(){
double fun_g(double z);
for(i=1;i<1000;i++){
for(j=0;j<6;j++){
double h=0.0;
for(k=0;k<4;k++){//记住j,k都从0开始
h=h+theta[k]*x[j][k];
}
gh=fun_g(h) ;
double error=0.0;
error=y[j]-gh;
for(k=0;k<4;k++){
theta[k]=theta[k]+0.01*error*x[j][k];
}
printf("%lf %lf %lf %lf\t",theta[0],theta[1],theta[2],theta[3]);
}
}
}
logistic回归和线性回归的参数迭代学习规则形式完全一样,但意义不同:logistic回归本质上是对对数似然函数做梯度上升,而该对数似然函数是凹函数,所以找到的局部最大值一定是全局最大值
logistic回归用来处理分类问题,输出离散的类别标签
http://blog.csdn.net/pennyliang/article/details/7045372
【这是别人Logistic Regression求解classification问题 的代码】
Given summed input:
x = sum_i (w_i * x_i)
Instead of a hard threshold, and fire/not fire,
we could have continuous output y according to the sigmoid function: y = 1 / (1 + e^(-x))  [设定了一个sigmoid函数]
Note e and its properties.
As x goes to minus infinity, y goes to 0 (tends not to fire).
As x goes to infinity, y goes to 1 (tends to fire):
At x=0, y=1/2